# test.py: test a MobileViT model on the ImageNet validation set.
import os
# Use either one of these as the backend.
# os.environ["KERAS_BACKEND"] = "jax"
# os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["KERAS_BACKEND"] = "torch"
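# NOTE: KERAS_BACKEND must be set before Keras is first imported (here that
# presumably happens inside utils.load_weights), or the setting is ignored.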
import torch
import torchvision.transforms as transforms
from torchvision.datasets import ImageNet
from torch.utils.data import DataLoader
from utils.load_weights import return_models
from utils.dataset import downloadImageNet
import argparse
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Set up argument parser
parser = argparse.ArgumentParser(description='Test the model on ImageNet')
parser.add_argument('--model_size', type=str, default='XS',
                    help='Model size: S, XS or XXS')
args = parser.parse_args()
model_size = args.model_size
print('Model Size:', model_size)
model = return_models(model_size)
model = model.to(device)
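# With the torch backend, a Keras model is also a torch.nn.Module, which is
# why .to(device) works on it directly.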
# Define transformation and dataset
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x[[2, 1, 0], ...])  # Convert from RGB to BGR (MobileViT was trained on BGR images)
])
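# Quick sanity check of the channel swap (an illustrative sketch): a pure-red
# RGB pixel should land in the last (blue) position after reordering.
_probe = torch.zeros(3, 1, 1)
_probe[0, 0, 0] = 1.0
assert _probe[[2, 1, 0], ...][2, 0, 0].item() == 1.0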
downloadImageNet()
val_dataset = ImageNet(root='./res', split='val', transform=transform)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False, num_workers=1)
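# NOTE: torchvision's ImageNet does not download the archives itself;
# downloadImageNet is presumably responsible for placing the ILSVRC2012
# validation tarball and devkit under ./res before this point.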
# Top-1 Accuracy
def evaluate_top1_accuracy(model, dataloader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in tqdm(dataloader, desc='Evaluating...'):
            images, labels = images.to(device), labels.to(device)
            images = images.permute(0, 2, 3, 1)  # Convert to (batch, height, width, channels)
            outputs = model(images)
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return correct / total
top1_accuracy = evaluate_top1_accuracy(model, val_loader)
print(f'Top-1 Accuracy: {top1_accuracy:.2%}')
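# Top-5 accuracy is the usual companion metric on ImageNet. A minimal sketch
# (an illustrative addition, assuming the model returns (batch, 1000) logits):
def evaluate_top5_accuracy(model, dataloader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in tqdm(dataloader, desc='Evaluating top-5...'):
            images, labels = images.to(device), labels.to(device)
            images = images.permute(0, 2, 3, 1)  # channels-last for the Keras model
            outputs = model(images)
            _, top5 = outputs.topk(5, dim=1)  # indices of the five largest logits
            correct += (top5 == labels.unsqueeze(1)).any(dim=1).sum().item()
            total += labels.size(0)
    return correct / total
# top5_accuracy = evaluate_top5_accuracy(model, val_loader)  # optional second pass over the val set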
# Showing model parameters
num_params = model.count_params()
print(f'Number of Parameters: {num_params}')
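# Equivalent count from the torch side (a sketch; count_params is the Keras
# API, and the underlying torch parameters should agree):
# num_params_torch = sum(p.numel() for p in model.parameters())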
# FLOPs
os.system("pip install torchprofile")  # installed on the fly; pinning it in a requirements file would be cleaner
from torchprofile import profile_macs
model.eval()
input_tensor = torch.randn(1, 3, 256, 256).to(device)
input_tensor = input_tensor.permute(0, 2, 3, 1)
# Calculate MACs (multiply-accumulate operations)
macs = profile_macs(model, input_tensor)
flops = 2 * macs  # by convention, one MAC counts as two FLOPs (a multiply and an add)
print(f"FLOPs: {flops / 1e9:.2f} GFLOPs")
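# Note that some papers report MACs directly under the name "FLOPs"; printing
# both values makes the convention used here explicit.
print(f"MACs: {macs / 1e9:.2f} GMACs")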
# Inference Time
import time
def measure_inference_time_keras(model, dataloader, num_batches=100):
    # NOTE: the measured time includes fetching batches from the DataLoader.
    start_time = time.time()
    for i, (images, _) in enumerate(dataloader):
        if i == num_batches:
            break
        # Rearrange dimensions to match the Keras expected input shape (batch, height, width, channels)
        images = images.permute(0, 2, 3, 1).to(device)
        with torch.no_grad():  # ensure no gradients are tracked
            _ = model(images)  # run the forward pass
    if device.type == 'cuda':
        torch.cuda.synchronize()  # CUDA kernels run asynchronously; flush them before stopping the clock
    avg_inference_time = (time.time() - start_time) / num_batches
    return avg_inference_time
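# A few warm-up batches are commonly run before timing so that one-time costs
# (CUDA context creation, cuDNN autotuning) do not skew the average; a minimal
# sketch for the measurement below:
for i, (images, _) in enumerate(val_loader):
    if i == 3:
        break
    with torch.no_grad():
        _ = model(images.permute(0, 2, 3, 1).to(device))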
avg_inference_time = measure_inference_time_keras(model, val_loader)
print(f'Average Inference Time per Batch: {avg_inference_time:.4f} seconds')