Update README.md
Browse files
README.md
CHANGED
|
@@ -48,11 +48,12 @@ This repository contains the **MobileCLIP2-S2** checkpoint.
|
|
| 48 |
import torch
|
| 49 |
import open_clip
|
| 50 |
from PIL import Image
|
|
|
|
| 51 |
from timm.utils import reparameterize_model
|
| 52 |
|
| 53 |
- model, _, preprocess = open_clip.create_model_and_transforms('MobileCLIP2-…
|
| 54 |
model.eval()
|
| 55 |
- tokenizer = open_clip.get_tokenizer('MobileCLIP2-…
|
| 56 |
|
| 57 |
# For inference/model exporting purposes, optionally reparameterize for better performance
|
| 58 |
model = reparameterize_model(model)
|
|
@@ -63,7 +64,7 @@ image = Image.open(urlopen(
|
|
| 63 |
image = preprocess(image).unsqueeze(0)
|
| 64 |
text = tokenizer(["a diagram", "a dog", "a cat", "a doughnut"])
|
| 65 |
|
| 66 |
- with torch.no_grad(), torch.cuda.amp.autocast():
|
| 67 |
image_features = model.encode_image(image)
|
| 68 |
text_features = model.encode_text(text)
|
| 69 |
image_features /= image_features.norm(dim=-1, keepdim=True)
|
|
|
|
| 48 |
import torch
|
| 49 |
import open_clip
|
| 50 |
from PIL import Image
|
| 51 |
+ from urllib.request import urlopen
|
| 52 |
from timm.utils import reparameterize_model
|
| 53 |
|
| 54 |
+ model, _, preprocess = open_clip.create_model_and_transforms('MobileCLIP2-S0', pretrained='dfndr2b')
|
| 55 |
model.eval()
|
| 56 |
+ tokenizer = open_clip.get_tokenizer('MobileCLIP2-S0')
|
| 57 |
|
| 58 |
# For inference/model exporting purposes, optionally reparameterize for better performance
|
| 59 |
model = reparameterize_model(model)
|
|
|
|
| 64 |
image = preprocess(image).unsqueeze(0)
|
| 65 |
text = tokenizer(["a diagram", "a dog", "a cat", "a doughnut"])
|
| 66 |
|
| 67 |
+ with torch.no_grad(), torch.amp.autocast(image.device.type):
|
| 68 |
image_features = model.encode_image(image)
|
| 69 |
text_features = model.encode_text(text)
|
| 70 |
image_features /= image_features.norm(dim=-1, keepdim=True)
|