Commit 8ddf365 · Parent(s): c501468

fp16
fp16.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b8b14a66890d6c9edd8f9b39fcd04b100fa9bc781d68713736226a742a6e2dd
size 14575027161
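The file above is only a Git LFS pointer; the actual ~14.6 GB half-precision checkpoint is fetched by `git lfs pull`. As a minimal sketch (assuming the file is a standard mmcv/PyTorch checkpoint whose weights sit under a 'state_dict' key), its contents can be inspected like this:

import torch

# Load on CPU so the large checkpoint can be inspected without GPU memory.
ckpt = torch.load('fp16.pth', map_location='cpu')
state_dict = ckpt.get('state_dict', ckpt)  # mmcv checkpoints usually nest weights under 'state_dict'

# Spot-check that the stored tensors are actually float16.
for name, tensor in list(state_dict.items())[:5]:
    print(name, tensor.dtype, tuple(tensor.shape))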
projects/configs/OmniDrive/eva_base_tinyllama_fp16.py ADDED
@@ -0,0 +1,294 @@
_base_ = [
    '../../../mmdetection3d/configs/_base_/datasets/nus-3d.py',
    '../../../mmdetection3d/configs/_base_/default_runtime.py'
]
backbone_norm_cfg = dict(type='LN', requires_grad=True)
plugin = True
plugin_dir = 'projects/mmdet3d_plugin/'

# If the point cloud range is changed, the models should also change their
# point cloud range accordingly.
point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size = [0.2, 0.2, 8]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# For nuScenes we usually do 10-class detection.
class_names = [
    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]

num_gpus = 8
batch_size = 2
num_iters_per_epoch = 28130 // (num_gpus * batch_size)
num_epochs = 12
llm_path = 'ckpts/pretrain_tiny'

collect_keys = ['lidar2img', 'intrinsics', 'extrinsics', 'timestamp', 'img_timestamp', 'ego_pose', 'ego_pose_inv', 'command', 'can_bus']
input_modality = dict(
    use_lidar=False,
    use_camera=True,
    use_radar=False,
    use_map=False,
    use_external=True)
model = dict(
    type='Petr3D',
    save_path='./results_planning_tiny_fp16/',  # save path for the VLM outputs
    use_grid_mask=True,
    frozen=False,
    use_lora=False,
    tokenizer=llm_path,
    lm_head=llm_path,  # set to None if the LLM head is not used
    img_backbone=dict(
        type='EVAViT',
        img_size=640,
        patch_size=16,
        window_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4*2/3,
        window_block_indexes=(0, 1, 3, 4, 6, 7, 9, 10),
        qkv_bias=True,
        drop_path_rate=0.1,
        flash_attn=True,
        with_cp=True,
        frozen=False),
    map_head=dict(
        type='PETRHeadM',
        num_classes=1,
        in_channels=768,
        out_dims=2048,
        memory_len=600,
        with_mask=True,  # map queries cannot attend to VLM tokens
        topk_proposals=300,
        num_lane=1800,  # 300 one-to-one + 1500 one-to-many queries
        num_lanes_one2one=300,
        k_one2many=5,
        lambda_one2many=1.0,
        num_extra=256,
        n_control=11,
        pc_range=point_cloud_range,
        code_weights=[1.0, 1.0],
        transformer=dict(
            type='PETRTemporalTransformer',
            input_dimension=256,
            output_dimension=256,
            num_layers=6,
            embed_dims=256,
            num_heads=8,
            feedforward_dims=2048,
            dropout=0.1,
            with_cp=True,
            flash_attn=True,),
        train_cfg=dict(
            assigner=dict(
                type='LaneHungarianAssigner',
                cls_cost=dict(type='FocalLossCost', weight=1.5),
                reg_cost=dict(type='LaneL1Cost', weight=0.02),
                iou_cost=dict(type='IoUCost', weight=0.0))),  # dummy cost
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.5),
        loss_bbox=dict(type='L1Loss', loss_weight=0.02),
        loss_dir=dict(type='PtsDirCosLoss', loss_weight=0.0)),
    pts_bbox_head=dict(
        type='StreamPETRHead',
        num_classes=10,
        in_channels=768,
        out_dims=2048,
        num_query=600,
        with_mask=True,
        memory_len=600,
        topk_proposals=300,
        num_propagated=300,
        num_extra=256,
        n_control=11,  # align with the centerline query definition
        match_with_velo=False,
        scalar=10,  # number of denoising groups
        noise_scale=1.0,
        dn_weight=1.0,  # denoising loss weight
        split=0.75,  # positive rate
        code_weights=[2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        transformer=dict(
            type='PETRTemporalTransformer',
            input_dimension=256,
            output_dimension=256,
            num_layers=6,
            embed_dims=256,
            num_heads=8,
            feedforward_dims=2048,
            dropout=0.1,
            with_cp=True,
            flash_attn=True,
            ),
        bbox_coder=dict(
            type='NMSFreeCoder',
            post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
            pc_range=point_cloud_range,
            max_num=300,
            voxel_size=voxel_size,
            num_classes=10),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_bbox=dict(type='L1Loss', loss_weight=0.25),
        loss_iou=dict(type='GIoULoss', loss_weight=0.0),),
    # model training and testing settings
    train_cfg=dict(pts=dict(
        grid_size=[512, 512, 1],
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        out_size_factor=4,
        assigner=dict(
            type='HungarianAssigner3D',
            cls_cost=dict(type='FocalLossCost', weight=2.0),
            reg_cost=dict(type='BBox3DL1Cost', weight=0.25),
            iou_cost=dict(type='IoUCost', weight=0.0),  # fake cost, kept only for compatibility with the DETR head
            pc_range=point_cloud_range),)
    )
)


dataset_type = 'CustomNuScenesDataset'
data_root = './data/nuscenes/'

file_client_args = dict(backend='disk')


ida_aug_conf = {
    "resize_lim": (0.37, 0.45),
    "final_dim": (320, 640),
    "bot_pct_lim": (0.0, 0.0),
    "rot_lim": (0.0, 0.0),
    "H": 900,
    "W": 1600,
    "rand_flip": False,
}

train_pipeline = [
    dict(type='LoadMultiViewImageFromFiles', to_float32=True),
    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_bbox=True,
         with_label=True, with_bbox_depth=True),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectNameFilter', classes=class_names),
    dict(type='ResizeCropFlipRotImage', data_aug_conf=ida_aug_conf, training=True),
    dict(type='ResizeMultiview3D', img_scale=(640, 640), keep_ratio=False, multiscale_mode='value'),
    dict(type='LoadAnnoatationVQA',
         base_vqa_path='./data/nuscenes/vqa/train/',
         base_desc_path='./data/nuscenes/desc/train/',
         base_conv_path='./data/nuscenes/conv/train/',
         base_key_path='./data/nuscenes/keywords/train/',
         tokenizer=llm_path,
         max_length=2048,
         ignore_type=[],
         lane_objs_info="./data/nuscenes/lane_obj_train.pkl"),
    dict(type='NormalizeMultiviewImage', **img_norm_cfg),
    dict(type='PadMultiViewImage', size_divisor=32),
    dict(type='PETRFormatBundle3D', class_names=class_names, collect_keys=collect_keys + ['prev_exists']),
    dict(type='Collect3D', keys=['lane_pts', 'input_ids', 'vlm_labels', 'gt_bboxes_3d', 'gt_labels_3d', 'img', 'gt_bboxes', 'gt_labels', 'centers2d', 'depths', 'prev_exists'] + collect_keys,
         meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'box_mode_3d', 'box_type_3d', 'img_norm_cfg', 'scene_token', 'gt_bboxes_3d', 'gt_labels_3d'))
]
test_pipeline = [
    dict(type='LoadMultiViewImageFromFiles', to_float32=True),
    dict(type='ResizeCropFlipRotImage', data_aug_conf=ida_aug_conf, training=False),
    dict(type='ResizeMultiview3D', img_scale=(640, 640), keep_ratio=False, multiscale_mode='value'),
    dict(type='NormalizeMultiviewImage', **img_norm_cfg),
    dict(type='PadMultiViewImage', size_divisor=32),
    dict(type='LoadAnnoatationVQATest',
         base_vqa_path='./data/nuscenes/vqa/val/',
         base_conv_path='./data/nuscenes/conv/val/',
         base_counter_path='./data/nuscenes/eval_cf/',
         load_type=["planning"],  # do not evaluate every question type in a single run; it takes very long
         tokenizer=llm_path,
         max_length=2048,),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(
                type='PETRFormatBundle3D',
                collect_keys=collect_keys,
                class_names=class_names,
                with_label=False),
            dict(type='Collect3D', keys=['input_ids', 'img'] + collect_keys,
                 meta_keys=('sample_idx', 'vlm_labels', 'filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'box_mode_3d', 'box_type_3d', 'img_norm_cfg', 'scene_token'))
        ])
]

data = dict(
    samples_per_gpu=batch_size,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'nuscenes2d_ego_temporal_infos_train.pkl',
        seq_split_num=1,  # streaming video training
        seq_mode=True,  # streaming video training
        pipeline=train_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=False,
        use_valid_flag=True,
        filter_empty_gt=False,
        box_type_3d='LiDAR'),
    val=dict(
        type=dataset_type,
        eval_mode=['lane', 'det'],
        pipeline=test_pipeline,
        ann_file=data_root + 'nuscenes2d_ego_temporal_infos_val.pkl',
        classes=class_names,
        modality=input_modality),
    test=dict(
        type=dataset_type,
        eval_mode=['lane', 'det'],
        pipeline=test_pipeline,
        ann_file=data_root + 'nuscenes2d_ego_temporal_infos_val.pkl',
        classes=class_names,
        modality=input_modality),
    shuffler_sampler=dict(
        type='InfiniteGroupEachSampleInBatchSampler',
        seq_split_num=2,
        warmup_split_num=10,  # lane detection and the VLM need short-term temporal fusion early in training
        num_iters_to_seq=num_iters_per_epoch,
    ),
    nonshuffler_sampler=dict(type='DistributedSampler')
)


optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', type='AdamW',
                 lr=1e-4, betas=(0.9, 0.999), weight_decay=1e-4,
                 paramwise_cfg={'decay_rate': 0.9,
                                'head_decay_rate': 4.0,
                                'lm_head_decay_rate': 0.1,
                                'decay_type': 'vit_wise',
                                'num_layers': 24,
                                })

optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic', grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    min_lr_ratio=1e-3,
)

evaluation = dict(interval=num_iters_per_epoch*num_epochs, pipeline=test_pipeline)

find_unused_parameters = False  # must be False when gradient checkpointing (with_cp) is used
checkpoint_config = dict(interval=num_iters_per_epoch//2, max_keep_ckpts=3)
runner = dict(
    type='IterBasedRunner', max_iters=num_epochs * num_iters_per_epoch)
load_from = None
resume_from = None
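For reference, the schedule arithmetic baked into this config (a quick check, assuming the hard-coded 28130 is the size of the nuScenes training split used here) works out as follows:

num_gpus = 8
batch_size = 2
num_iters_per_epoch = 28130 // (num_gpus * batch_size)  # 1758 iterations per epoch
num_epochs = 12

max_iters = num_epochs * num_iters_per_epoch      # 21096 iterations for the IterBasedRunner
ckpt_interval = num_iters_per_epoch // 2          # a checkpoint every 879 iterations, keeping the last 3
eval_interval = num_iters_per_epoch * num_epochs  # evaluation fires once, at the end of training

print(num_iters_per_epoch, max_iters, ckpt_interval, eval_interval)  # 1758 21096 879 21096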
projects/mmdet3d_plugin/models/backbones/eva_vit.py CHANGED
@@ -14,6 +14,7 @@ import torch.utils.checkpoint as cp
 from transformers import CLIPImageProcessor
 from ..utils.attention import FlashAttention, FlashMHA
 from mmdet.models.builder import BACKBONES
+from mmcv.runner import BaseModule, auto_fp16
 
 logger = logging.getLogger(__name__)
 BatchNorm2d = torch.nn.BatchNorm2d
@@ -956,6 +957,7 @@ class EVAViT(nn.Module):
         # stochastic depth decay rule
         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
 
+        self.fp16_enabled = True
         self.blocks = nn.ModuleList()
         for i in range(depth):
             block = Block(
@@ -993,7 +995,8 @@ class EVAViT(nn.Module):
         self.eval()
         for m in self.parameters():
             m.requires_grad = False
-
+
+    @auto_fp16(out_fp32=True)
     def forward(self, x):
         # x = x.to(torch.bfloat16)
         x = self.patch_embed(x)
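These two additions are what let the backbone run in half precision under the Fp16OptimizerHook configured above: setting self.fp16_enabled = True arms the decorator, and @auto_fp16(out_fp32=True) casts floating-point inputs to fp16 before the forward pass while casting the returned features back to fp32 for the downstream heads. A standalone toy sketch of the same pattern (not the actual EVAViT class; assumes a CUDA device is available):

import torch
import torch.nn as nn
from mmcv.runner import auto_fp16


class TinyBackbone(nn.Module):
    def __init__(self):
        super().__init__()
        self.fp16_enabled = True          # auto_fp16 is a no-op unless this flag is True
        self.conv = nn.Conv2d(3, 8, 3, padding=1)

    @auto_fp16(out_fp32=True)             # cast float inputs to fp16, cast outputs back to fp32
    def forward(self, x):
        return self.conv(x)


model = TinyBackbone().cuda().half()      # weights in fp16 to match the cast inputs
out = model(torch.rand(1, 3, 32, 32, device='cuda'))
print(out.dtype)                          # torch.float32, because of out_fp32=True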