tongyujun committed on
Commit
8c6b5ee
·
verified ·
1 Parent(s): 70dea02

Upload 641 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +6 -0
  2. Dassl.pytorch/.DS_Store +0 -0
  3. Dassl.pytorch/.flake8 +24 -0
  4. Dassl.pytorch/.gitignore +139 -0
  5. Dassl.pytorch/.isort.cfg +10 -0
  6. Dassl.pytorch/.style.yapf +7 -0
  7. Dassl.pytorch/DATASETS.md +318 -0
  8. Dassl.pytorch/LICENSE +21 -0
  9. Dassl.pytorch/README.md +279 -0
  10. Dassl.pytorch/configs/README.md +1 -0
  11. Dassl.pytorch/configs/datasets/da/cifar_stl.yaml +7 -0
  12. Dassl.pytorch/configs/datasets/da/digit5.yaml +12 -0
  13. Dassl.pytorch/configs/datasets/da/domainnet.yaml +10 -0
  14. Dassl.pytorch/configs/datasets/da/mini_domainnet.yaml +10 -0
  15. Dassl.pytorch/configs/datasets/da/office31.yaml +14 -0
  16. Dassl.pytorch/configs/datasets/da/office_home.yaml +5 -0
  17. Dassl.pytorch/configs/datasets/da/visda17.yaml +13 -0
  18. Dassl.pytorch/configs/datasets/dg/camelyon17.yaml +6 -0
  19. Dassl.pytorch/configs/datasets/dg/cifar100_c.yaml +14 -0
  20. Dassl.pytorch/configs/datasets/dg/cifar10_c.yaml +14 -0
  21. Dassl.pytorch/configs/datasets/dg/digit_single.yaml +12 -0
  22. Dassl.pytorch/configs/datasets/dg/digits_dg.yaml +12 -0
  23. Dassl.pytorch/configs/datasets/dg/fmow.yaml +6 -0
  24. Dassl.pytorch/configs/datasets/dg/iwildcam.yaml +6 -0
  25. Dassl.pytorch/configs/datasets/dg/office_home_dg.yaml +11 -0
  26. Dassl.pytorch/configs/datasets/dg/pacs.yaml +11 -0
  27. Dassl.pytorch/configs/datasets/dg/vlcs.yaml +11 -0
  28. Dassl.pytorch/configs/datasets/ssl/cifar10.yaml +14 -0
  29. Dassl.pytorch/configs/datasets/ssl/cifar100.yaml +15 -0
  30. Dassl.pytorch/configs/datasets/ssl/stl10.yaml +14 -0
  31. Dassl.pytorch/configs/datasets/ssl/svhn.yaml +15 -0
  32. Dassl.pytorch/configs/trainers/da/cdac/digit5.yaml +20 -0
  33. Dassl.pytorch/configs/trainers/da/cdac/domainnet.yaml +20 -0
  34. Dassl.pytorch/configs/trainers/da/cdac/mini_domainnet.yaml +21 -0
  35. Dassl.pytorch/configs/trainers/da/dael/digit5.yaml +20 -0
  36. Dassl.pytorch/configs/trainers/da/dael/domainnet.yaml +19 -0
  37. Dassl.pytorch/configs/trainers/da/dael/mini_domainnet.yaml +19 -0
  38. Dassl.pytorch/configs/trainers/da/m3sda/digit5.yaml +16 -0
  39. Dassl.pytorch/configs/trainers/da/m3sda/domainnet.yaml +15 -0
  40. Dassl.pytorch/configs/trainers/da/m3sda/mini_domainnet.yaml +15 -0
  41. Dassl.pytorch/configs/trainers/da/source_only/digit5.yaml +12 -0
  42. Dassl.pytorch/configs/trainers/da/source_only/mini_domainnet.yaml +11 -0
  43. Dassl.pytorch/configs/trainers/da/source_only/office31.yaml +11 -0
  44. Dassl.pytorch/configs/trainers/da/source_only/visda17.yaml +15 -0
  45. Dassl.pytorch/configs/trainers/dg/daeldg/digits_dg.yaml +16 -0
  46. Dassl.pytorch/configs/trainers/dg/daeldg/office_home_dg.yaml +16 -0
  47. Dassl.pytorch/configs/trainers/dg/daeldg/pacs.yaml +16 -0
  48. Dassl.pytorch/configs/trainers/dg/ddaig/digits_dg.yaml +20 -0
  49. Dassl.pytorch/configs/trainers/dg/ddaig/office_home_dg.yaml +21 -0
  50. Dassl.pytorch/configs/trainers/dg/ddaig/pacs.yaml +21 -0
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ docs/insight.jpg filter=lfs diff=lfs merge=lfs -text
37
+ docs/insight.pdf filter=lfs diff=lfs merge=lfs -text
38
+ docs/style.jpg filter=lfs diff=lfs merge=lfs -text
39
+ docs/texture.jpg filter=lfs diff=lfs merge=lfs -text
40
+ docs/vis.jpg filter=lfs diff=lfs merge=lfs -text
41
+ docs/walking.jpg filter=lfs diff=lfs merge=lfs -text
Dassl.pytorch/.DS_Store ADDED
Binary file (6.15 kB). View file
 
Dassl.pytorch/.flake8 ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [flake8]
2
+ ignore =
3
+ # At least two spaces before inline comment
4
+ E261,
5
+ # Line lengths are recommended to be no greater than 79 characters
6
+ E501,
7
+ # Missing whitespace around arithmetic operator
8
+ E226,
9
+ # Blank line contains whitespace
10
+ W293,
11
+ # Do not use bare 'except'
12
+ E722,
13
+ # Line break after binary operator
14
+ W504,
15
+ # Too many leading '#' for block comment
16
+ E266,
17
+ # Line break before binary operator
18
+ W503,
19
+ # Continuation line over-indented for hanging indent
20
+ E126,
21
+ # Module level import not at top of file
22
+ E402
23
+ max-line-length = 79
24
+ exclude = __init__.py, build
Dassl.pytorch/.gitignore ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # OS X
132
+ .DS_Store
133
+ .Spotlight-V100
134
+ .Trashes
135
+ ._*
136
+
137
+ # This project
138
+ output/
139
+ debug/
Dassl.pytorch/.isort.cfg ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ [isort]
2
+ line_length=79
3
+ multi_line_output=6
4
+ length_sort=true
5
+ known_standard_library=numpy,setuptools
6
+ known_myself=dassl
7
+ known_third_party=matplotlib,cv2,torch,torchvision,PIL,yacs,scipy,gdown
8
+ no_lines_before=STDLIB,THIRDPARTY
9
+ sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER
10
+ default_section=FIRSTPARTY
Dassl.pytorch/.style.yapf ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ [style]
2
+ BASED_ON_STYLE = pep8
3
+ BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
4
+ SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
5
+ DEDENT_CLOSING_BRACKETS = true
6
+ SPACES_BEFORE_COMMENT = 2
7
+ ARITHMETIC_PRECEDENCE_INDICATION = true
Dassl.pytorch/DATASETS.md ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # How to Install Datasets
2
+
3
+ `$DATA` denotes the location where datasets are installed, e.g.
4
+
5
+ ```
6
+ $DATA/
7
+ |–– office31/
8
+ |–– office_home/
9
+ |–– visda17/
10
+ ```
11
+
12
+ [Domain Adaptation](#domain-adaptation)
13
+ - [Office-31](#office-31)
14
+ - [Office-Home](#office-home)
15
+ - [VisDA17](#visda17)
16
+ - [CIFAR10-STL10](#cifar10-stl10)
17
+ - [Digit-5](#digit-5)
18
+ - [DomainNet](#domainnet)
19
+ - [miniDomainNet](#miniDomainNet)
20
+
21
+ [Domain Generalization](#domain-generalization)
22
+ - [PACS](#pacs)
23
+ - [VLCS](#vlcs)
24
+ - [Office-Home-DG](#office-home-dg)
25
+ - [Digits-DG](#digits-dg)
26
+ - [Digit-Single](#digit-single)
27
+ - [CIFAR-10-C](#cifar-10-c)
28
+ - [CIFAR-100-C](#cifar-100-c)
29
+ - [WILDS](#wilds)
30
+
31
+ [Semi-Supervised Learning](#semi-supervised-learning)
32
+ - [CIFAR10/100 and SVHN](#cifar10100-and-svhn)
33
+ - [STL10](#stl10)
34
+
35
+ ## Domain Adaptation
36
+
37
+ ### Office-31
38
+
39
+ Download link: https://people.eecs.berkeley.edu/~jhoffman/domainadapt/#datasets_code.
40
+
41
+ File structure:
42
+
43
+ ```
44
+ office31/
45
+ |–– amazon/
46
+ | |–– back_pack/
47
+ | |–– bike/
48
+ | |–– ...
49
+ |–– dslr/
50
+ | |–– back_pack/
51
+ | |–– bike/
52
+ | |–– ...
53
+ |–– webcam/
54
+ | |–– back_pack/
55
+ | |–– bike/
56
+ | |–– ...
57
+ ```
58
+
59
+ Note that within each domain folder you need to move all class folders out of the `images/` folder and then delete the `images/` folder.
60
+
61
+ ### Office-Home
62
+
63
+ Download link: http://hemanthdv.org/OfficeHome-Dataset/.
64
+
65
+ File structure:
66
+
67
+ ```
68
+ office_home/
69
+ |–– art/
70
+ |–– clipart/
71
+ |–– product/
72
+ |–– real_world/
73
+ ```
74
+
75
+ ### VisDA17
76
+
77
+ Download link: http://ai.bu.edu/visda-2017/.
78
+
79
+ The dataset can also be downloaded using our script at `datasets/da/visda17.sh`. Run the following command in your terminal under `Dassl.pytorch/datasets/da`,
80
+
81
+ ```bash
82
+ sh visda17.sh $DATA
83
+ ```
84
+
85
+ Once the download is finished, the file structure will look like
86
+
87
+ ```
88
+ visda17/
89
+ |–– train/
90
+ |–– test/
91
+ |–– validation/
92
+ ```
93
+
94
+ ### CIFAR10-STL10
95
+
96
+ Run the following command in your terminal under `Dassl.pytorch/datasets/da`,
97
+
98
+ ```bash
99
+ python cifar_stl.py $DATA/cifar_stl
100
+ ```
101
+
102
+ This will create a folder named `cifar_stl` under `$DATA`. The file structure will look like
103
+
104
+ ```
105
+ cifar_stl/
106
+ |–– cifar/
107
+ | |–– train/
108
+ | |–– test/
109
+ |–– stl/
110
+ | |–– train/
111
+ | |–– test/
112
+ ```
113
+
114
+ Note that only 9 classes shared by both datasets are kept.
115
+
116
+ ### Digit-5
117
+
118
+ Create a folder `$DATA/digit5` and download to this folder the dataset from [here](https://github.com/VisionLearningGroup/VisionLearningGroup.github.io/tree/master/M3SDA/code_MSDA_digit#digit-five-download). This should give you
119
+
120
+ ```
121
+ digit5/
122
+ |–– Digit-Five/
123
+ ```
124
+
125
+ Then, run the following command in your terminal under `Dassl.pytorch/datasets/da`,
126
+
127
+ ```bash
128
+ python digit5.py $DATA/digit5
129
+ ```
130
+
131
+ This will extract the data and organize the file structure as
132
+
133
+ ```
134
+ digit5/
135
+ |–– Digit-Five/
136
+ |–– mnist/
137
+ |–– mnist_m/
138
+ |–– usps/
139
+ |–– svhn/
140
+ |–– syn/
141
+ ```
142
+
143
+ ### DomainNet
144
+
145
+ Download link: http://ai.bu.edu/M3SDA/. (Please download the cleaned version of split files)
146
+
147
+ File structure:
148
+
149
+ ```
150
+ domainnet/
151
+ |–– clipart/
152
+ |–– infograph/
153
+ |–– painting/
154
+ |–– quickdraw/
155
+ |–– real/
156
+ |–– sketch/
157
+ |–– splits/
158
+ | |–– clipart_train.txt
159
+ | |–– clipart_test.txt
160
+ | |–– ...
161
+ ```
162
+
163
+ ### miniDomainNet
164
+
165
+ You need to download the DomainNet dataset first. The miniDomainNet's split files can be downloaded at this [google drive](https://drive.google.com/open?id=15rrLDCrzyi6ZY-1vJar3u7plgLe4COL7). After the zip file is extracted, you should have the folder `$DATA/domainnet/splits_mini/`.
166
+
167
+ ## Domain Generalization
168
+
169
+ ### PACS
170
+
171
+ Download link: [google drive](https://drive.google.com/open?id=1m4X4fROCCXMO0lRLrr6Zz9Vb3974NWhE).
172
+
173
+ File structure:
174
+
175
+ ```
176
+ pacs/
177
+ |–– images/
178
+ |–– splits/
179
+ ```
180
+
181
+ You do not necessarily have to manually download this dataset. Once you run ``tools/train.py``, the code will detect if the dataset exists or not and automatically download the dataset to ``$DATA`` if missing. This also applies to VLCS, Office-Home-DG, and Digits-DG.
182
+
183
+ ### VLCS
184
+
185
+ Download link: [google drive](https://drive.google.com/file/d/1r0WL5DDqKfSPp9E3tRENwHaXNs1olLZd/view?usp=sharing) (credit to https://github.com/fmcarlucci/JigenDG#vlcs)
186
+
187
+ File structure:
188
+
189
+ ```
190
+ VLCS/
191
+ |–– CALTECH/
192
+ |–– LABELME/
193
+ |–– PASCAL/
194
+ |–– SUN/
195
+ ```
196
+
197
+ ### Office-Home-DG
198
+
199
+ Download link: [google drive](https://drive.google.com/open?id=1gkbf_KaxoBws-GWT3XIPZ7BnkqbAxIFa).
200
+
201
+ File structure:
202
+
203
+ ```
204
+ office_home_dg/
205
+ |–– art/
206
+ |–– clipart/
207
+ |–– product/
208
+ |–– real_world/
209
+ ```
210
+
211
+ ### Digits-DG
212
+
213
+ Download link: [google drive](https://drive.google.com/open?id=15V7EsHfCcfbKgsDmzQKj_DfXt_XYp_P7).
214
+
215
+ File structure:
216
+
217
+ ```
218
+ digits_dg/
219
+ |–– mnist/
220
+ |–– mnist_m/
221
+ |–– svhn/
222
+ |–– syn/
223
+ ```
224
+
225
+ ### Digit-Single
226
+ Follow the steps for [Digit-5](#digit-5) to organize the dataset.
227
+
228
+ ### CIFAR-10-C
229
+
230
+ First download the CIFAR-10-C dataset from https://zenodo.org/record/2535967#.YFxHEWQzb0o to, e.g., $DATA, and extract the file under the same directory. Then, navigate to `Dassl.pytorch/datasets/dg` and run the following command in your terminal
231
+ ```bash
232
+ python cifar_c.py $DATA/CIFAR-10-C
233
+ ```
234
+ where the first argument denotes the path to the (uncompressed) CIFAR-10-C dataset.
235
+
236
+ The script will extract images from the `.npy` files and save them to `cifar10_c/` created under $DATA. The file structure will look like
237
+ ```
238
+ cifar10_c/
239
+ |–– brightness/
240
+ | |–– 1/ # 5 intensity levels in total
241
+ | |–– 2/
242
+ | |–– 3/
243
+ | |–– 4/
244
+ | |–– 5/
245
+ |–– ... # 19 corruption types in total
246
+ ```
247
+
248
+ Note that `cifar10_c/` only contains the test images. The training images are the normal CIFAR-10 images. See [CIFAR10/100 and SVHN](#cifar10100-and-svhn) for how to prepare the CIFAR-10 dataset.
249
+
250
+ ### CIFAR-100-C
251
+
252
+ First download the CIFAR-100-C dataset from https://zenodo.org/record/3555552#.YFxpQmQzb0o to, e.g., $DATA, and extract the file under the same directory. Then, navigate to `Dassl.pytorch/datasets/dg` and run the following command in your terminal
253
+ ```bash
254
+ python cifar_c.py $DATA/CIFAR-100-C
255
+ ```
256
+ where the first argument denotes the path to the (uncompressed) CIFAR-100-C dataset.
257
+
258
+ The script will extract images from the `.npy` files and save them to `cifar100_c/` created under $DATA. The file structure will look like
259
+ ```
260
+ cifar100_c/
261
+ |–– brightness/
262
+ | |–– 1/ # 5 intensity levels in total
263
+ | |–– 2/
264
+ | |–– 3/
265
+ | |–– 4/
266
+ | |–– 5/
267
+ |–– ... # 19 corruption types in total
268
+ ```
269
+
270
+ Note that `cifar100_c/` only contains the test images. The training images are the normal CIFAR-100 images. See [CIFAR10/100 and SVHN](#cifar10100-and-svhn) for how to prepare the CIFAR-100 dataset.
271
+
272
+ ### WILDS
273
+
274
+ No action is required to preprocess WILDS's datasets. The code will automatically download the data.
275
+
276
+ ## Semi-Supervised Learning
277
+
278
+ ### CIFAR10/100 and SVHN
279
+
280
+ Run the following command in your terminal under `Dassl.pytorch/datasets/ssl`,
281
+
282
+ ```bash
283
+ python cifar10_cifar100_svhn.py $DATA
284
+ ```
285
+
286
+ This will create three folders under `$DATA`, i.e.
287
+
288
+ ```
289
+ cifar10/
290
+ |–– train/
291
+ |–– test/
292
+ cifar100/
293
+ |–– train/
294
+ |–– test/
295
+ svhn/
296
+ |–– train/
297
+ |–– test/
298
+ ```
299
+
300
+ ### STL10
301
+
302
+ Run the following command in your terminal under `Dassl.pytorch/datasets/ssl`,
303
+
304
+ ```bash
305
+ python stl10.py $DATA/stl10
306
+ ```
307
+
308
+ This will create a folder named `stl10` under `$DATA` and extract the data into three folders, i.e. `train`, `test` and `unlabeled`. Then, download from http://ai.stanford.edu/~acoates/stl10/ the "Binary files" and extract it under `stl10`.
309
+
310
+ The file structure will look like
311
+
312
+ ```
313
+ stl10/
314
+ |–– train/
315
+ |–– test/
316
+ |–– unlabeled/
317
+ |–– stl10_binary/
318
+ ```
Dassl.pytorch/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Kaiyang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
Dassl.pytorch/README.md ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dassl
2
+
3
+ ## Introduction
4
+
5
+ Dassl is a [PyTorch](https://pytorch.org) toolbox initially developed for our project [Domain Adaptive Ensemble Learning (DAEL)](https://arxiv.org/abs/2003.07325) to support research in domain adaptation and generalization---since in DAEL we study how to unify these two problems in a single learning framework. Given that domain adaptation is closely related to semi-supervised learning---both study how to exploit unlabeled data---we also incorporate components that support research for the latter.
6
+
7
+ Why the name "Dassl"? Dassl combines the initials of domain adaptation (DA) and semi-supervised learning (SSL), which sounds natural and informative.
8
+
9
+ Dassl has a modular design and unified interfaces, allowing fast prototyping and experimentation of new DA/DG/SSL methods. With Dassl, a new method can be implemented with only a few lines of code. Don't believe? Take a look at the [engine](https://github.com/KaiyangZhou/Dassl.pytorch/tree/master/dassl/engine) folder, which contains the implementations of many existing methods (then you will come back and star this repo). :-)
10
+
11
+ Basically, Dassl is perfect for doing research in the following areas:
12
+ - Domain adaptation
13
+ - Domain generalization
14
+ - Semi-supervised learning
15
+
16
+ BUT, thanks to the neat design, Dassl can also be used as a codebase to develop any deep learning projects, like [this](https://github.com/KaiyangZhou/CoOp). :-)
17
+
18
+ A drawback of Dassl is that it doesn't (yet? hmm) support distributed multi-GPU training (Dassl uses `DataParallel` to wrap a model, which is less efficient than `DistributedDataParallel`).
19
+
20
+ We don't provide detailed documentation for Dassl, unlike another [project](https://kaiyangzhou.github.io/deep-person-reid/) of ours. This is because Dassl is developed for research purpose and as a researcher, we think it's important to be able to read source code and we highly encourage you to do so---definitely not because we are lazy. :-)
21
+
22
+ ## What's new
23
+ - **[Oct 2022]** New paper "[On-Device Domain Generalization](https://arxiv.org/abs/2209.07521)" is out! Code, models and datasets: https://github.com/KaiyangZhou/on-device-dg.
24
+
25
+ <details>
26
+ <summary>More</summary>
27
+
28
+ - **[Jun 2022]** `v0.6.0`: Make `cfg.TRAINER.METHOD_NAME` consistent with the method class name.
29
+ - **[Jun 2022]** A new domain adaptation method [CDAC (CVPR'21)](https://openaccess.thecvf.com/content/CVPR2021/papers/Li_Cross-Domain_Adaptive_Clustering_for_Semi-Supervised_Domain_Adaptation_CVPR_2021_paper.pdf) is added by [Shreejal Trivedi](https://github.com/shreejalt). See [here](https://github.com/KaiyangZhou/Dassl.pytorch/pull/44) for more details.
30
+ - **[Jun 2022]** Adds three datasets from the [WILDS](https://wilds.stanford.edu/) benchmark: iWildCam, FMoW and Camelyon17. See [here](https://github.com/KaiyangZhou/Dassl.pytorch/commit/7f7eab8e22f6e176b97a539100eca12d6a403909) for more details.
31
+ - **[May 2022]** A new domain generalization method [DDG](https://arxiv.org/abs/2205.13913) developed by [Zhishu Sun](https://github.com/siaimes) and to appear at IJCAI'22 is added to this repo. See [here](https://github.com/MetaVisionLab/DDG) for more details.
32
+ - **[Mar 2022]** A new domain generalization method [EFDM](https://arxiv.org/abs/2203.07740) developed by [Yabin Zhang (PolyU)](https://ybzh.github.io/) and to appear at CVPR'22 is added to this repo. See [here](https://github.com/KaiyangZhou/Dassl.pytorch/pull/36) for more details.
33
+ - **[Feb 2022]** In case you don't know, a class in the painting domain of DomainNet (the official splits) only has test images (no training images), which could affect performance. See section 4.a in our [paper](https://arxiv.org/abs/2003.07325) for more details.
34
+ - **[Oct 2021]** `v0.5.0`: **Important changes** made to `transforms.py`. 1) `center_crop` becomes a default transform in testing (applied after resizing the smaller edge to a certain size to keep the image aspect ratio). 2) For training, `Resize(cfg.INPUT.SIZE)` is deactivated when `random_crop` or `random_resized_crop` is used. These changes won't make any difference to the training transforms used in existing config files, nor to the testing transforms unless the raw images are not squared (the only difference is that now the image aspect ratio is respected).
35
+ - **[Oct 2021]** `v0.4.3`: Copy the attributes in `self.dm` (data manager) to `SimpleTrainer` and make `self.dm` optional, which means from now on, you can build data loaders from any source you like rather than being forced to use `DataManager`.
36
+ - **[Sep 2021]** `v0.4.2`: An important update is to set `drop_last=is_train and len(data_source)>=batch_size` when constructing a data loader to avoid 0-length.
37
+
38
+ </details>
39
+
40
+ ## Overview
41
+
42
+ Dassl has implemented the following methods:
43
+
44
+ - Single-source domain adaptation
45
+ - [Cross Domain Adaptive Clustering for Semi Supervised Domain Adaptation (CVPR'21)](https://arxiv.org/pdf/2104.09415.pdf) [[dassl/engine/da/cdac.py](dassl/engine/da/cdac.py)]
46
+ - [Semi-supervised Domain Adaptation via Minimax Entropy (ICCV'19)](https://arxiv.org/abs/1904.06487) [[dassl/engine/da/mme.py](dassl/engine/da/mme.py)]
47
+ - [Maximum Classifier Discrepancy for Unsupervised Domain Adaptation (CVPR'18)](https://arxiv.org/abs/1712.02560) [[dassl/engine/da/mcd.py](dassl/engine/da/mcd.py)]
48
+ - [Self-ensembling for visual domain adaptation (ICLR'18)](https://arxiv.org/abs/1706.05208) [[dassl/engine/da/self_ensembling.py](dassl/engine/da/self_ensembling.py)]
49
+ - [Revisiting Batch Normalization For Practical Domain Adaptation (ICLR-W'17)](https://arxiv.org/abs/1603.04779) [[dassl/engine/da/adabn.py](dassl/engine/da/adabn.py)]
50
+ - [Adversarial Discriminative Domain Adaptation (CVPR'17)](https://arxiv.org/abs/1702.05464) [[dassl/engine/da/adda.py](dassl/engine/da/adda.py)]
51
+ - [Domain-Adversarial Training of Neural Networks (JMLR'16)](https://arxiv.org/abs/1505.07818) [[dassl/engine/da/dann.py](dassl/engine/da/dann.py)]
52
+
53
+ - Multi-source domain adaptation
54
+ - [Domain Adaptive Ensemble Learning](https://arxiv.org/abs/2003.07325) [[dassl/engine/da/dael.py](dassl/engine/da/dael.py)]
55
+ - [Moment Matching for Multi-Source Domain Adaptation (ICCV'19)](https://arxiv.org/abs/1812.01754) [[dassl/engine/da/m3sda.py](dassl/engine/da/m3sda.py)]
56
+
57
+ - Domain generalization
58
+ - [Dynamic Domain Generalization (IJCAI'22)](https://arxiv.org/abs/2205.13913) [[dassl/modeling/backbone/resnet_dynamic.py](dassl/modeling/backbone/resnet_dynamic.py)] [[dassl/engine/dg/domain_mix.py](dassl/engine/dg/domain_mix.py)]
59
+ - [Exact Feature Distribution Matching for Arbitrary Style Transfer and Domain Generalization (CVPR'22)](https://arxiv.org/abs/2203.07740) [[dassl/modeling/ops/efdmix.py](dassl/modeling/ops/efdmix.py)]
60
+ - [Domain Generalization with MixStyle (ICLR'21)](https://openreview.net/forum?id=6xHJ37MVxxp) [[dassl/modeling/ops/mixstyle.py](dassl/modeling/ops/mixstyle.py)]
61
+ - [Deep Domain-Adversarial Image Generation for Domain Generalisation (AAAI'20)](https://arxiv.org/abs/2003.06054) [[dassl/engine/dg/ddaig.py](dassl/engine/dg/ddaig.py)]
62
+ - [Generalizing Across Domains via Cross-Gradient Training (ICLR'18)](https://arxiv.org/abs/1804.10745) [[dassl/engine/dg/crossgrad.py](dassl/engine/dg/crossgrad.py)]
63
+
64
+ - Semi-supervised learning
65
+ - [FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence](https://arxiv.org/abs/2001.07685) [[dassl/engine/ssl/fixmatch.py](dassl/engine/ssl/fixmatch.py)]
66
+ - [MixMatch: A Holistic Approach to Semi-Supervised Learning (NeurIPS'19)](https://arxiv.org/abs/1905.02249) [[dassl/engine/ssl/mixmatch.py](dassl/engine/ssl/mixmatch.py)]
67
+ - [Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results (NeurIPS'17)](https://arxiv.org/abs/1703.01780) [[dassl/engine/ssl/mean_teacher.py](dassl/engine/ssl/mean_teacher.py)]
68
+ - [Semi-supervised Learning by Entropy Minimization (NeurIPS'04)](http://papers.nips.cc/paper/2740-semi-supervised-learning-by-entropy-minimization.pdf) [[dassl/engine/ssl/entmin.py](dassl/engine/ssl/entmin.py)]
69
+
70
+ *Feel free to make a [PR](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork) to add your methods here to make it easier for others to benchmark!*
71
+
72
+ Dassl supports the following datasets:
73
+
74
+ - Domain adaptation
75
+ - [Office-31](https://scalable.mpi-inf.mpg.de/files/2013/04/saenko_eccv_2010.pdf)
76
+ - [Office-Home](http://hemanthdv.org/OfficeHome-Dataset/)
77
+ - [VisDA17](http://ai.bu.edu/visda-2017/)
78
+ - [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html)-[STL10](https://cs.stanford.edu/~acoates/stl10/)
79
+ - [Digit-5](https://github.com/VisionLearningGroup/VisionLearningGroup.github.io/tree/master/M3SDA/code_MSDA_digit#digit-five-download)
80
+ - [DomainNet](http://ai.bu.edu/M3SDA/)
81
+ - [miniDomainNet](https://arxiv.org/abs/2003.07325)
82
+
83
+ - Domain generalization
84
+ - [PACS](https://arxiv.org/abs/1710.03077)
85
+ - [VLCS](https://people.csail.mit.edu/torralba/publications/datasets_cvpr11.pdf)
86
+ - [Office-Home](http://hemanthdv.org/OfficeHome-Dataset/)
87
+ - [Digits-DG](https://arxiv.org/abs/2003.06054)
88
+ - [Digit-Single](https://arxiv.org/abs/1805.12018)
89
+ - [CIFAR-10-C](https://arxiv.org/abs/1807.01697)
90
+ - [CIFAR-100-C](https://arxiv.org/abs/1807.01697)
91
+ - [iWildCam-WILDS](https://wilds.stanford.edu/datasets/#iwildcam)
92
+ - [Camelyon17-WILDS](https://wilds.stanford.edu/datasets/#camelyon17)
93
+ - [FMoW-WILDS](https://wilds.stanford.edu/datasets/#fmow)
94
+
95
+ - Semi-supervised learning
96
+ - [CIFAR10/100](https://www.cs.toronto.edu/~kriz/cifar.html)
97
+ - [SVHN](http://ufldl.stanford.edu/housenumbers/)
98
+ - [STL10](https://cs.stanford.edu/~acoates/stl10/)
99
+
100
+ ## Get started
101
+
102
+ ### Installation
103
+
104
+ Make sure [conda](https://www.anaconda.com/distribution/) is installed properly.
105
+
106
+ ```bash
107
+ # Clone this repo
108
+ git clone https://github.com/KaiyangZhou/Dassl.pytorch.git
109
+ cd Dassl.pytorch/
110
+
111
+ # Create a conda environment
112
+ conda create -y -n dassl python=3.8
113
+
114
+ # Activate the environment
115
+ conda activate dassl
116
+
117
+ # Install torch (requires version >= 1.8.1) and torchvision
118
+ # Please refer to https://pytorch.org/ if you need a different cuda version
119
+ conda install pytorch torchvision cudatoolkit=10.2 -c pytorch
120
+
121
+ # Install dependencies
122
+ pip install -r requirements.txt
123
+
124
+ # Install this library (no need to re-build if the source code is modified)
125
+ python setup.py develop
126
+ ```
127
+
128
+ Follow the instructions in [DATASETS.md](./DATASETS.md) to preprocess the datasets.
129
+
130
+ ### Training
131
+
132
+ The main interface is implemented in `tools/train.py`, which basically does
133
+
134
+ 1. initialize the config with `cfg = setup_cfg(args)` where `args` contains the command-line input (see `tools/train.py` for the list of input arguments);
135
+ 2. instantiate a `trainer` with `build_trainer(cfg)` which loads the dataset and builds a deep neural network model;
136
+ 3. call `trainer.train()` for training and evaluating the model.
137
+
138
+ Below we provide an example for training a source-only baseline on the popular domain adaptation dataset, Office-31,
139
+
140
+ ```bash
141
+ CUDA_VISIBLE_DEVICES=0 python tools/train.py \
142
+ --root $DATA \
143
+ --trainer SourceOnly \
144
+ --source-domains amazon \
145
+ --target-domains webcam \
146
+ --dataset-config-file configs/datasets/da/office31.yaml \
147
+ --config-file configs/trainers/da/source_only/office31.yaml \
148
+ --output-dir output/source_only_office31
149
+ ```
150
+
151
+ `$DATA` denotes the location where datasets are installed. `--dataset-config-file` loads the common setting for the dataset (Office-31 in this case) such as image size and model architecture. `--config-file` loads the algorithm-specific setting such as hyper-parameters and optimization parameters.
152
+
153
+ To use multiple sources, namely the multi-source domain adaptation task, one just needs to add more sources to `--source-domains`. For instance, to train a source-only baseline on miniDomainNet, one can do
154
+
155
+ ```bash
156
+ CUDA_VISIBLE_DEVICES=0 python tools/train.py \
157
+ --root $DATA \
158
+ --trainer SourceOnly \
159
+ --source-domains clipart painting real \
160
+ --target-domains sketch \
161
+ --dataset-config-file configs/datasets/da/mini_domainnet.yaml \
162
+ --config-file configs/trainers/da/source_only/mini_domainnet.yaml \
163
+ --output-dir output/source_only_minidn
164
+ ```
165
+
166
+ After the training finishes, the model weights will be saved under the specified output directory, along with a log file and a tensorboard file for visualization.
167
+
168
+ To print out the results saved in the log file (so you do not need to exhaustively go through all log files and calculate the mean/std by yourself), you can use `tools/parse_test_res.py`. The instruction can be found in the code.
169
+
170
+ For other trainers such as `MCD`, you can set `--trainer MCD` while keeping the config file unchanged, i.e. using the same training parameters as `SourceOnly` (in the simplest case). To modify the hyper-parameters in MCD, like `N_STEP_F` (number of steps to update the feature extractor), you can append `TRAINER.MCD.N_STEP_F 4` to the existing input arguments (otherwise the default value will be used). Alternatively, you can create a new `.yaml` config file to store your custom setting. See [here](https://github.com/KaiyangZhou/Dassl.pytorch/blob/master/dassl/config/defaults.py#L176) for a complete list of algorithm-specific hyper-parameters.
171
+
172
+ ### Test
173
+ Model testing can be done by using `--eval-only`, which asks the code to run `trainer.test()`. You also need to provide the trained model and specify which model file (i.e. saved at which epoch) to use. For example, to use `model.pth.tar-20` saved at `output/source_only_office31/model`, you can do
174
+
175
+ ```bash
176
+ CUDA_VISIBLE_DEVICES=0 python tools/train.py \
177
+ --root $DATA \
178
+ --trainer SourceOnly \
179
+ --source-domains amazon \
180
+ --target-domains webcam \
181
+ --dataset-config-file configs/datasets/da/office31.yaml \
182
+ --config-file configs/trainers/da/source_only/office31.yaml \
183
+ --output-dir output/source_only_office31_test \
184
+ --eval-only \
185
+ --model-dir output/source_only_office31 \
186
+ --load-epoch 20
187
+ ```
188
+
189
+ Note that `--model-dir` takes as input the directory path which was specified in `--output-dir` in the training stage.
190
+
191
+ ### Write a new trainer
192
+ A good practice is to go through `dassl/engine/trainer.py` to get familiar with the base trainer classes, which provide generic functions and training loops. To write a trainer class for domain adaptation or semi-supervised learning, the new class can subclass `TrainerXU`. For domain generalization, the new class can subclass `TrainerX`. In particular, `TrainerXU` and `TrainerX` mainly differ in whether using a data loader for unlabeled data. With the base classes, a new trainer may only need to implement the `forward_backward()` method, which performs loss computation and model update. See `dassl/engine/da/source_only.py` for an example.
193
+
194
+ ### Add a new backbone/head/network
195
+ `backbone` corresponds to a convolutional neural network model which performs feature extraction. `head` (which is an optional module) is mounted on top of `backbone` for further processing, which can be, for example, a MLP. `backbone` and `head` are basic building blocks for constructing a `SimpleNet()` (see `dassl/engine/trainer.py`) which serves as the primary model for a task. `network` contains custom neural network models, such as an image generator.
196
+
197
+ To add a new module, namely a backbone/head/network, you need to first register the module using the corresponding `registry`, i.e. `BACKBONE_REGISTRY` for `backbone`, `HEAD_REGISTRY` for `head` and `NETWORK_REGISTRY` for `network`. Note that for a new `backbone`, we require the model to subclass `Backbone` as defined in `dassl/modeling/backbone/backbone.py` and specify the `self._out_features` attribute.
198
+
199
+ We provide an example below for how to add a new `backbone`.
200
+ ```python
201
+ from dassl.modeling import Backbone, BACKBONE_REGISTRY
202
+
203
+ class MyBackbone(Backbone):
204
+
205
+ def __init__(self):
206
+ super().__init__()
207
+ # Create layers
208
+ self.conv = ...
209
+
210
+ self._out_features = 2048
211
+
212
+ def forward(self, x):
213
+ # Extract and return features
214
+
215
+ @BACKBONE_REGISTRY.register()
216
+ def my_backbone(**kwargs):
217
+ return MyBackbone()
218
+ ```
219
+ Then, you can set `MODEL.BACKBONE.NAME` to `my_backbone` to use your own architecture. For more details, please refer to the source code in `dassl/modeling`.
220
+
221
+ ### Add a dataset
222
+ An example code structure is shown below. Make sure you subclass `DatasetBase` and register the dataset with `@DATASET_REGISTRY.register()`. All you need is to load `train_x`, `train_u` (optional), `val` (optional) and `test`, among which `train_u` and `val` could be `None` or simply ignored. Each of these variables contains a list of `Datum` objects. A `Datum` object (implemented [here](https://github.com/KaiyangZhou/Dassl.pytorch/blob/master/dassl/data/datasets/base_dataset.py#L12)) contains information for a single image, like `impath` (string) and `label` (int).
223
+
224
+ ```python
225
+ from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
226
+
227
+ @DATASET_REGISTRY.register()
228
+ class NewDataset(DatasetBase):
229
+
230
+ dataset_dir = ''
231
+
232
+ def __init__(self, cfg):
233
+
234
+ train_x = ...
235
+ train_u = ... # optional, can be None
236
+ val = ... # optional, can be None
237
+ test = ...
238
+
239
+ super().__init__(train_x=train_x, train_u=train_u, val=val, test=test)
240
+ ```
241
+
242
+ We suggest you take a look at the datasets code in some projects like [this](https://github.com/KaiyangZhou/CoOp), which is built on top of Dassl.
243
+
244
+ ## Relevant Research
245
+
246
+ We would like to share here our research relevant to Dassl.
247
+
248
+ - [On-Device Domain Generalization](https://arxiv.org/abs/2209.07521)
249
+ - [Domain Generalization: A Survey](https://arxiv.org/abs/2103.02503) (TPAMI 2022)
250
+ - [Domain Adaptive Ensemble Learning](https://arxiv.org/abs/2003.07325) (TIP 2021)
251
+ - [MixStyle Neural Networks for Domain Generalization and Adaptation](https://arxiv.org/abs/2107.02053)
252
+ - [Semi-Supervised Domain Generalization with Stochastic StyleMatch](https://arxiv.org/abs/2106.00592)
253
+ - [Domain Generalization with MixStyle](https://openreview.net/forum?id=6xHJ37MVxxp) (ICLR 2021)
254
+ - [Learning to Generate Novel Domains for Domain Generalization](https://arxiv.org/abs/2007.03304) (ECCV 2020)
255
+ - [Deep Domain-Adversarial Image Generation for Domain Generalisation](https://arxiv.org/abs/2003.06054) (AAAI 2020)
256
+
257
+ ## Citation
258
+
259
+ If you find this code useful to your research, please give credit to the following paper
260
+
261
+ ```
262
+ @article{zhou2022domain,
263
+ title={Domain generalization: A survey},
264
+ author={Zhou, Kaiyang and Liu, Ziwei and Qiao, Yu and Xiang, Tao and Loy, Chen Change},
265
+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
266
+ year={2022},
267
+ publisher={IEEE}
268
+ }
269
+
270
+ @article{zhou2021domain,
271
+ title={Domain adaptive ensemble learning},
272
+ author={Zhou, Kaiyang and Yang, Yongxin and Qiao, Yu and Xiang, Tao},
273
+ journal={IEEE Transactions on Image Processing},
274
+ volume={30},
275
+ pages={8008--8018},
276
+ year={2021},
277
+ publisher={IEEE}
278
+ }
279
+ ```
Dassl.pytorch/configs/README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ The `datasets/` folder contains dataset-specific config files which define the standard protocols (e.g., image size, data augmentation, network architecture) used by most papers. The `trainers/` folder contains method-specific config files which define optimization algorithms (e.g., optimizer, epoch) and hyperparameter settings.
Dassl.pytorch/configs/datasets/da/cifar_stl.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
4
+ PIXEL_STD: [0.5, 0.5, 0.5]
5
+
6
+ DATASET:
7
+ NAME: "CIFARSTL"
Dassl.pytorch/configs/datasets/da/digit5.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
4
+ PIXEL_STD: [0.5, 0.5, 0.5]
5
+ TRANSFORMS: ["normalize"]
6
+
7
+ DATASET:
8
+ NAME: "Digit5"
9
+
10
+ MODEL:
11
+ BACKBONE:
12
+ NAME: "cnn_digit5_m3sda"
Dassl.pytorch/configs/datasets/da/domainnet.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_flip", "random_translation", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "DomainNet"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet101"
Dassl.pytorch/configs/datasets/da/mini_domainnet.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (96, 96)
3
+ TRANSFORMS: ["random_flip", "random_translation", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "miniDomainNet"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet18"
Dassl.pytorch/configs/datasets/da/office31.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_flip", "random_translation", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "Office31"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet50"
11
+ HEAD:
12
+ NAME: "mlp"
13
+ HIDDEN_LAYERS: [256]
14
+ DROPOUT: 0.
Dassl.pytorch/configs/datasets/da/office_home.yaml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+
4
+ DATASET:
5
+ NAME: "OfficeHome"
Dassl.pytorch/configs/datasets/da/visda17.yaml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_flip", "center_crop", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "VisDA17"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet101"
11
+
12
+ TEST:
13
+ PER_CLASS_RESULT: True
Dassl.pytorch/configs/datasets/dg/camelyon17.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_resized_crop", "random_flip", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "Camelyon17"
Dassl.pytorch/configs/datasets/dg/cifar100_c.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["random_flip", "random_crop", "normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+
7
+ DATASET:
8
+ NAME: "CIFAR100C"
9
+ CIFAR_C_TYPE: "fog"
10
+ CIFAR_C_LEVEL: 5
11
+
12
+ MODEL:
13
+ BACKBONE:
14
+ NAME: "wide_resnet_16_4"
Dassl.pytorch/configs/datasets/dg/cifar10_c.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["random_flip", "random_crop", "normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+
7
+ DATASET:
8
+ NAME: "CIFAR10C"
9
+ CIFAR_C_TYPE: "fog"
10
+ CIFAR_C_LEVEL: 5
11
+
12
+ MODEL:
13
+ BACKBONE:
14
+ NAME: "wide_resnet_16_4"
Dassl.pytorch/configs/datasets/dg/digit_single.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+
7
+ DATASET:
8
+ NAME: "DigitSingle"
9
+
10
+ MODEL:
11
+ BACKBONE:
12
+ NAME: "cnn_digitsingle"
Dassl.pytorch/configs/datasets/dg/digits_dg.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+
7
+ DATASET:
8
+ NAME: "DigitsDG"
9
+
10
+ MODEL:
11
+ BACKBONE:
12
+ NAME: "cnn_digitsdg"
Dassl.pytorch/configs/datasets/dg/fmow.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_resized_crop", "random_flip", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "FMoW"
Dassl.pytorch/configs/datasets/dg/iwildcam.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_resized_crop", "random_flip", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "IWildCam"
Dassl.pytorch/configs/datasets/dg/office_home_dg.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_flip", "random_translation", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "OfficeHomeDG"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet18"
11
+ PRETRAINED: True
Dassl.pytorch/configs/datasets/dg/pacs.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_flip", "random_translation", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "PACS"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet18"
11
+ PRETRAINED: True
Dassl.pytorch/configs/datasets/dg/vlcs.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (224, 224)
3
+ TRANSFORMS: ["random_flip", "random_translation", "normalize"]
4
+
5
+ DATASET:
6
+ NAME: "VLCS"
7
+
8
+ MODEL:
9
+ BACKBONE:
10
+ NAME: "resnet18"
11
+ PRETRAINED: True
Dassl.pytorch/configs/datasets/ssl/cifar10.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["random_flip", "random_crop", "normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+
7
+ DATASET:
8
+ NAME: "CIFAR10"
9
+ NUM_LABELED: 4000
10
+ VAL_PERCENT: 0.
11
+
12
+ MODEL:
13
+ BACKBONE:
14
+ NAME: "wide_resnet_28_2"
Dassl.pytorch/configs/datasets/ssl/cifar100.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["random_flip", "random_crop", "normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+ CROP_PADDING: 4
7
+
8
+ DATASET:
9
+ NAME: "CIFAR100"
10
+ NUM_LABELED: 10000
11
+ VAL_PERCENT: 0.
12
+
13
+ MODEL:
14
+ BACKBONE:
15
+ NAME: "wide_resnet_28_2"
Dassl.pytorch/configs/datasets/ssl/stl10.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (96, 96)
3
+ TRANSFORMS: ["random_flip", "random_crop", "normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+ CROP_PADDING: 4
7
+
8
+ DATASET:
9
+ NAME: "STL10"
10
+ STL10_FOLD: 0
11
+
12
+ MODEL:
13
+ BACKBONE:
14
+ NAME: "wide_resnet_28_2"
Dassl.pytorch/configs/datasets/ssl/svhn.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ SIZE: (32, 32)
3
+ TRANSFORMS: ["random_crop", "normalize"]
4
+ PIXEL_MEAN: [0.5, 0.5, 0.5]
5
+ PIXEL_STD: [0.5, 0.5, 0.5]
6
+ CROP_PADDING: 4
7
+
8
+ DATASET:
9
+ NAME: "SVHN"
10
+ NUM_LABELED: 1000
11
+ VAL_PERCENT: 0.
12
+
13
+ MODEL:
14
+ BACKBONE:
15
+ NAME: "wide_resnet_28_2"
Dassl.pytorch/configs/trainers/da/cdac/digit5.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomSampler"
4
+ BATCH_SIZE: 64
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 192
8
+ TEST:
9
+ BATCH_SIZE: 256
10
+ K_TRANSFORMS: 2
11
+
12
+ OPTIM:
13
+ NAME: "sgd"
14
+ LR: 0.001
15
+ MAX_EPOCH: 90
16
+ RAMPUP_ITRS: 10000
17
+
18
+ TRAINER:
19
+ CDAC:
20
+ STRONG_TRANSFORMS: ["randaugment", "normalize"]
Dassl.pytorch/configs/trainers/da/cdac/domainnet.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 30
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 6
8
+ TEST:
9
+ BATCH_SIZE: 30
10
+ K_TRANSFORMS: 2
11
+
12
+ OPTIM:
13
+ NAME: "sgd"
14
+ LR: 0.001
15
+ MAX_EPOCH: 90
16
+ RAMPUP_ITRS: 10000
17
+
18
+ TRAINER:
19
+ CDAC:
20
+ STRONG_TRANSFORMS: ["randaugment", "normalize"]
Dassl.pytorch/configs/trainers/da/cdac/mini_domainnet.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 64
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 192
8
+ TEST:
9
+ BATCH_SIZE: 200
10
+ K_TRANSFORMS: 2
11
+
12
+ OPTIM:
13
+ NAME: "sgd"
14
+ LR: 0.001
15
+ MAX_EPOCH: 60
16
+ RAMPUP_ITRS: 10000
17
+ LR_SCHEDULER: "cosine"
18
+
19
+ TRAINER:
20
+ CDAC:
21
+ STRONG_TRANSFORMS: ["randaugment", "normalize"]
Dassl.pytorch/configs/trainers/da/dael/digit5.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 256
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 64
8
+ TEST:
9
+ BATCH_SIZE: 256
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.05
14
+ STEPSIZE: [30]
15
+ MAX_EPOCH: 30
16
+ LR_SCHEDULER: "cosine"
17
+
18
+ TRAINER:
19
+ DAEL:
20
+ STRONG_TRANSFORMS: ["randaugment2", "normalize"]
Dassl.pytorch/configs/trainers/da/dael/domainnet.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 30
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 6
8
+ TEST:
9
+ BATCH_SIZE: 30
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.002
14
+ MAX_EPOCH: 40
15
+ LR_SCHEDULER: "cosine"
16
+
17
+ TRAINER:
18
+ DAEL:
19
+ STRONG_TRANSFORMS: ["random_flip", "cutout", "randaugment2", "normalize"]
Dassl.pytorch/configs/trainers/da/dael/mini_domainnet.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 192
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 64
8
+ TEST:
9
+ BATCH_SIZE: 200
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.005
14
+ MAX_EPOCH: 60
15
+ LR_SCHEDULER: "cosine"
16
+
17
+ TRAINER:
18
+ DAEL:
19
+ STRONG_TRANSFORMS: ["random_flip", "cutout", "randaugment2", "normalize"]
Dassl.pytorch/configs/trainers/da/m3sda/digit5.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 256
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 64
8
+ TEST:
9
+ BATCH_SIZE: 256
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.05
14
+ STEPSIZE: [30]
15
+ MAX_EPOCH: 30
16
+ LR_SCHEDULER: "cosine"
Dassl.pytorch/configs/trainers/da/m3sda/domainnet.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 30
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 6
8
+ TEST:
9
+ BATCH_SIZE: 30
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.002
14
+ MAX_EPOCH: 40
15
+ LR_SCHEDULER: "cosine"
Dassl.pytorch/configs/trainers/da/m3sda/mini_domainnet.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 192
5
+ TRAIN_U:
6
+ SAME_AS_X: False
7
+ BATCH_SIZE: 64
8
+ TEST:
9
+ BATCH_SIZE: 200
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.005
14
+ MAX_EPOCH: 60
15
+ LR_SCHEDULER: "cosine"
Dassl.pytorch/configs/trainers/da/source_only/digit5.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ BATCH_SIZE: 256
4
+ TEST:
5
+ BATCH_SIZE: 256
6
+
7
+ OPTIM:
8
+ NAME: "sgd"
9
+ LR: 0.05
10
+ STEPSIZE: [30]
11
+ MAX_EPOCH: 30
12
+ LR_SCHEDULER: "cosine"
Dassl.pytorch/configs/trainers/da/source_only/mini_domainnet.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ BATCH_SIZE: 128
4
+ TEST:
5
+ BATCH_SIZE: 128
6
+
7
+ OPTIM:
8
+ NAME: "sgd"
9
+ LR: 0.005
10
+ MAX_EPOCH: 60
11
+ LR_SCHEDULER: "cosine"
Dassl.pytorch/configs/trainers/da/source_only/office31.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ BATCH_SIZE: 32
4
+ TEST:
5
+ BATCH_SIZE: 32
6
+
7
+ OPTIM:
8
+ NAME: "sgd"
9
+ LR: 0.002
10
+ STEPSIZE: [20]
11
+ MAX_EPOCH: 20
Dassl.pytorch/configs/trainers/da/source_only/visda17.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ BATCH_SIZE: 32
4
+ TEST:
5
+ BATCH_SIZE: 32
6
+
7
+ OPTIM:
8
+ NAME: "sgd"
9
+ LR: 0.0001
10
+ STEPSIZE: [2]
11
+ MAX_EPOCH: 2
12
+
13
+ TRAIN:
14
+ PRINT_FREQ: 50
15
+ COUNT_ITER: "train_u"
Dassl.pytorch/configs/trainers/dg/daeldg/digits_dg.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 120
5
+ TEST:
6
+ BATCH_SIZE: 100
7
+
8
+ OPTIM:
9
+ NAME: "sgd"
10
+ LR: 0.05
11
+ STEPSIZE: [20]
12
+ MAX_EPOCH: 50
13
+
14
+ TRAINER:
15
+ DAELDG:
16
+ STRONG_TRANSFORMS: ["randaugment2", "normalize"]
Dassl.pytorch/configs/trainers/dg/daeldg/office_home_dg.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 30
5
+ TEST:
6
+ BATCH_SIZE: 100
7
+
8
+ OPTIM:
9
+ NAME: "sgd"
10
+ LR: 0.002
11
+ MAX_EPOCH: 40
12
+ LR_SCHEDULER: "cosine"
13
+
14
+ TRAINER:
15
+ DAELDG:
16
+ STRONG_TRANSFORMS: ["random_flip", "cutout", "randaugment2", "normalize"]
Dassl.pytorch/configs/trainers/dg/daeldg/pacs.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DATALOADER:
2
+ TRAIN_X:
3
+ SAMPLER: "RandomDomainSampler"
4
+ BATCH_SIZE: 30
5
+ TEST:
6
+ BATCH_SIZE: 100
7
+
8
+ OPTIM:
9
+ NAME: "sgd"
10
+ LR: 0.002
11
+ MAX_EPOCH: 40
12
+ LR_SCHEDULER: "cosine"
13
+
14
+ TRAINER:
15
+ DAELDG:
16
+ STRONG_TRANSFORMS: ["random_flip", "cutout", "randaugment2", "normalize"]
Dassl.pytorch/configs/trainers/dg/ddaig/digits_dg.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ PIXEL_MEAN: [0., 0., 0.]
3
+ PIXEL_STD: [1., 1., 1.]
4
+
5
+ DATALOADER:
6
+ TRAIN_X:
7
+ BATCH_SIZE: 128
8
+ TEST:
9
+ BATCH_SIZE: 128
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.05
14
+ STEPSIZE: [20]
15
+ MAX_EPOCH: 50
16
+
17
+ TRAINER:
18
+ DDAIG:
19
+ G_ARCH: "fcn_3x32_gctx"
20
+ LMDA: 0.3
Dassl.pytorch/configs/trainers/dg/ddaig/office_home_dg.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ PIXEL_MEAN: [0., 0., 0.]
3
+ PIXEL_STD: [1., 1., 1.]
4
+
5
+ DATALOADER:
6
+ TRAIN_X:
7
+ BATCH_SIZE: 16
8
+ TEST:
9
+ BATCH_SIZE: 16
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.0005
14
+ STEPSIZE: [20]
15
+ MAX_EPOCH: 25
16
+
17
+ TRAINER:
18
+ DDAIG:
19
+ G_ARCH: "fcn_3x64_gctx"
20
+ WARMUP: 3
21
+ LMDA: 0.3
Dassl.pytorch/configs/trainers/dg/ddaig/pacs.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INPUT:
2
+ PIXEL_MEAN: [0., 0., 0.]
3
+ PIXEL_STD: [1., 1., 1.]
4
+
5
+ DATALOADER:
6
+ TRAIN_X:
7
+ BATCH_SIZE: 16
8
+ TEST:
9
+ BATCH_SIZE: 16
10
+
11
+ OPTIM:
12
+ NAME: "sgd"
13
+ LR: 0.0005
14
+ STEPSIZE: [20]
15
+ MAX_EPOCH: 25
16
+
17
+ TRAINER:
18
+ DDAIG:
19
+ G_ARCH: "fcn_3x64_gctx"
20
+ WARMUP: 3
21
+ LMDA: 0.3