import os
import math


class Config:
    def __init__(self) -> None:
        # Main active settings
        self.batch_size = 8         # Per-A100 memory for multi-GPU + BF16 training: 76GB without compile, 62GB with compile.
        self.compile = True         # Note: PyTorch <= 2.0.1 has an inherent CPU memory leak; 2.0.1 < PyTorch < 2.5.0 fails to compile this model.
        self.mixed_precision = ['no', 'fp16', 'bf16', 'fp8'][2]    # Note: FP8 shows no acceleration in torch.compile mode.
        self.SDPA_enabled = True    # Benchmarks on 1x H200, compile==True:  None: 43GB + 14s, math: 43GB + 15s, mem_eff: 35GB + 15s.
                                    # Benchmarks on 1x H200, compile==False: None: 54GB + 25s, math: 51GB + 26s, mem_eff: 40GB + 25s.
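        # 'math' and 'mem_eff' above presumably refer to the math / memory-efficient backends of
        # torch.nn.functional.scaled_dot_product_attention; the model code is assumed to branch on this flag.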
        # PATH settings
        # Lay out your file system as: SYS_HOME_DIR/codes/dis/BiRefNet, SYS_HOME_DIR/datasets/dis/xx, SYS_HOME_DIR/weights/xx
        self.sys_home_dir = [os.path.expanduser('~'), '/workspace'][1]    # Index 0: default (home dir); index 1: custom.
        self.data_root_dir = os.path.join(self.sys_home_dir, 'datasets/dis')
        # TASK settings
        self.task = ['DIS5K', 'COD', 'HRSOD', 'General', 'General-2K', 'Matting'][0]
        self.testsets = {
            # Benchmarks
            'DIS5K': ','.join(['DIS-VD', 'DIS-TE1', 'DIS-TE2', 'DIS-TE3', 'DIS-TE4'][:1]),
            'COD': ','.join(['CHAMELEON', 'NC4K', 'TE-CAMO', 'TE-COD10K']),
            'HRSOD': ','.join(['DAVIS-S', 'TE-HRSOD', 'TE-UHRSD', 'DUT-OMRON', 'TE-DUTS']),
            # Practical use
            'General': ','.join(['DIS-VD', 'TE-P3M-500-NP']),
            'General-2K': ','.join(['DIS-VD', 'TE-P3M-500-NP']),
            'Matting': ','.join(['TE-P3M-500-NP', 'TE-AM-2k']),
        }[self.task]
        datasets_all = '+'.join([
            ds for ds in (
                os.listdir(os.path.join(self.data_root_dir, self.task))
                if os.path.isdir(os.path.join(self.data_root_dir, self.task)) else []
            )
            if ds not in self.testsets.split(',')
        ])
        self.training_set = {
            'DIS5K': ['DIS-TR', 'DIS-TR+DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4'][0],
            'COD': 'TR-COD10K+TR-CAMO',
            'HRSOD': ['TR-DUTS', 'TR-HRSOD', 'TR-UHRSD', 'TR-DUTS+TR-HRSOD', 'TR-DUTS+TR-UHRSD', 'TR-HRSOD+TR-UHRSD', 'TR-DUTS+TR-HRSOD+TR-UHRSD'][5],
            'General': datasets_all,
            'General-2K': datasets_all,
            'Matting': datasets_all,
        }[self.task]
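        # With the defaults above (task='DIS5K'): testsets resolves to 'DIS-VD' and training_set to 'DIS-TR'.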
        # Data settings
        self.size = (1024, 1024) if self.task not in ['General-2K'] else (2560, 1440)    # (width, height). Can be overridden by dynamic_size in training.
        self.dynamic_size = [None, ((512-256, 2048+256), (512-256, 2048+256))][0]    # ((min_w, max_w), (min_h, max_h)); may cause errors when used with compile.
        self.background_color_synthesis = False    # Whether to replace the original backgrounds with a pure background color.
        # Faster-Training settings
        self.precisionHigh = True
        self.load_all = False and self.dynamic_size is None    # Enable per your setup: it can consume a lot of CPU memory, and with N GPUs it costs N times the CPU memory to load the data.
        # Machines with > 70GB of CPU memory can run the whole DIS5K training with the default settings.
        # Notes on self.compile above: a higher PyTorch version may fix the CPU memory leak
        # (https://github.com/pytorch/pytorch/issues/119607), but compile under 2.0.1 < PyTorch < 2.5.0
        # seems to bring no training acceleration anyway.
        # MODEL settings
        self.ms_supervision = True
        self.out_ref = self.ms_supervision and True    # out_ref is only active when ms_supervision is on.
        self.dec_ipt = True
        self.dec_ipt_split = True
        self.cxt_num = [0, 3][1]    # Number of multi-scale skip connections from the encoder.
        self.mul_scl_ipt = ['', 'add', 'cat'][2]
        self.dec_att = ['', 'ASPP', 'ASPPDeformable'][2]
        self.squeeze_block = ['', 'BasicDecBlk_x1', 'ResBlk_x4', 'ASPP_x3', 'ASPPDeformable_x3'][1]
        self.dec_blk = ['BasicDecBlk', 'ResBlk'][0]
        # TRAINING settings
        self.finetune_last_epochs = [
            0,
            {
                'DIS5K': -40,
                'COD': -20,
                'HRSOD': -20,
                'General': -20,
                'General-2K': -20,
                'Matting': -10,
            }[self.task]
        ][1]    # Pick 0 to skip fine-tuning; a negative value -N fine-tunes during the last N epochs.
        self.lr = (1e-4 if 'DIS5K' in self.task else 1e-5) * math.sqrt(self.batch_size / 4)    # DIS needs a higher lr to converge faster. Scale the lr with the square root of the relative batch size.
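        # e.g., with batch_size=8 on DIS5K: lr = 1e-4 * sqrt(8 / 4) ≈ 1.41e-4.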
        self.num_workers = max(4, self.batch_size)    # Reduced to min(num_workers, batch_size) when the data_loader is initialized.
        # Backbone settings
        self.bb = [
            'vgg16', 'vgg16bn', 'resnet50',
            'swin_v1_l', 'swin_v1_b',
            'swin_v1_s', 'swin_v1_t',
            'pvt_v2_b5', 'pvt_v2_b2',
            'pvt_v2_b1', 'pvt_v2_b0',
            'dino_v3_7b', 'dino_v3_h_plus', 'dino_v3_l',
            'dino_v3_b', 'dino_v3_s_plus', 'dino_v3_s',
        ][3]
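        # Index 3 above selects 'swin_v1_l' as the default backbone.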
        self.freeze_bb = 'dino_v3' in self.bb    # DINOv3 backbones are kept frozen.
        self.lateral_channels_in_collection = {
            'vgg16': [512, 512, 256, 128], 'vgg16bn': [512, 512, 256, 128], 'resnet50': [2048, 1024, 512, 256],
            'dino_v3_7b': [4096] * 4, 'dino_v3_h_plus': [1280] * 4, 'dino_v3_l': [1024] * 4,
            'dino_v3_b': [768] * 4, 'dino_v3_s_plus': [384] * 4, 'dino_v3_s': [384] * 4,
            'swin_v1_l': [1536, 768, 384, 192], 'swin_v1_b': [1024, 512, 256, 128],
            'swin_v1_s': [768, 384, 192, 96], 'swin_v1_t': [768, 384, 192, 96],
            'pvt_v2_b5': [512, 320, 128, 64], 'pvt_v2_b2': [512, 320, 128, 64],
            'pvt_v2_b1': [512, 320, 128, 64], 'pvt_v2_b0': [256, 160, 64, 32],
        }[self.bb]
        if self.mul_scl_ipt == 'cat':
            self.lateral_channels_in_collection = [channel * 2 for channel in self.lateral_channels_in_collection]
        self.cxt = self.lateral_channels_in_collection[1:][::-1][-self.cxt_num:] if self.cxt_num else []
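        # cxt holds the channel counts of the shallower encoder stages used as multi-scale context, e.g., for
        # swin_v1_l with mul_scl_ipt='cat' and cxt_num=3: [3072, 1536, 768, 384][1:][::-1][-3:] == [384, 768, 1536].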
        # MODEL settings - inactive
        self.lat_blk = ['BasicLatBlk'][0]
        self.dec_channels_inter = ['fixed', 'adap'][0]
        self.auxiliary_classification = False    # Only for DIS5K, where class labels are saved in `dataset.py`.
        self.model = [
            'BiRefNet',
        ][0]
        # TRAINING settings - inactive
        self.preproc_methods = ['flip', 'enhance', 'rotate', 'pepper', 'crop'][:4 if not self.background_color_synthesis else 1]    # Default: all but 'crop'; with background_color_synthesis, only 'flip'.
        self.optimizer = ['Adam', 'AdamW'][1]
        self.lr_decay_epochs = [1e5]    # Epochs at which to decay the lr; a negative value -N means the N-th epoch from the end.
        self.lr_decay_rate = 0.5
        # Loss
        if self.task in ['Matting']:
            self.lambdas_pix_last = {
                'bce': 30 * 1,
                'iou': 0.5 * 0,
                'iou_patch': 0.5 * 0,
                'mae': 100 * 1,
                'mse': 30 * 0,
                'triplet': 3 * 0,
                'reg': 100 * 0,
                'ssim': 10 * 1,
                'cnt': 5 * 0,
                'structure': 5 * 0,
            }
        elif self.task in ['General', 'General-2K']:
            self.lambdas_pix_last = {
                'bce': 30 * 1,
                'iou': 0.5 * 1,
                'iou_patch': 0.5 * 0,
                'mae': 100 * 1,
                'mse': 30 * 0,
                'triplet': 3 * 0,
                'reg': 100 * 0,
                'ssim': 10 * 1,
                'cnt': 5 * 0,
                'structure': 5 * 0,
            }
        else:
            self.lambdas_pix_last = {
                # A non-zero weight enables the corresponding loss term.
                # Original ratio -- 1 : 30 : 1.5 : 0.2, with bce x 30.
                'bce': 30 * 1,          # high performance
                'iou': 0.5 * 1,         # 0 / 255
                'iou_patch': 0.5 * 0,   # 0 / 255, win_size = (64, 64)
                'mae': 30 * 0,
                'mse': 30 * 0,          # can smooth the saliency map
                'triplet': 3 * 0,
                'reg': 100 * 0,
                'ssim': 10 * 1,         # helps with contours
                'cnt': 5 * 0,           # helps with contours
                'structure': 5 * 0,     # structure loss from the MVANet codebase: a small improvement on DIS-TE[1,2,3], a slightly larger decrease on DIS-TE4.
            }
        self.lambdas_cls = {
            'ce': 5.0
        }
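        # The training loop is assumed to combine these as a weighted sum, i.e., sum(w * loss(pred, gt)) over the non-zero weights.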
        # PATH settings - inactive
        self.weights_root_dir = os.path.join(self.sys_home_dir, 'weights/cv')
        model_name_to_weights_file = {
            'dino_v3_7b': 'vit_7b_patch16_dinov3.lvd1689m.pth', 'dino_v3_h_plus': 'vit_huge_plus_patch16_dinov3.lvd1689m.pth',
            'dino_v3_l': 'vit_large_patch16_dinov3.lvd1689m.pth', 'dino_v3_b': 'vit_base_patch16_dinov3.lvd1689m.pth',
            'dino_v3_s_plus': 'vit_small_plus_patch16_dinov3.lvd1689m.pth', 'dino_v3_s': 'vit_small_patch16_dinov3.lvd1689m.pth',
            'swin_v1_l': 'swin_large_patch4_window12_384_22kto1k.pth', 'swin_v1_b': 'swin_base_patch4_window12_384_22kto1k.pth',
            'swin_v1_t': 'swin_tiny_patch4_window7_224_22kto1k_finetune.pth', 'swin_v1_s': 'swin_small_patch4_window7_224_22kto1k_finetune.pth',
            'pvt_v2_b5': 'pvt_v2_b5.pth', 'pvt_v2_b2': 'pvt_v2_b2.pth', 'pvt_v2_b1': 'pvt_v2_b1.pth', 'pvt_v2_b0': 'pvt_v2_b0.pth',
        }
        self.weights = {}
        for model_name, weights_file in model_name_to_weights_file.items():
            # Only the DINOv3 weights live in a subdirectory; the rest sit directly under weights_root_dir.
            model_name_dir = 'DINOv3-timm' if 'dino_v3' in model_name else ''
            self.weights[model_name] = os.path.join(self.weights_root_dir, model_name_dir, weights_file)
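        # e.g., self.weights['swin_v1_l'] == '<sys_home_dir>/weights/cv/swin_large_patch4_window12_384_22kto1k.pth'.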
        # Callbacks - inactive
        self.verbose_eval = True
        self.only_S_MAE = False
        # Others
        self.device = [0, 'cpu'][0]    # .to(0) == .to('cuda:0')
        self.batch_size_valid = 1
        self.rand_seed = 7
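        # Parse the save settings for the current task out of train.sh (searched for in . and ..).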
        run_sh_file = [f for f in os.listdir('.') if f == 'train.sh'] + [os.path.join('..', f) for f in os.listdir('..') if f == 'train.sh']
        if run_sh_file:
            with open(run_sh_file[0], 'r') as f:
                lines = f.readlines()
            self.save_last = int([l.strip() for l in lines if "'{}')".format(self.task) in l and 'val_last=' in l][0].split('val_last=')[-1].split()[0])
            self.save_step = int([l.strip() for l in lines if "'{}')".format(self.task) in l and 'step=' in l][0].split('step=')[-1].split()[0])
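            # The parsing above assumes train.sh has a case branch per task, e.g. (illustrative values only):
            #     'DIS5K') val_last=50 step=5 ;;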

# Print settings (e.g., the task name) for shell scripts to consume.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Choose only one argument to activate.')
    parser.add_argument('--print_task', action='store_true', help='print the task name')
    parser.add_argument('--print_testsets', action='store_true', help='print the validation sets')
    args = parser.parse_args()
    config = Config()
    for arg_name, arg_value in vars(args).items():
        if arg_value:
            print(getattr(config, arg_name[len('print_'):]))
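# Example (shell): task=$(python config.py --print_task)    # -> 'DIS5K' with the default settings above.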