atnikos committed (verified)
Commit f93e96d · 1 Parent(s): ee9f29a

Upload 6 files

bs_128_conf/.hydra/config.yaml ADDED
@@ -0,0 +1,254 @@
+ data:
+   dataname: motionfix
+   _target_: src.data.motionfix.MotionFixDataModule
+   debug: ${debug}
+   datapath: ${path.data}/motionfix-dataset/motionfix.pth.tar
+   smplh_path: ${path.data}/body_models
+   smplh_path_dbg: ${path.minidata}/body_models
+   load_with_rot: true
+   load_splits:
+   - train
+   - val
+   - test
+   proportion: 1.0
+   text_augment: false
+   batch_size: ${machine.batch_size}
+   num_workers: ${machine.num_workers}
+   rot_repr: 6d
+   preproc:
+     stats_file: ${path.deps}/stats/statistics_${data.dataname}.npy
+     split_seed: 0
+     calculate_minmax: true
+     generate_joint_files: true
+     use_cuda: true
+     n_body_joints: 22
+     norm_type: std
+   framerate: 30
+   sampler: ${sampler}
+   load_feats:
+   - body_transl_delta_pelv
+   - body_orient_xy
+   - z_orient_delta
+   - body_pose
+   - body_joints_local_wo_z_rot
+   progress_bar: true
+ model:
+   modelname: basic_clip
+   _target_: src.model.base_diffusion.MD
+   latent_dim: 512
+   ff_size: 2048
+   num_layers: 8
+   num_head: 8
+   droupout: 0.1
+   activation: gelu
+   render_vids_every_n_epochs: 100
+   num_vids_to_render: 2
+   lr_scheduler: null
+   zero_len_source: false
+   old_way: false
+   statistics_path: ${statistics_path}
+   norm_type: standardize
+   diff_params:
+     num_inference_timesteps: 200
+     num_train_timesteps: 200
+     prob_uncondp: 0.05
+     prob_drop_text: 0.05
+     prob_drop_motion: 0.05
+     guidance_scale_text: 2.5
+     guidance_scale_motion: 2.0
+     noise_schedule: squaredcos_cap_v2
+     predict_type: sample
+   motion_condition: source
+   source_encoder: null
+   condition: text
+   smpl_path: ${data.smplh_path}
+   copy_target: false
+   nfeats: 135
+   dim_per_feat:
+   - 135
+   input_feats:
+   - body_transl_delta_pelv
+   - body_orient_xy
+   - z_orient_delta
+   - body_pose
+   - body_joints_local_wo_z_rot
+   pad_inputs: false
+   loss_func_pos: mse
+   loss_func_feats: mse
+   path:
+     deps: ${code_path:./deps}
+     data: ${code_path:./data}
+     minidata: ${code_path:./minidata}
+     code_dir: ${code_path:}
+     working_dir: ${working_path:""}
+     minilog: ${code_path:./miniexperiments}
+   train_scheduler:
+     _target_: diffusers.DDPMScheduler
+     num_train_timesteps: 1000
+     beta_start: 0.00085
+     beta_end: 0.012
+     beta_schedule: squaredcos_cap_v2
+     variance_type: fixed_small
+     clip_sample: false
+     prediction_type: sample
+   infer_scheduler:
+     _target_: diffusers.DDPMScheduler
+     num_train_timesteps: 1000
+     beta_start: 0.00085
+     beta_end: 0.012
+     beta_schedule: squaredcos_cap_v2
+     variance_type: fixed_small
+     clip_sample: false
+     prediction_type: sample
+   denoiser:
+     _target_: src.model.tmed_denoiser.TMED_denoiser
+     text_encoded_dim: 768
+     ff_size: 1024
+     num_layers: 8
+     num_heads: 4
+     dropout: 0.1
+     activation: gelu
+     condition: ${model.condition}
+     motion_condition: ${model.motion_condition}
+     latent_dim: ${model.latent_dim}
+     nfeats: ${model.nfeats}
+     use_sep: true
+     pred_delta_motion: false
+   text_encoder:
+     _target_: src.model.textencoder.clip_encoder.ClipTextEncoder
+     finetune: false
+     last_hidden_state: true
+     modelpath: ${path.deps}/clip-vit-large-patch14
+   motion_condition_encoder:
+     name: actor_encoder
+     _target_: src.model.motionencoder.ActorAgnosticEncoder
+     latent_dim: ${model.latent_dim}
+     ff_size: ${model.ff_size}
+     num_layers: ${model.num_layers}
+     num_head: ${model.num_head}
+     droupout: ${model.droupout}
+     activation: ${model.activation}
+     nfeats: ${model.nfeats}
+   losses:
+     _target_: src.model.losses.MLDLosses
+     lmd_rfeats_recons: 1.0
+     lmd_jfeats_recons: 1.0
+     predict_epsilon: false
+     modelname: ${model.modelname}
+     lmd_latent: 1.0e-05
+     lmd_kl: 1.0e-05
+     lmd_prior: 0.0
+     lmd_recons: 1.0
+     lmd_gen: 1.0
+     fuse: concat
+     loss_on_both: true
+     loss_on_jfeats: false
+     ablation_no_kl_combine: false
+     ablation_no_kl_gaussian: false
+     ablation_no_motionencoder: false
+   optim:
+     _target_: torch.optim.AdamW
+     lr: 0.0001
+     lr_final: 1.0e-06
+     t_warmup: 150
+   func_recons:
+     _target_: src.model.losses.recons.Recons
+   func_latent:
+     _target_: src.model.losses.recons.Recons
+   func_kl:
+     _target_: src.model.losses.KLLoss
+ machine:
+   name: server
+   batch_size: 128
+   smpl_batch_size: 64
+   num_workers: 8
+ trainer:
+   strategy: auto
+   benchmark: true
+   max_epochs: 1001
+   accelerator: gpu
+   log_every_n_steps: 40
+   deterministic: false
+   detect_anomaly: false
+   enable_progress_bar: true
+   check_val_every_n_epoch: 100
+   limit_train_batches: 1.0
+   limit_val_batches: 1.0
+   num_sanity_val_steps: 0
+   precision: 32
+ sampler:
+   _target_: src.data.sampling.FrameSampler
+   request_frames: 300
+   sampling: conseq
+   sampling_step: 1
+   threshold_reject: 0.75
+   max_len: 600
+   min_len: 15
+ logger:
+   logger_name: wandb
+   name: ${experiment}-${run_id}
+   save_dir: wandb
+   project: ${project}
+   group: null
+   tags: null
+   notes: null
+   offline: false
+   resume: false
+   save_code: false
+   log_model: false
+ callback:
+   last_ckpt:
+     _target_: pytorch_lightning.callbacks.ModelCheckpoint
+     dirpath: ${path.working_dir}/checkpoints
+     filename: latest-{epoch}
+     every_n_epochs: 1
+     save_top_k: 1
+     save_last: true
+   latest_ckpt:
+     _target_: pytorch_lightning.callbacks.ModelCheckpoint
+     dirpath: ${path.working_dir}/checkpoints
+     filename: latest-{epoch}
+     monitor: step
+     mode: max
+     every_n_epochs: 100
+     save_top_k: -1
+     save_last: false
+   progress:
+     _target_: src.callback.ProgressLogger
+   render:
+     _target_: src.callback.RenderCallback
+     every_n_epochs: 50
+     num_workers: ${machine.num_workers}
+     save_last: true
+     nvids_to_save: 3
+     bm_path: ${path.data}
+     modelname: ${model.modelname}
+   lr_logging:
+     _target_: pytorch_lightning.callbacks.LearningRateMonitor
+     logging_interval: epoch
+ path:
+   deps: ${code_path:./deps}
+   data: ${code_path:./data}
+   minidata: ${code_path:./minidata}
+   code_dir: ${code_path:}
+   working_dir: ${working_path:""}
+   minilog: ${code_path:./miniexperiments}
+ debug: false
+ expdir: ${get_expdir:${debug}}
+ experiment: final-model
+ project: tuning-exps
+ seed: 42
+ logger_level: INFO
+ run_id: bs128_ld512_ts200_big_model
+ resume: null
+ resume_ckpt_name: null
+ renderer: pyrender
+ watch_model: false
+ log_freq: 1000
+ log: all
+ devices: 1
+ ftune: null
+ ftune_ckpt_name: last
+ ftune_ckpt_path: ${get_last_checkpoint:${ftune},${ftune_ckpt_name}}
+ statistics_file: statistics_${data.dataname}${get_debug:${debug}}.npy
+ statistics_path: ${path.deps}/stats/${statistics_file}
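
For quick inspection, a dumped config like the one above can be reloaded outside of a training run. Below is a minimal sketch using only the public `omegaconf` API; the project's custom resolvers (`code_path`, `working_path`, `get_expdir`, `get_debug`, `get_last_checkpoint`) are stubbed with placeholder lambdas, which are assumptions and not the repo's real implementations.

```python
from omegaconf import OmegaConf

# Placeholder resolvers so interpolations like ${code_path:./deps} resolve;
# these stubs are NOT the project's actual resolver implementations.
OmegaConf.register_new_resolver("code_path", lambda rel="": f"./{rel}")
OmegaConf.register_new_resolver("working_path", lambda rel="": f"./{rel}")
OmegaConf.register_new_resolver(
    "get_expdir", lambda debug: "miniexperiments" if debug else "experiments"
)
OmegaConf.register_new_resolver("get_debug", lambda debug: "_debug" if debug else "")
OmegaConf.register_new_resolver("get_last_checkpoint", lambda ftune, ckpt_name: None)

cfg = OmegaConf.load("bs_128_conf/.hydra/config.yaml")
print(cfg.model.latent_dim)                      # 512
print(OmegaConf.to_yaml(cfg.model.diff_params))  # diffusion hyperparameters
```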
bs_128_conf/.hydra/hydra.yaml ADDED
@@ -0,0 +1,219 @@
+ hydra:
+   run:
+     dir: ${expdir}/${project}/${experiment}/${run_id}/
+   sweep:
+     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+         datefmt: '%d/%m/%y %H:%M:%S'
+     handlers:
+       console:
+         class: rich.logging.RichHandler
+         formatter: colorlog
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     filters:
+       onlyimportant:
+         (): src.tools.logging.LevelsFilter
+         levels:
+         - CRITICAL
+         - ERROR
+         - WARNING
+       noimportant:
+         (): src.tools.logging.LevelsFilter
+         levels:
+         - INFO
+         - DEBUG
+         - NOTSET
+     formatters:
+       verysimple:
+         format: '%(message)s'
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+         datefmt: '%d/%m/%y %H:%M:%S'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         datefmt: '%d/%m/%y %H:%M:%S'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: rich.logging.RichHandler
+         formatter: verysimple
+         rich_tracebacks: true
+       file_out:
+         class: logging.FileHandler
+         formatter: simple
+         filename: logs.out
+         filters:
+         - noimportant
+       file_err:
+         class: logging.FileHandler
+         formatter: simple
+         filename: logs.err
+         filters:
+         - onlyimportant
+     root:
+       level: ${logger_level}
+       handlers:
+       - console
+       - file_out
+       - file_err
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task:
+     - run_id=bs128_ld512_ts200_big_model
+     - trainer.strategy=auto
+     - devices=1
+     - machine.num_workers=8
+     - experiment=final-model
+     - machine.batch_size=128
+     - model.diff_params.num_train_timesteps=200
+     - model.latent_dim=512
+     - model.ff_size=2048
+     - model.num_layers=8
+     - model.num_head=8
+   job:
+     name: train
+     chdir: true
+     override_dirname: devices=1,experiment=final-model,machine.batch_size=128,machine.num_workers=8,model.diff_params.num_train_timesteps=200,model.ff_size=2048,model.latent_dim=512,model.num_head=8,model.num_layers=8,run_id=bs128_ld512_ts200_big_model,trainer.strategy=auto
+     id: ???
+     num: ???
+     config_name: train
+     env_set:
+       PYOPENGL_PLATFORM: egl
+       HYDRA_FULL_ERROR: '1'
+       WANDB__SERVICE_WAIT: '300'
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.2'
+     cwd: /lustre/home/nathanasiou/projects/tgm
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /lustre/home/nathanasiou/projects/tgm/configs
+       schema: file
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /lustre/home/nathanasiou/projects/tgm/experiments/tuning-exps/final-model/bs128_ld512_ts200_big_model
+     choices:
+       callback: base
+       logger: wandb
+       sampler: variable_conseq
+       trainer: base
+       machine: server
+       model: basic_clip
+       model/optim: adamw
+       model/losses: basic
+       model/motion_condition_encoder: actor
+       model/text_encoder: clipenc
+       model/denoiser: denoiser
+       model/infer_scheduler: ddpm
+       model/train_scheduler: ddpm
+       data: motionfix
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: rich
+       hydra/hydra_logging: rich
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
bs_128_conf/.hydra/overrides.yaml ADDED
@@ -0,0 +1,11 @@
+ - run_id=bs128_ld512_ts200_big_model
+ - trainer.strategy=auto
+ - devices=1
+ - machine.num_workers=8
+ - experiment=final-model
+ - machine.batch_size=128
+ - model.diff_params.num_train_timesteps=200
+ - model.latent_dim=512
+ - model.ff_size=2048
+ - model.num_layers=8
+ - model.num_head=8
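
These overrides are Hydra's record of the original command line (presumably something like `python train.py run_id=bs128_ld512_ts200_big_model machine.batch_size=128 ...`, given the `train` job name recorded in hydra.yaml above). A sketch of recomposing the same run config programmatically with Hydra's compose API follows; it assumes execution relative to the `configs/` directory listed in `config_sources`, and that the project's custom OmegaConf resolvers have already been registered:

```python
from hydra import compose, initialize

# Rebuild the recorded run config from the saved overrides.
# config_path, config_name, and version_base follow what hydra.yaml records;
# the location relative to `configs/` is an assumption.
with initialize(config_path="configs", version_base="1.2"):
    cfg = compose(
        config_name="train",
        overrides=[
            "run_id=bs128_ld512_ts200_big_model",
            "trainer.strategy=auto",
            "devices=1",
            "machine.num_workers=8",
            "experiment=final-model",
            "machine.batch_size=128",
            "model.diff_params.num_train_timesteps=200",
            "model.latent_dim=512",
            "model.ff_size=2048",
            "model.num_layers=8",
            "model.num_head=8",
        ],
    )
    assert cfg.machine.batch_size == 128
```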
bs_64_conf/.hydra/config.yaml ADDED
@@ -0,0 +1,254 @@
+ data:
+   dataname: motionfix
+   _target_: src.data.motionfix.MotionFixDataModule
+   debug: ${debug}
+   datapath: ${path.data}/motionfix-dataset/motionfix.pth.tar
+   smplh_path: ${path.data}/body_models
+   smplh_path_dbg: ${path.minidata}/body_models
+   load_with_rot: true
+   load_splits:
+   - train
+   - val
+   - test
+   proportion: 1.0
+   text_augment: false
+   batch_size: ${machine.batch_size}
+   num_workers: ${machine.num_workers}
+   rot_repr: 6d
+   preproc:
+     stats_file: ${path.deps}/stats/statistics_${data.dataname}.npy
+     split_seed: 0
+     calculate_minmax: true
+     generate_joint_files: true
+     use_cuda: true
+     n_body_joints: 22
+     norm_type: std
+   framerate: 30
+   sampler: ${sampler}
+   load_feats:
+   - body_transl_delta_pelv
+   - body_orient_xy
+   - z_orient_delta
+   - body_pose
+   - body_joints_local_wo_z_rot
+   progress_bar: true
+ model:
+   modelname: basic_clip
+   _target_: src.model.base_diffusion.MD
+   latent_dim: 256
+   ff_size: 1024
+   num_layers: 9
+   num_head: 4
+   droupout: 0.1
+   activation: gelu
+   render_vids_every_n_epochs: 100
+   num_vids_to_render: 2
+   lr_scheduler: null
+   zero_len_source: false
+   old_way: false
+   statistics_path: ${statistics_path}
+   norm_type: standardize
+   diff_params:
+     num_inference_timesteps: 200
+     num_train_timesteps: 200
+     prob_uncondp: 0.05
+     prob_drop_text: 0.05
+     prob_drop_motion: 0.05
+     guidance_scale_text: 2.5
+     guidance_scale_motion: 2.0
+     noise_schedule: squaredcos_cap_v2
+     predict_type: sample
+   motion_condition: source
+   source_encoder: null
+   condition: text
+   smpl_path: ${data.smplh_path}
+   copy_target: false
+   nfeats: 135
+   dim_per_feat:
+   - 135
+   input_feats:
+   - body_transl_delta_pelv
+   - body_orient_xy
+   - z_orient_delta
+   - body_pose
+   - body_joints_local_wo_z_rot
+   pad_inputs: false
+   loss_func_pos: mse
+   loss_func_feats: mse
+   path:
+     deps: ${code_path:./deps}
+     data: ${code_path:./data}
+     minidata: ${code_path:./minidata}
+     code_dir: ${code_path:}
+     working_dir: ${working_path:""}
+     minilog: ${code_path:./miniexperiments}
+   train_scheduler:
+     _target_: diffusers.DDPMScheduler
+     num_train_timesteps: 1000
+     beta_start: 0.00085
+     beta_end: 0.012
+     beta_schedule: squaredcos_cap_v2
+     variance_type: fixed_small
+     clip_sample: false
+     prediction_type: sample
+   infer_scheduler:
+     _target_: diffusers.DDPMScheduler
+     num_train_timesteps: 1000
+     beta_start: 0.00085
+     beta_end: 0.012
+     beta_schedule: squaredcos_cap_v2
+     variance_type: fixed_small
+     clip_sample: false
+     prediction_type: sample
+   denoiser:
+     _target_: src.model.tmed_denoiser.TMED_denoiser
+     text_encoded_dim: 768
+     ff_size: 1024
+     num_layers: 8
+     num_heads: 4
+     dropout: 0.1
+     activation: gelu
+     condition: ${model.condition}
+     motion_condition: ${model.motion_condition}
+     latent_dim: ${model.latent_dim}
+     nfeats: ${model.nfeats}
+     use_sep: true
+     pred_delta_motion: false
+   text_encoder:
+     _target_: src.model.textencoder.clip_encoder.ClipTextEncoder
+     finetune: false
+     last_hidden_state: true
+     modelpath: ${path.deps}/clip-vit-large-patch14
+   motion_condition_encoder:
+     name: actor_encoder
+     _target_: src.model.motionencoder.ActorAgnosticEncoder
+     latent_dim: ${model.latent_dim}
+     ff_size: ${model.ff_size}
+     num_layers: ${model.num_layers}
+     num_head: ${model.num_head}
+     droupout: ${model.droupout}
+     activation: ${model.activation}
+     nfeats: ${model.nfeats}
+   losses:
+     _target_: src.model.losses.MLDLosses
+     lmd_rfeats_recons: 1.0
+     lmd_jfeats_recons: 1.0
+     predict_epsilon: false
+     modelname: ${model.modelname}
+     lmd_latent: 1.0e-05
+     lmd_kl: 1.0e-05
+     lmd_prior: 0.0
+     lmd_recons: 1.0
+     lmd_gen: 1.0
+     fuse: concat
+     loss_on_both: true
+     loss_on_jfeats: false
+     ablation_no_kl_combine: false
+     ablation_no_kl_gaussian: false
+     ablation_no_motionencoder: false
+   optim:
+     _target_: torch.optim.AdamW
+     lr: 0.0001
+     lr_final: 1.0e-06
+     t_warmup: 150
+   func_recons:
+     _target_: src.model.losses.recons.Recons
+   func_latent:
+     _target_: src.model.losses.recons.Recons
+   func_kl:
+     _target_: src.model.losses.KLLoss
+ machine:
+   name: server
+   batch_size: 64
+   smpl_batch_size: 64
+   num_workers: 8
+ trainer:
+   strategy: auto
+   benchmark: true
+   max_epochs: 1001
+   accelerator: gpu
+   log_every_n_steps: 40
+   deterministic: false
+   detect_anomaly: false
+   enable_progress_bar: true
+   check_val_every_n_epoch: 100
+   limit_train_batches: 1.0
+   limit_val_batches: 1.0
+   num_sanity_val_steps: 0
+   precision: 32
+ sampler:
+   _target_: src.data.sampling.FrameSampler
+   request_frames: 300
+   sampling: conseq
+   sampling_step: 1
+   threshold_reject: 0.75
+   max_len: 600
+   min_len: 15
+ logger:
+   logger_name: wandb
+   name: ${experiment}-${run_id}
+   save_dir: wandb
+   project: ${project}
+   group: null
+   tags: null
+   notes: null
+   offline: false
+   resume: false
+   save_code: false
+   log_model: false
+ callback:
+   last_ckpt:
+     _target_: pytorch_lightning.callbacks.ModelCheckpoint
+     dirpath: ${path.working_dir}/checkpoints
+     filename: latest-{epoch}
+     every_n_epochs: 1
+     save_top_k: 1
+     save_last: true
+   latest_ckpt:
+     _target_: pytorch_lightning.callbacks.ModelCheckpoint
+     dirpath: ${path.working_dir}/checkpoints
+     filename: latest-{epoch}
+     monitor: step
+     mode: max
+     every_n_epochs: 100
+     save_top_k: -1
+     save_last: false
+   progress:
+     _target_: src.callback.ProgressLogger
+   render:
+     _target_: src.callback.RenderCallback
+     every_n_epochs: 50
+     num_workers: ${machine.num_workers}
+     save_last: true
+     nvids_to_save: 3
+     bm_path: ${path.data}
+     modelname: ${model.modelname}
+   lr_logging:
+     _target_: pytorch_lightning.callbacks.LearningRateMonitor
+     logging_interval: epoch
+ path:
+   deps: ${code_path:./deps}
+   data: ${code_path:./data}
+   minidata: ${code_path:./minidata}
+   code_dir: ${code_path:}
+   working_dir: ${working_path:""}
+   minilog: ${code_path:./miniexperiments}
+ debug: false
+ expdir: ${get_expdir:${debug}}
+ experiment: final-model
+ project: tuning-exps
+ seed: 42
+ logger_level: INFO
+ run_id: bs64_ld512_ts200
+ resume: null
+ resume_ckpt_name: null
+ renderer: pyrender
+ watch_model: false
+ log_freq: 1000
+ log: all
+ devices: 1
+ ftune: null
+ ftune_ckpt_name: last
+ ftune_ckpt_path: ${get_last_checkpoint:${ftune},${ftune_ckpt_name}}
+ statistics_file: statistics_${data.dataname}${get_debug:${debug}}.npy
+ statistics_path: ${path.deps}/stats/${statistics_file}
bs_64_conf/.hydra/hydra.yaml ADDED
@@ -0,0 +1,217 @@
+ hydra:
+   run:
+     dir: ${expdir}/${project}/${experiment}/${run_id}/
+   sweep:
+     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+         datefmt: '%d/%m/%y %H:%M:%S'
+     handlers:
+       console:
+         class: rich.logging.RichHandler
+         formatter: colorlog
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     filters:
+       onlyimportant:
+         (): src.tools.logging.LevelsFilter
+         levels:
+         - CRITICAL
+         - ERROR
+         - WARNING
+       noimportant:
+         (): src.tools.logging.LevelsFilter
+         levels:
+         - INFO
+         - DEBUG
+         - NOTSET
+     formatters:
+       verysimple:
+         format: '%(message)s'
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+         datefmt: '%d/%m/%y %H:%M:%S'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         datefmt: '%d/%m/%y %H:%M:%S'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: rich.logging.RichHandler
+         formatter: verysimple
+         rich_tracebacks: true
+       file_out:
+         class: logging.FileHandler
+         formatter: simple
+         filename: logs.out
+         filters:
+         - noimportant
+       file_err:
+         class: logging.FileHandler
+         formatter: simple
+         filename: logs.err
+         filters:
+         - onlyimportant
+     root:
+       level: ${logger_level}
+       handlers:
+       - console
+       - file_out
+       - file_err
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task:
+     - run_id=bs64_ld512_ts200
+     - trainer.strategy=auto
+     - devices=1
+     - machine.num_workers=8
+     - experiment=final-model
+     - machine.batch_size=64
+     - model.diff_params.num_train_timesteps=200
+     - model.latent_dim=256
+   job:
+     name: train
+     chdir: true
+     override_dirname: devices=1,experiment=final-model,machine.batch_size=64,machine.num_workers=8,model.diff_params.num_train_timesteps=200,model.latent_dim=256,run_id=bs64_ld512_ts200,trainer.strategy=auto
+     id: ???
+     num: ???
+     config_name: train
+     env_set:
+       WANDB_API_KEY: 90e13be46265667b13415a5e6d248ae91ab399fc
+       PYOPENGL_PLATFORM: egl
+       HYDRA_FULL_ERROR: '1'
+       WANDB__SERVICE_WAIT: '300'
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.2'
+     cwd: /lustre/home/nathanasiou/projects/tgm
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /lustre/home/nathanasiou/projects/tgm/configs
+       schema: file
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /lustre/home/nathanasiou/projects/tgm/experiments/tuning-exps/final-model/bs64_ld512_ts200
+     choices:
+       callback: base
+       logger: wandb
+       sampler: variable_conseq
+       trainer: base
+       machine: server
+       model: basic_clip
+       model/optim: adamw
+       model/losses: basic
+       model/motion_condition_encoder: actor
+       model/text_encoder: clipenc
+       model/denoiser: denoiser
+       model/infer_scheduler: ddpm
+       model/train_scheduler: ddpm
+       data: motionfix
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: rich
+       hydra/hydra_logging: rich
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
+ verbose: false
bs_64_conf/.hydra/overrides.yaml ADDED
@@ -0,0 +1,8 @@
+ - run_id=bs64_ld512_ts200
+ - trainer.strategy=auto
+ - devices=1
+ - machine.num_workers=8
+ - experiment=final-model
+ - machine.batch_size=64
+ - model.diff_params.num_train_timesteps=200
+ - model.latent_dim=256