LittleFrog committed
Commit f2f095e · verified · 1 parent: f3c2e49

Upload folder using huggingface_hub
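A commit like this is typically produced with huggingface_hub's upload_folder API. A minimal sketch, assuming a hypothetical repo_id (the page does not name the repository):

# Sketch: reproduce an "Upload folder using huggingface_hub" commit.
# repo_id below is a hypothetical placeholder, not taken from this page.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./intrinsic-models",        # local folder containing albedo/ and specular/
    repo_id="LittleFrog/intrinsic-models",   # hypothetical repo name
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)

Large binaries such as the .ckpt files below are transparently stored via Git LFS, so the diff records only pointer files.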
albedo/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5fa7a1caa7e1e3818119cd9a2e8715ee7b86a77fa66447cc4b0767d8ab550f8
+ size 15458840153
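The checkpoint itself lives in LFS; only its pointer (oid and size, here ~15.5 GB) is versioned. A hedged sketch of fetching the file and checking it against the sha256 recorded above, again with a hypothetical repo_id:

# Sketch: download the checkpoint and verify it matches the LFS pointer's oid.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="LittleFrog/intrinsic-models",       # assumption, not stated on this page
    filename="albedo/checkpoints/last.ckpt",
)

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == "a5fa7a1caa7e1e3818119cd9a2e8715ee7b86a77fa66447cc4b0767d8ab550f8"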
albedo/configs/albedo_project.yaml ADDED
@@ -0,0 +1,109 @@
+ model:
+   base_learning_rate: 0.0001
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.012
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image_target
+     cond_stage_key: image_cond
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: hybrid
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     scheduler_config:
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps:
+         - 100
+         cycle_lengths:
+         - 10000000000000
+         f_start:
+         - 1.0e-06
+         f_max:
+         - 1.0
+         f_min:
+         - 1.0
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 8
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions:
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 4
+         - 4
+         num_heads: 8
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: true
+         legacy: false
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPImageEmbedder
+ data:
+   target: ldm.data.simple.ObjaverseDataModuleFromConfig
+   params:
+     target_name: albedo
+     root_dir: data/objaverse_rendering/samll-dataset
+     batch_size: 128
+     num_workers: 16
+     tar_config:
+       list_dir: data/big_data_lists
+       tar_dir: data/big_data
+       img_per_obj: 10
+       objaverse_data_list:
+         image_list_cache_path: image_lists/64000_10_image_list.npz
+         obj_starts:
+         - 0
+         - 5000
+         - 15000
+         obj_ends:
+         - 2000
+         - 7000
+         - 17000
+       num_envs: 50
+       num_imgs: 1
+     train:
+       validation: false
+       image_transforms:
+         size: 256
+     validation:
+       validation: true
+       image_transforms:
+         size: 256
+     use_wds: true
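The target paths in this config (ldm.models.diffusion.ddpm.LatentDiffusion and friends) follow the stock latent-diffusion layout, so it would normally be consumed via OmegaConf plus ldm.util.instantiate_from_config. A minimal sketch under that assumption:

# Sketch: instantiate the albedo model from this config and load the checkpoint.
# Assumes the stock latent-diffusion codebase (ldm package) is importable.
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("albedo/configs/albedo_project.yaml")
model = instantiate_from_config(config.model)

state = torch.load("albedo/checkpoints/last.ckpt", map_location="cpu")  # ~15.5 GB file
sd = state.get("state_dict", state)  # Lightning checkpoints nest weights under "state_dict"
model.load_state_dict(sd, strict=False)
model.eval()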
specular/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:361b7c3824f4603c4137657ff2fc8a127e0972954d425b440516522f034de7a0
+ size 15458847705
specular/configs/specular_project.yaml ADDED
@@ -0,0 +1,109 @@
+ model:
+   base_learning_rate: 0.0001
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.012
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image_target
+     cond_stage_key: image_cond
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: hybrid
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     scheduler_config:
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps:
+         - 100
+         cycle_lengths:
+         - 10000000000000
+         f_start:
+         - 1.0e-06
+         f_max:
+         - 1.0
+         f_min:
+         - 1.0
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 8
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions:
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 4
+         - 4
+         num_heads: 8
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: true
+         legacy: false
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPImageEmbedder
+ data:
+   target: ldm.data.simple.ObjaverseDataModuleFromConfig
+   params:
+     target_name: gloss_shaded
+     root_dir: data/objaverse_rendering/samll-dataset
+     batch_size: 128
+     num_workers: 16
+     tar_config:
+       list_dir: data/big_data_lists
+       tar_dir: data/big_data
+       img_per_obj: 10
+       objaverse_data_list:
+         image_list_cache_path: image_lists/64000_10_image_list.npz
+         obj_starts:
+         - 0
+         - 5000
+         - 15000
+         obj_ends:
+         - 2000
+         - 7000
+         - 17000
+       num_envs: 50
+       num_imgs: 1
+     train:
+       validation: false
+       image_transforms:
+         size: 256
+     validation:
+       validation: true
+       image_transforms:
+         size: 256
+     use_wds: true
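The specular config mirrors the albedo one line for line; the only expected difference is data.params.target_name (albedo vs. gloss_shaded). A quick sanity-check sketch that confirms this after downloading both files:

# Sketch: diff the two configs to verify they differ only in target_name.
import difflib

with open("albedo/configs/albedo_project.yaml") as f:
    albedo = f.readlines()
with open("specular/configs/specular_project.yaml") as f:
    specular = f.readlines()

# Expected output: a single changed line, target_name: albedo -> gloss_shaded.
print("".join(difflib.unified_diff(
    albedo, specular, "albedo_project.yaml", "specular_project.yaml")))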