arminfg committed
Commit ea80c44 (verified) · Parent: a3dfd9e

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +62 -0
  2. config.json +61 -0
  3. model.safetensors +3 -0
  4. train_config.json +169 -0
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ datasets: lerobot/pusht
+ library_name: lerobot
+ license: apache-2.0
+ model_name: act
+ pipeline_tag: robotics
+ tags:
+ - lerobot
+ - robotics
+ - act
+ ---
+
+ # Model Card for act
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+ [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.
+
+
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+ ---
+
+ ## How to Get Started with the Model
+
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+ Below is a short version showing how to train the policy and run inference/evaluation:
+
+ ### Train from scratch
+
+ ```bash
+ python -m lerobot.scripts.train \
+   --dataset.repo_id=${HF_USER}/<dataset> \
+   --policy.type=act \
+   --output_dir=outputs/train/<desired_policy_repo_id> \
+   --job_name=lerobot_training \
+   --policy.device=cuda \
+   --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
+   --wandb.enable=true
+ ```
+
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
+
+ ### Evaluate the policy / run inference
+
+ ```bash
+ python -m lerobot.record \
+   --robot.type=so100_follower \
+   --dataset.repo_id=<hf_user>/eval_<dataset> \
+   --policy.path=<hf_user>/<desired_policy_repo_id> \
+   --episodes=10
+ ```
+
+ Prefix the dataset repo id with **eval\_** and point `--policy.path` at a local or Hub checkpoint.
+
+ ---
+
+ ## Model Details
+
+ - **License:** apache-2.0
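
Not part of the uploaded files, but as context for the README above: a minimal Python sketch of loading this checkpoint from the Hub and querying a single action. It assumes a recent LeRobot release (the `ACTPolicy` import path has moved between versions) and uses dummy observations shaped like the `input_features` in the `config.json` below; device placement may need adjusting on your machine.

```python
# Minimal sketch (assumption: recent LeRobot install; adjust the import to your version,
# e.g. lerobot.common.policies.act.modeling_act in older releases).
import torch

from lerobot.policies.act.modeling_act import ACTPolicy

policy = ACTPolicy.from_pretrained("arminfg/test_model")  # repo_id from config.json
policy.eval()
policy.reset()  # clears the internal action queue before a new episode

# Dummy observation matching config.json: one RGB frame (3x96x96, float in [0, 1])
# and a 2-D state vector; normalization is handled inside the policy.
batch = {
    "observation.image": torch.rand(1, 3, 96, 96),
    "observation.state": torch.rand(1, 2),
}

with torch.no_grad():
    action = policy.select_action(batch)  # pops the next action from the current chunk
print(action.shape)  # expected: (1, 2), matching output_features["action"]
```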
config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "type": "act",
+   "n_obs_steps": 1,
+   "normalization_mapping": {
+     "VISUAL": "MEAN_STD",
+     "STATE": "MEAN_STD",
+     "ACTION": "MEAN_STD"
+   },
+   "input_features": {
+     "observation.image": {
+       "type": "VISUAL",
+       "shape": [
+         3,
+         96,
+         96
+       ]
+     },
+     "observation.state": {
+       "type": "STATE",
+       "shape": [
+         2
+       ]
+     }
+   },
+   "output_features": {
+     "action": {
+       "type": "ACTION",
+       "shape": [
+         2
+       ]
+     }
+   },
+   "device": "cuda",
+   "use_amp": false,
+   "push_to_hub": true,
+   "repo_id": "arminfg/test_model",
+   "private": null,
+   "tags": null,
+   "license": null,
+   "chunk_size": 100,
+   "n_action_steps": 100,
+   "vision_backbone": "resnet18",
+   "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+   "replace_final_stride_with_dilation": false,
+   "pre_norm": false,
+   "dim_model": 512,
+   "n_heads": 8,
+   "dim_feedforward": 3200,
+   "feedforward_activation": "relu",
+   "n_encoder_layers": 4,
+   "n_decoder_layers": 1,
+   "use_vae": true,
+   "latent_dim": 32,
+   "n_vae_encoder_layers": 4,
+   "temporal_ensemble_coeff": null,
+   "dropout": 0.1,
+   "kl_weight": 10.0,
+   "optimizer_lr": 1e-05,
+   "optimizer_weight_decay": 0.0001,
+   "optimizer_lr_backbone": 1e-05
+ }
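
For context on the chunking parameters above: with `chunk_size=100`, `n_action_steps=100`, and `temporal_ensemble_coeff=null`, the policy predicts a chunk of 100 future actions at once, the controller executes all 100, and only then is the policy queried again (no ensembling across overlapping chunks). A rough, illustrative control-loop sketch of that behavior follows; it is not LeRobot's actual implementation, and `policy_predict_chunk`, `get_observation`, and `apply_action` are hypothetical callables.

```python
# Illustrative only: how chunk_size / n_action_steps interact at rollout time.
from collections import deque

CHUNK_SIZE = 100      # actions predicted per forward pass (chunk_size)
N_ACTION_STEPS = 100  # actions executed before the next forward pass (n_action_steps)

def rollout(policy_predict_chunk, get_observation, apply_action, max_steps=1000):
    """policy_predict_chunk(obs) -> list of CHUNK_SIZE actions (hypothetical)."""
    queue = deque()
    for _ in range(max_steps):
        if not queue:
            obs = get_observation()
            chunk = policy_predict_chunk(obs)      # predict CHUNK_SIZE actions
            queue.extend(chunk[:N_ACTION_STEPS])   # keep only the part that will be executed
        apply_action(queue.popleft())              # consume one action per control step
```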
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac59aa36ac2a74adcf6f9574c9aefffe32e1d1ac03877e0c3cb0fd2713388711
+ size 206667888
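
The weights themselves are stored through Git LFS; the pointer above records the file's SHA-256 and size (about 207 MB). A small sketch, assuming the repo has been downloaded locally (e.g. via `huggingface_hub`), for verifying the file against the pointer and inspecting the tensors it contains:

```python
# Sketch: verify a local model.safetensors against the LFS pointer and list its tensors.
import hashlib

from safetensors import safe_open

PATH = "model.safetensors"  # local path after download (assumption)
EXPECTED_SHA256 = "ac59aa36ac2a74adcf6f9574c9aefffe32e1d1ac03877e0c3cb0fd2713388711"

with open(PATH, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == EXPECTED_SHA256, "checkpoint does not match the LFS pointer"

with safe_open(PATH, framework="pt") as f:
    names = list(f.keys())
print(f"{len(names)} tensors, e.g. {names[:3]}")
```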
train_config.json ADDED
@@ -0,0 +1,169 @@
+ {
+   "dataset": {
+     "repo_id": "lerobot/pusht",
+     "root": null,
+     "episodes": [
+       0
+     ],
+     "image_transforms": {
+       "enable": false,
+       "max_num_transforms": 3,
+       "random_order": false,
+       "tfs": {
+         "brightness": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "brightness": [
+               0.8,
+               1.2
+             ]
+           }
+         },
+         "contrast": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "contrast": [
+               0.8,
+               1.2
+             ]
+           }
+         },
+         "saturation": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "saturation": [
+               0.5,
+               1.5
+             ]
+           }
+         },
+         "hue": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "hue": [
+               -0.05,
+               0.05
+             ]
+           }
+         },
+         "sharpness": {
+           "weight": 1.0,
+           "type": "SharpnessJitter",
+           "kwargs": {
+             "sharpness": [
+               0.5,
+               1.5
+             ]
+           }
+         }
+       }
+     },
+     "revision": null,
+     "use_imagenet_stats": true,
+     "video_backend": "pyav"
+   },
+   "env": null,
+   "policy": {
+     "type": "act",
+     "n_obs_steps": 1,
+     "normalization_mapping": {
+       "VISUAL": "MEAN_STD",
+       "STATE": "MEAN_STD",
+       "ACTION": "MEAN_STD"
+     },
+     "input_features": {
+       "observation.image": {
+         "type": "VISUAL",
+         "shape": [
+           3,
+           96,
+           96
+         ]
+       },
+       "observation.state": {
+         "type": "STATE",
+         "shape": [
+           2
+         ]
+       }
+     },
+     "output_features": {
+       "action": {
+         "type": "ACTION",
+         "shape": [
+           2
+         ]
+       }
+     },
+     "device": "cuda",
+     "use_amp": false,
+     "push_to_hub": true,
+     "repo_id": "arminfg/test_model",
+     "private": null,
+     "tags": null,
+     "license": null,
+     "chunk_size": 100,
+     "n_action_steps": 100,
+     "vision_backbone": "resnet18",
+     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+     "replace_final_stride_with_dilation": false,
+     "pre_norm": false,
+     "dim_model": 512,
+     "n_heads": 8,
+     "dim_feedforward": 3200,
+     "feedforward_activation": "relu",
+     "n_encoder_layers": 4,
+     "n_decoder_layers": 1,
+     "use_vae": true,
+     "latent_dim": 32,
+     "n_vae_encoder_layers": 4,
+     "temporal_ensemble_coeff": null,
+     "dropout": 0.1,
+     "kl_weight": 10.0,
+     "optimizer_lr": 1e-05,
+     "optimizer_weight_decay": 0.0001,
+     "optimizer_lr_backbone": 1e-05
+   },
+   "output_dir": "outputs/train/test",
+   "job_name": "test",
+   "resume": false,
+   "seed": 1000,
+   "num_workers": 4,
+   "batch_size": 8,
+   "steps": 1000,
+   "eval_freq": 20000,
+   "log_freq": 200,
+   "save_checkpoint": true,
+   "save_freq": 20000,
+   "use_policy_training_preset": true,
+   "optimizer": {
+     "type": "adamw",
+     "lr": 1e-05,
+     "weight_decay": 0.0001,
+     "grad_clip_norm": 10.0,
+     "betas": [
+       0.9,
+       0.999
+     ],
+     "eps": 1e-08
+   },
+   "scheduler": null,
+   "eval": {
+     "n_episodes": 50,
+     "batch_size": 50,
+     "use_async_envs": false
+   },
+   "wandb": {
+     "enable": false,
+     "disable_artifact": false,
+     "project": "lerobot",
+     "entity": null,
+     "notes": null,
+     "run_id": null,
+     "mode": null
+   }
+ }
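
The `optimizer` block and the policy's `optimizer_lr_backbone` above describe an AdamW setup with a separate learning rate for the vision backbone and gradient clipping at norm 10. The torch sketch below shows roughly how those values map onto an optimizer; it is illustrative, not LeRobot's exact training code, and the param-group split by the `"backbone"` name prefix is an assumption for the example.

```python
# Illustrative mapping of the optimizer settings in train_config.json onto torch.
import torch

def build_optimizer(model: torch.nn.Module) -> torch.optim.AdamW:
    backbone_params = [p for n, p in model.named_parameters() if "backbone" in n]
    other_params = [p for n, p in model.named_parameters() if "backbone" not in n]
    return torch.optim.AdamW(
        [
            {"params": other_params, "lr": 1e-5},     # optimizer_lr
            {"params": backbone_params, "lr": 1e-5},  # optimizer_lr_backbone
        ],
        weight_decay=1e-4,  # optimizer_weight_decay
        betas=(0.9, 0.999),
        eps=1e-8,
    )

# During training, gradients would be clipped before each optimizer step:
# torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # grad_clip_norm
```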