prince-canuma committed on
Commit
7575b21
·
verified ·
1 Parent(s): 44ea5da

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -13,7 +13,7 @@ tags:
13
  ---
14
 
15
  # mlx-community/gemma-3-4b-it-8bit
16
- This model was converted to MLX format from [`google/gemma-3-4b-it`]() using mlx-vlm version **0.1.17**.
17
  Refer to the [original model card](https://huggingface.co/google/gemma-3-4b-it) for more details on the model.
18
  ## Use with mlx
19
 
 
13
  ---
14
 
15
  # mlx-community/gemma-3-4b-it-8bit
16
+ This model was converted to MLX format from [`google/gemma-3-4b-it`]() using mlx-vlm version **0.1.18**.
17
  Refer to the [original model card](https://huggingface.co/google/gemma-3-4b-it) for more details on the model.
18
  ## Use with mlx
19
 
chat_template.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
3
+ }
config.json CHANGED
@@ -28,5 +28,16 @@
28
  "sliding_window": 1024
29
  },
30
  "torch_dtype": "bfloat16",
31
- "transformers_version": "4.50.0.dev0"
 
 
 
 
 
 
 
 
 
 
 
32
  }
 
28
  "sliding_window": 1024
29
  },
30
  "torch_dtype": "bfloat16",
31
+ "transformers_version": "4.50.0.dev0",
32
+ "vision_config": {
33
+ "hidden_size": 1152,
34
+ "image_size": 896,
35
+ "intermediate_size": 4304,
36
+ "model_type": "siglip_vision_model",
37
+ "num_attention_heads": 16,
38
+ "num_hidden_layers": 27,
39
+ "patch_size": 14,
40
+ "vision_use_head": false,
41
+ "skip_vision": true
42
+ }
43
  }
generation_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 2,
4
+ "cache_implementation": "hybrid",
5
+ "eos_token_id": [
6
+ 1,
7
+ 106
8
+ ],
9
+ "pad_token_id": 0,
10
+ "transformers_version": "4.50.0.dev0"
11
+ }
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8feeac662e6673fa18895a101a8c1270d4e7beed9fe929f96dc788486cfc4769
3
- size 4957035296
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6556f7057794310ca9a3b05922f23df06e7509dec5516d07270e9ece7f9c744a
3
+ size 4957036416
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7d6beb1c695b861eef16a433a3c2ef6a093ff9968758da0043e23011bbc76720
3
- size 719106875
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b68e4a8c7287bf65a8eb64fd3d8601c0f0ed67e19f0ca3bda295a1a0f784e23
3
+ size 719106879
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
processor_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "image_seq_length": 256,
3
+ "processor_class": "Gemma3Processor"
4
+ }