morriszms committed
Commit 25a4836 · verified · 1 Parent(s): 5f0e5a0

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+marin-8b-base-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
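
These added attribute lines mark each uploaded GGUF binary as Git LFS-tracked, so the repository stores small pointer files (shown at the bottom of this commit) instead of multi-gigabyte blobs. The `huggingface_hub` uploader writes such entries automatically; as a minimal sketch of doing the same by hand (the wildcard pattern is an illustrative alternative to the per-file lines above):

```shell
# Register the Git LFS filters once per machine (no-op if already configured)
git lfs install

# Track GGUF files: appends "*.gguf filter=lfs diff=lfs merge=lfs -text"
# to .gitattributes, equivalent in effect to the per-file lines above
git lfs track "*.gguf"

git add .gitattributes
```
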
README.md ADDED
@@ -0,0 +1,160 @@
---
license: apache-2.0
datasets:
- allenai/dolmino-mix-1124
- allenai/olmo-mix-1124
- bigcode/starcoderdata
- EleutherAI/proof-pile-2
- hltcoe/megawika
- mlfoundations/dclm-baseline-1.0
- HuggingFaceTB/finemath
- marin-community/ar5iv-noproblem-markdown
- marin-community/ar5iv-warning-markdown
- marin-community/datashop-science-qa
- marin-community/stackexchange-markdown
- marin-community/wikipedia-markdown
language:
- en
tags:
- text-generation
- TensorBlock
- GGUF
base_model: marin-community/marin-8b-base
---

<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>

[![Website](https://img.shields.io/badge/Website-tensorblock.co-blue?logo=google-chrome&logoColor=white)](https://tensorblock.co)
[![Twitter](https://img.shields.io/twitter/follow/tensorblock_aoi?style=social)](https://twitter.com/tensorblock_aoi)
[![Discord](https://img.shields.io/badge/Discord-Join%20Us-5865F2?logo=discord&logoColor=white)](https://discord.gg/Ej5NmeHFf2)
[![GitHub](https://img.shields.io/badge/GitHub-TensorBlock-black?logo=github&logoColor=white)](https://github.com/TensorBlock)
[![Telegram](https://img.shields.io/badge/Telegram-Group-blue?logo=telegram)](https://t.me/TensorBlock)

## marin-community/marin-8b-base - GGUF

<div style="text-align: left; margin: 20px 0;">
<a href="https://discord.com/invite/Ej5NmeHFf2" style="display: inline-block; padding: 10px 20px; background-color: #5865F2; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
Join our Discord to learn more about what we're building ↗
</a>
</div>

This repo contains GGUF format model files for [marin-community/marin-8b-base](https://huggingface.co/marin-community/marin-8b-base).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b5753](https://github.com/ggml-org/llama.cpp/commit/73e53dc834c0a2336cd104473af6897197b96277).
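
Because the quants target llama.cpp at the commit above, any of the files can be loaded directly with the `llama-cli` binary from that build. A minimal sketch of a plain-completion run (the prompt, context size, and token count are illustrative; Q4_K_M is the quant recommended in the table below):

```shell
# Generate a short completion with the recommended Q4_K_M quant
./llama-cli -m marin-8b-base-Q4_K_M.gguf \
  -p "The Marin project is" \
  -n 128 \
  --ctx-size 4096
```
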
## Our projects

<table border="1" cellspacing="0" cellpadding="10">
<tr>
<th colspan="2" style="font-size: 25px;">Forge</th>
</tr>
<tr>
<th colspan="2">
<img src="https://imgur.com/faI5UKh.jpeg" alt="Forge Project" width="900"/>
</th>
</tr>
<tr>
<th colspan="2">An OpenAI-compatible multi-provider routing layer.</th>
</tr>
<tr>
<th colspan="2">
<a href="https://github.com/TensorBlock/forge" target="_blank" style="
display: inline-block;
padding: 8px 16px;
background-color: #FF7F50;
color: white;
text-decoration: none;
border-radius: 6px;
font-weight: bold;
font-family: sans-serif;
">🚀 Try it now! 🚀</a>
</th>
</tr>

<tr>
<th style="font-size: 25px;">Awesome MCP Servers</th>
<th style="font-size: 25px;">TensorBlock Studio</th>
</tr>
<tr>
<th><img src="https://imgur.com/2Xov7B7.jpeg" alt="MCP Servers" width="450"/></th>
<th><img src="https://imgur.com/pJcmF5u.jpeg" alt="Studio" width="450"/></th>
</tr>
<tr>
<th>A comprehensive collection of Model Context Protocol (MCP) servers.</th>
<th>A lightweight, open, and extensible multi-LLM interaction studio.</th>
</tr>
<tr>
<th>
<a href="https://github.com/TensorBlock/awesome-mcp-servers" target="_blank" style="
display: inline-block;
padding: 8px 16px;
background-color: #FF7F50;
color: white;
text-decoration: none;
border-radius: 6px;
font-weight: bold;
font-family: sans-serif;
">👀 See what we built 👀</a>
</th>
<th>
<a href="https://github.com/TensorBlock/TensorBlock-Studio" target="_blank" style="
display: inline-block;
padding: 8px 16px;
background-color: #FF7F50;
color: white;
text-decoration: none;
border-radius: 6px;
font-weight: bold;
font-family: sans-serif;
">👀 See what we built 👀</a>
</th>
</tr>
</table>

## Prompt template

```
Unable to determine prompt format automatically. Please check the original model repository for the correct prompt format.
```

Since marin-8b-base is a base (pre-trained, non-instruct) model, plain text completion without a chat template is typically the expected usage.

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [marin-8b-base-Q2_K.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q2_K.gguf) | Q2_K | 3.179 GB | smallest, significant quality loss - not recommended for most purposes |
| [marin-8b-base-Q3_K_S.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q3_K_S.gguf) | Q3_K_S | 3.665 GB | very small, high quality loss |
| [marin-8b-base-Q3_K_M.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q3_K_M.gguf) | Q3_K_M | 4.019 GB | very small, high quality loss |
| [marin-8b-base-Q3_K_L.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q3_K_L.gguf) | Q3_K_L | 4.322 GB | small, substantial quality loss |
| [marin-8b-base-Q4_0.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q4_0.gguf) | Q4_0 | 4.661 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [marin-8b-base-Q4_K_S.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q4_K_S.gguf) | Q4_K_S | 4.693 GB | small, greater quality loss |
| [marin-8b-base-Q4_K_M.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q4_K_M.gguf) | Q4_K_M | 4.921 GB | medium, balanced quality - recommended |
| [marin-8b-base-Q5_0.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q5_0.gguf) | Q5_0 | 5.599 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [marin-8b-base-Q5_K_S.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q5_K_S.gguf) | Q5_K_S | 5.599 GB | large, low quality loss - recommended |
| [marin-8b-base-Q5_K_M.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q5_K_M.gguf) | Q5_K_M | 5.733 GB | large, very low quality loss - recommended |
| [marin-8b-base-Q6_K.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q6_K.gguf) | Q6_K | 6.596 GB | very large, extremely low quality loss |
| [marin-8b-base-Q8_0.gguf](https://huggingface.co/tensorblock/marin-community_marin-8b-base-GGUF/blob/main/marin-8b-base-Q8_0.gguf) | Q8_0 | 8.541 GB | very large, extremely low quality loss - not recommended |

## Downloading instructions

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/marin-community_marin-8b-base-GGUF --include "marin-8b-base-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/marin-community_marin-8b-base-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
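
Once a file is downloaded, it can also be served over HTTP with llama.cpp's `llama-server`, which exposes an OpenAI-compatible API. A minimal sketch (the port and context size are illustrative; the path matches the download example above):

```shell
# Serve the downloaded quant; OpenAI-compatible endpoints such as
# /v1/completions become available at http://localhost:8080 once it loads
./llama-server -m MY_LOCAL_DIR/marin-8b-base-Q2_K.gguf --port 8080 --ctx-size 4096
```
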
marin-8b-base-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05c0be1e3652b6251f99e4d69dbbdb13c29defe764fe2bbe497c66fcb1c6bd1b
size 3179134272
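
This and the following entries are Git LFS pointers rather than the binaries themselves: `oid` records the SHA-256 digest of the actual file and `size` its byte count, so a finished download can be verified locally. A minimal sketch for the Q2_K file above (assuming `sha256sum` is available; the local path reuses the hypothetical directory from the README examples):

```shell
# The digest must match the oid recorded in the pointer above; note the
# two spaces between digest and path, which sha256sum's check format expects
echo "05c0be1e3652b6251f99e4d69dbbdb13c29defe764fe2bbe497c66fcb1c6bd1b  MY_LOCAL_DIR/marin-8b-base-Q2_K.gguf" | sha256sum --check
```
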
marin-8b-base-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6bd3d02de33df7d034629be963d4055c970341b3cb5830db2bdacf1d7c68ac19
size 4321959232
marin-8b-base-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00d945e04a940d0ae50e8a9b28bb62a0375e76d2e28720c1518c1bfdd75889ea
size 4018920768
marin-8b-base-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f622e610e9a5b312a5933636555d7f5902318ec62a5d18f99116549cacbd120
size 3664502080
marin-8b-base-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63bc108aa5c922505fac60b9f85d0b713081cd1dc2d2509c2c24cceacdbca50c
size 4661214528
marin-8b-base-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1134ab9f2dbac722bbed736ffb05cb8012b3677da26d51500daa5056c0d4fc7
size 4920737088
marin-8b-base-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5d8441452355e9a15239bebbd20174db26c78f30d907db1778ce6f3f3732e13
size 4692671808
marin-8b-base-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f6180d0e95bfb604efb4e2ec7987ff8fdc9ed301c42d797259a8310056a3637
size 5599296832
marin-8b-base-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6caafe2698149911ebf03d53cce14f0222538b75f6e473a923cfaf81fa37af5b
size 5732990272
marin-8b-base-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9beb8f2ffc6705872be20498dbb98413a8c6ddd801d3a9089050601b7aac84ac
size 5599296832
marin-8b-base-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c4eb4f405f44980e46337c3349a2e2966b73459846a96c58631f91462320575
size 6596009280
marin-8b-base-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5744d65991326e83ae2dd30b2deaabd7431c187c4b0871f7af40be3978ec21d2
size 8540773696