piuzha committed · Commit 8dd2155 (verified) · 1 parent: 64e4874

Update README.md

Files changed (1): README.md (+4 -4)
README.md CHANGED
@@ -5,7 +5,7 @@ license: apache-2.0
 
 <h1 align="center"> Moxin Chat 7B </h1>
 
-<p align="center"> <a href="https://github.com/moxin-org/Moxin-LLM">Home Page</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://arxiv.org/abs/2412.06845">Technical Report</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/moxin-llm-7b">Base Model</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/moxin-chat-7b">Chat Model</a> </p>
+<p align="center"> <a href="https://github.com/moxin-org/Moxin-LLM">Home Page</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://arxiv.org/abs/2412.06845">Technical Report</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/Moxin-7B-LLM">Base Model</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/Moxin-7B-Chat">Chat Model</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/Moxin-7B-Instruct">Instruct Model</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/Moxin-7B-Reasoning">Reasoning Model</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://huggingface.co/moxin-org/Moxin-7B-VLM">VLM Model</a> </p>
 
 
 
@@ -26,7 +26,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 torch.backends.cuda.enable_mem_efficient_sdp(False)
 torch.backends.cuda.enable_flash_sdp(False)
 
-model_name = 'moxin-org/moxin-chat-7b'
+model_name = 'moxin-org/Moxin-7B-Chat'
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
@@ -65,8 +65,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cuda"
 
-model = AutoModelForCausalLM.from_pretrained("moxin-org/moxin-chat-7b")
-tokenizer = AutoTokenizer.from_pretrained("moxin-org/moxin-chat-7b")
+model = AutoModelForCausalLM.from_pretrained("moxin-org/Moxin-7B-Chat")
+tokenizer = AutoTokenizer.from_pretrained("moxin-org/Moxin-7B-Chat")
 
 messages = [
     {"role": "user", "content": "What is your favourite condiment?"},
 