Update README.md
README.md CHANGED
@@ -5,7 +5,7 @@ license: apache-2.0
 
 <h1 align="center"> Moxin Chat 7B </h1>
 
-<p align="center"> <a href="https://github.com/moxin-org/Moxin-LLM">Home Page</a>    |    <a href="https://arxiv.org/abs/2412.06845">Technical Report</a>    |    <a href="https://huggingface.co/moxin-org/…
+<p align="center"> <a href="https://github.com/moxin-org/Moxin-LLM">Home Page</a>    |    <a href="https://arxiv.org/abs/2412.06845">Technical Report</a>    |    <a href="https://huggingface.co/moxin-org/Moxin-7B-LLM">Base Model</a>    |    <a href="https://huggingface.co/moxin-org/Moxin-7B-Chat">Chat Model</a>     |     <a href="https://huggingface.co/moxin-org/Moxin-7B-Instruct">Instruct Model</a>    |     <a href="https://huggingface.co/moxin-org/Moxin-7B-Reasoning">Reasoning Model</a>     |     <a href="https://huggingface.co/moxin-org/Moxin-7B-VLM">VLM Model</a> </p>
 
 
 
@@ -26,7 +26,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 torch.backends.cuda.enable_mem_efficient_sdp(False)
 torch.backends.cuda.enable_flash_sdp(False)
 
-model_name = 'moxin-org/…
+model_name = 'moxin-org/Moxin-7B-Chat'
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(
         model_name,
@@ -65,8 +65,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cuda"
 
-model = AutoModelForCausalLM.from_pretrained("moxin-org/…
-tokenizer = AutoTokenizer.from_pretrained("moxin-org/…
+model = AutoModelForCausalLM.from_pretrained("moxin-org/Moxin-7B-Chat")
+tokenizer = AutoTokenizer.from_pretrained("moxin-org/Moxin-7B-Chat")
 
 messages = [
     {"role": "user", "content": "What is your favourite condiment?"},
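For context, a minimal runnable sketch of the first snippet this diff touches. The README lines outside the hunk (the remaining `from_pretrained` keyword arguments, the prompt, and the `pipeline` call) are not visible here, so everything past the truncated lines below is an assumption modeled on standard transformers usage, not the README's exact code.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# These two flags appear verbatim in the diff context: they disable the
# fused SDPA kernels, which some setups need for this model.
torch.backends.cuda.enable_mem_efficient_sdp(False)
torch.backends.cuda.enable_flash_sdp(False)

model_name = 'moxin-org/Moxin-7B-Chat'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # assumed; the hunk truncates the kwargs here
    device_map="auto",           # assumed; not visible in the hunk
)

# The import list names `pipeline`, so the snippet presumably ends in a
# text-generation pipeline; this call is an assumption, not the README's code.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
prompt = "Can you explain the concept of regularization?"  # hypothetical prompt
print(generator(prompt, max_new_tokens=128)[0]["generated_text"])
```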
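Likewise, the second snippet cuts off inside `messages`; a hedged reconstruction of how such a chat example typically continues, with the `apply_chat_template` and `generate` steps assumed rather than taken from the README:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"

model = AutoModelForCausalLM.from_pretrained("moxin-org/Moxin-7B-Chat")
tokenizer = AutoTokenizer.from_pretrained("moxin-org/Moxin-7B-Chat")

messages = [
    {"role": "user", "content": "What is your favourite condiment?"},
]

# Render the conversation with the model's chat template; the steps from
# here on are assumed, since the hunk ends inside `messages`.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(device)
model.to(device)

# Generate a reply and decode only the newly produced tokens.
output_ids = model.generate(input_ids, max_new_tokens=256, do_sample=True)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```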
