File size: 1,510 Bytes
f214f36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
from .GPT import GPT
from .Llama import Llama
from .HF_model import HF_model
from .Deepseek import Deepseek
from .Gemini import Gemini
from .Claude import Claude
import json

def load_json(file_path):
    """Read the JSON file at *file_path* and return the decoded object."""
    with open(file_path) as fp:
        return json.load(fp)

def create_model(config_path=None, model_path=None, api_key=None, device="cuda:0"):
    """
    Factory method to create an LLM instance.

    The caller supplies either a JSON config file (``config_path``) or the
    pair ``model_path`` + ``api_key``, from which a minimal config with
    default generation parameters is synthesized.

    Args:
        config_path: Path to a JSON config file with "model_info",
            "api_key_info" and "params" sections. Takes precedence when given.
        model_path: Model name/path used when no config file is provided.
        api_key: API key used when no config file is provided.
        device: Device string passed to locally-hosted models (Llama / HF).

    Returns:
        A model wrapper instance (GPT, Deepseek, Gemini, Claude, Llama,
        or HF_model as a fallback), chosen by substring match on the
        lowercased model name.

    Raises:
        ValueError: If neither config_path nor both model_path and api_key
            are provided.
    """
    if config_path is not None:
        config = load_json(config_path)
    elif model_path is not None and api_key is not None:
        # Synthesize a minimal config with conservative generation defaults.
        config = {
            "model_info": {
                "provider": None,
                "name": model_path,
            },
            "api_key_info": {
                "api_keys": [api_key],
                "api_key_use": 0,
            },
            "params": {
                "temperature": 0.001,
                "max_output_tokens": 100,
            },
        }
    else:
        raise ValueError("ERROR: Either config_path or both model_path and api_key must be provided")

    # Dispatch on a substring of the lowercased model name; unknown names
    # fall back to a generic HuggingFace model loaded on `device`.
    name = config["model_info"]["name"].lower()
    if 'gpt' in name:
        model = GPT(config)
    elif 'deepseek' in name:
        model = Deepseek(config)
    elif 'gemini' in name:
        model = Gemini(config)
    elif 'claude' in name:
        model = Claude(config)
    elif 'llama' in name:
        model = Llama(config, device)
    else:
        model = HF_model(config, device)
    return model