MuntasirHossain committed
Commit 9b9483d · verified · 1 Parent(s): f99faa1

Update app.py

Files changed (1)
  1. app.py +60 -58
app.py CHANGED
@@ -26,55 +26,55 @@ from transformers import AutoTokenizer
 from huggingface_hub import HfApi
 import requests
 
-list_llm = ["HuggingFaceH4/zephyr-7b-beta", "meta-llama/Llama-3.1-8B-Instruct"] # "mistralai/Mistral-7B-Instruct-v0.2" # meta-llama/Meta-Llama-3-8B-Instruct
+list_llm = ["meta-llama/Llama-3.1-8B-Instruct"] # , "HuggingFaceH4/zephyr-7b-beta"] # "mistralai/Mistral-7B-Instruct-v0.2" # meta-llama/Meta-Llama-3-8B-Instruct
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
 
-class ZephyrLLM(LLM):
-    def __init__(self, repo_id, huggingfacehub_api_token, max_new_tokens=512, temperature=0.7, **kwargs):
-        super().__init__(**kwargs)
-        self.repo_id = repo_id
-        self.api_token = huggingfacehub_api_token
-        self.api_url = f"https://api-inference.huggingface.co/models/{repo_id}"
-        self.headers = {"Authorization": f"Bearer {huggingfacehub_api_token}"}
-        self.tokenizer = AutoTokenizer.from_pretrained(repo_id)
-        self.max_new_tokens = max_new_tokens
-        self.temperature = temperature
-
-    def _call(self, prompt, stop=None):
-        # Format as chat message
-        messages = [{"role": "user", "content": prompt}]
-
-        # Apply Zephyr's chat template
-        formatted_prompt = self.tokenizer.apply_chat_template(
-            messages, tokenize=False, add_generation_prompt=True
-        )
-        # Send request to Hugging Face Inference API
-        payload = {
-            "inputs": formatted_prompt,
-            "parameters": {
-                "max_new_tokens": self.max_new_tokens,
-                "temperature": self.temperature
-            }
-        }
-        response = requests.post(self.api_url, headers=self.headers, json=payload)
-
-        if response.status_code == 200:
-            full_response = response.json()[0]["generated_text"]
-
-            # Extract the assistant reply from the full response
-            # After <|assistant|>\n, everything is the model's answer
-            if "<|assistant|>" in full_response:
-                return full_response.split("<|assistant|>")[-1].strip()
-            else:
-                return full_response.strip()
-
-        else:
-            raise Exception(f"Failed call [{response.status_code}]: {response.text}")
-
-    @property
-    def _llm_type(self) -> str:
-        return "zephyr-custom"
+# class ZephyrLLM(LLM):
+#     def __init__(self, repo_id, huggingfacehub_api_token, max_new_tokens=512, temperature=0.7, **kwargs):
+#         super().__init__(**kwargs)
+#         self.repo_id = repo_id
+#         self.api_token = huggingfacehub_api_token
+#         self.api_url = f"https://api-inference.huggingface.co/models/{repo_id}"
+#         self.headers = {"Authorization": f"Bearer {huggingfacehub_api_token}"}
+#         self.tokenizer = AutoTokenizer.from_pretrained(repo_id)
+#         self.max_new_tokens = max_new_tokens
+#         self.temperature = temperature
+
+#     def _call(self, prompt, stop=None):
+#         # Format as chat message
+#         messages = [{"role": "user", "content": prompt}]
+
+#         # Apply Zephyr's chat template
+#         formatted_prompt = self.tokenizer.apply_chat_template(
+#             messages, tokenize=False, add_generation_prompt=True
+#         )
+#         # Send request to Hugging Face Inference API
+#         payload = {
+#             "inputs": formatted_prompt,
+#             "parameters": {
+#                 "max_new_tokens": self.max_new_tokens,
+#                 "temperature": self.temperature
+#             }
+#         }
+#         response = requests.post(self.api_url, headers=self.headers, json=payload)
+
+#         if response.status_code == 200:
+#             full_response = response.json()[0]["generated_text"]
+
+#             # Extract the assistant reply from the full response
+#             # After <|assistant|>\n, everything is the model's answer
+#             if "<|assistant|>" in full_response:
+#                 return full_response.split("<|assistant|>")[-1].strip()
+#             else:
+#                 return full_response.strip()
+
+#         else:
+#             raise Exception(f"Failed call [{response.status_code}]: {response.text}")
+
+#     @property
+#     def _llm_type(self) -> str:
+#         return "zephyr-custom"
 
 
 # Load and split PDF document
@@ -102,21 +102,22 @@ def create_db(splits):
 
 # Initialize langchain LLM chain
 def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    if llm_model == "HuggingFaceH4/zephyr-7b-beta":
-        llm = ZephyrLLM(
-            repo_id=llm_model,
-            huggingfacehub_api_token=api_token,
-            temperature=temperature,
-            max_new_tokens=max_tokens,
-        )
-    # if llm_model == "meta-llama/Llama-3.1-8B-Instruct":
-    #     llm = HuggingFaceEndpoint(
-    #         repo_id=llm_model,
-    #         huggingfacehub_api_token = api_token,
-    #         temperature = temperature,
-    #         max_new_tokens = max_tokens,
-    #         top_k = top_k,
-    #     )
+    # if llm_model == "HuggingFaceH4/zephyr-7b-beta":
+    #     llm = ZephyrLLM(
+    #         repo_id=llm_model,
+    #         huggingfacehub_api_token=api_token,
+    #         temperature=temperature,
+    #         max_new_tokens=max_tokens,
+    #     )
+    if llm_model == "meta-llama/Llama-3.1-8B-Instruct":
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            task="text-generation",
+            huggingfacehub_api_token = api_token,
+            temperature = temperature,
+            max_new_tokens = max_tokens,
+            top_k = top_k,
+        )
 
     # llm = HuggingFaceHub(
     #     repo_id="mistralai/Mistral-7B-Instruct-v0.2",
@@ -127,7 +128,8 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
     else:
         llm = HuggingFaceEndpoint(
            huggingfacehub_api_token = api_token,
-            repo_id=llm_model,
+            repo_id=llm_model,
+            task="text-generation",
             temperature = temperature,
             max_new_tokens = max_tokens,
             top_k = top_k,
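
For quick verification of the code path this commit switches to, here is a minimal standalone sketch of the HuggingFaceEndpoint setup. The import path is an assumption (app.py's actual import statement is outside the diff context shown here; current LangChain releases expose HuggingFaceEndpoint from langchain_huggingface), HF_TOKEN is a placeholder environment variable for a valid Hugging Face API token, and the sampling values are illustrative stand-ins for the ones the app passes in from its UI. Only the repo id and task mirror the diff itself.

import os
from langchain_huggingface import HuggingFaceEndpoint  # assumed import path, not shown in this diff

# Hypothetical standalone check of the branch added in this commit
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.1-8B-Instruct",
    task="text-generation",                           # explicit task, newly added in this commit
    huggingfacehub_api_token=os.environ["HF_TOKEN"],  # placeholder env var, not from app.py
    temperature=0.7,                                  # illustrative; the app passes UI-selected values
    max_new_tokens=512,
    top_k=3,
)
print(llm.invoke("Briefly explain retrieval-augmented generation."))

One behavioral difference worth noting: the retired ZephyrLLM wrapper applied the model's chat template via AutoTokenizer before calling the Inference API, whereas HuggingFaceEndpoint with task="text-generation" sends the prompt string as-is.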