Spaces: mvectors · Running on T4
semantic_search/llm_eval.py
CHANGED
@@ -70,7 +70,7 @@ def eval(question, answers):
     search_results += f"Index: {index_}, Description: {desc}\n\n"
     index_ = index_+1
     prompt = prompt.format(query, search_results)
-    response = llm.invoke_llm_model(prompt,False)
+    response = json.loads(llm.invoke_llm_model(prompt,False))
     #response = textgen_llm(prompt)
     print("Response from LLM: ", response)
     # inter_trim =response.split("[")[1]
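The change wraps the string returned by llm.invoke_llm_model in json.loads, so downstream code receives a parsed Python object instead of raw text. Below is a minimal sketch of that parsing step; the helper name, the fallback behaviour, and the sample strings are illustrative assumptions, not code taken from the repository.

import json

def parse_llm_response(raw: str):
    """Parse the model output as JSON, falling back to the raw text.

    Hypothetical helper illustrating the effect of the commit: the string
    returned by the LLM call is fed through json.loads before it is used
    downstream.
    """
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return raw  # keep the unparsed text if the model did not emit valid JSON

# Stand-in responses for demonstration (not from the repo):
print(parse_llm_response('[{"index": 2, "relevance": "high"}]'))
print(parse_llm_response("plain, non-JSON answer"))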