Commit
·
292928f
1
Parent(s):
67a4609
fixing llm error
Browse files- main.py +1 -1
- similarity_check/llm_based_scoring/llm.py +26 -11
main.py
CHANGED
@@ -629,7 +629,7 @@ def compute_marks():
|
|
629 |
word_score = question_vector_word(extracted_text, clean_correct_answer)
|
630 |
tfidf_score = tfidf_answer_score(extracted_text, clean_correct_answer, max_tfidf)
|
631 |
ft_score = fasttext_similarity(extracted_text, clean_correct_answer)
|
632 |
-
llm_marks = llm_score(
|
633 |
|
634 |
# Send individual scores to frontend
|
635 |
notification_queue.put({
|
|
|
629 |
word_score = question_vector_word(extracted_text, clean_correct_answer)
|
630 |
tfidf_score = tfidf_answer_score(extracted_text, clean_correct_answer, max_tfidf)
|
631 |
ft_score = fasttext_similarity(extracted_text, clean_correct_answer)
|
632 |
+
llm_marks = llm_score(clean_correct_answer, extracted_text)
|
633 |
|
634 |
# Send individual scores to frontend
|
635 |
notification_queue.put({
|
similarity_check/llm_based_scoring/llm.py
CHANGED
@@ -18,6 +18,10 @@ except Exception as e:
|
|
18 |
|
19 |
def llm_score(correct_answers, answer):
|
20 |
try:
|
|
|
|
|
|
|
|
|
21 |
score = []
|
22 |
|
23 |
for correct_answer in correct_answers:
|
@@ -73,20 +77,31 @@ def llm_score(correct_answers, answer):
|
|
73 |
|
74 |
# Decode and clean response
|
75 |
response = models.flan_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
76 |
try:
|
77 |
-
#
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
|
|
|
|
|
|
|
|
|
|
84 |
|
85 |
except Exception as e:
|
86 |
-
print(f"Error processing answer: {str(e)}")
|
87 |
-
score.append(0)
|
|
|
|
|
|
|
|
|
|
|
88 |
|
89 |
-
return score
|
90 |
except Exception as e:
|
91 |
print(f"Error in llm_score: {str(e)}")
|
92 |
-
return
|
|
|
18 |
|
19 |
def llm_score(correct_answers, answer):
|
20 |
try:
|
21 |
+
# Convert single answer to list if needed
|
22 |
+
if isinstance(correct_answers, str):
|
23 |
+
correct_answers = [correct_answers]
|
24 |
+
|
25 |
score = []
|
26 |
|
27 |
for correct_answer in correct_answers:
|
|
|
77 |
|
78 |
# Decode and clean response
|
79 |
response = models.flan_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
80 |
+
|
81 |
+
# Extract numeric score
|
82 |
try:
|
83 |
+
# Try to find a number between 0 and 10
|
84 |
+
import re
|
85 |
+
numbers = re.findall(r'\b\d+(?:\.\d+)?\b', response)
|
86 |
+
for num in numbers:
|
87 |
+
score_value = float(num)
|
88 |
+
if 0 <= score_value <= 10:
|
89 |
+
score.append(score_value)
|
90 |
+
break
|
91 |
+
else:
|
92 |
+
score.append(0.0)
|
93 |
+
except:
|
94 |
+
score.append(0.0)
|
95 |
|
96 |
except Exception as e:
|
97 |
+
print(f"Error processing individual answer: {str(e)}")
|
98 |
+
score.append(0.0)
|
99 |
+
|
100 |
+
# Return average score if multiple correct answers, otherwise single score
|
101 |
+
if score:
|
102 |
+
return sum(score) / len(score)
|
103 |
+
return 0.0
|
104 |
|
|
|
105 |
except Exception as e:
|
106 |
print(f"Error in llm_score: {str(e)}")
|
107 |
+
return 0.0
|