File size: 4,289 Bytes
2785a2e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
import json
from difflib import SequenceMatcher
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers.utils import logging as hf_logging

# Silence transformers' informational logging; only errors are printed.
hf_logging.set_verbosity_error()

# Directory containing the fine-tuned Vietnamese question-generation T5 checkpoint.
MODEL_DIR = "t5-viet-qg-finetuned"
# SQuAD-format JSON file used to look up the best-matching context/answer pair.
DATA_PATH = "30ktrain.json"

# Loaded once at import time and reused by generate_questions().
tokenizer = T5Tokenizer.from_pretrained(MODEL_DIR)
model = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)

def find_best_match_from_context(user_context, squad_data):
    """Find the SQuAD entry whose title or paragraph context best matches the input.

    Similarity is measured with difflib.SequenceMatcher on lowercased text.
    Each paragraph is scored as the max of (user vs. article title) and
    (user vs. paragraph context) ratios.

    Args:
        user_context: Free-form text entered by the user.
        squad_data: SQuAD-style dict {"data": [{"title", "paragraphs":
            [{"context", "qas": [{"question", "answers": [{"text"}]}]}]}]}.

    Returns:
        (context, answer_text, question_text) for the best-scoring paragraph
        that has at least one answered question, or None when nothing scores
        above 0 (or no answered question exists).
    """
    best_score, best_entry = 0.0, None
    ui = user_context.lower()

    for article in squad_data.get("data", []):
        # Title similarity is shared by every paragraph of the article.
        title_score = SequenceMatcher(
            None, ui, article.get("title", "").lower()
        ).ratio()

        for paragraph in article.get("paragraphs", []):
            context = paragraph.get("context", "")
            # Bug fix: the original scored only on the title and ignored the
            # paragraph text entirely; match against the context as well.
            context_score = SequenceMatcher(None, ui, context.lower()).ratio()
            score = max(title_score, context_score)
            if score <= best_score:
                continue  # cannot beat the current best; skip the qa scan

            # Take the first qa that actually has an answer.
            for qa in paragraph.get("qas", []):
                answers = qa.get("answers", [])
                if not answers:
                    continue
                best_score = score
                best_entry = (
                    context,
                    answers[0].get("text", "").strip(),
                    qa.get("question", "").strip(),
                )
                break

    return best_entry

def _near_duplicate(q, seen, thr=0.90):
    for s in seen:
        if SequenceMatcher(None, q, s).ratio() >= thr:
            return True
    return False

def generate_questions(user_context,
                       total_questions=20,
                       batch_size=10,
                       top_k=60,
                       top_p=0.95,
                       temperature=0.9,
                       max_input_len=512,
                       max_new_tokens=64,
                       max_rounds=10):
    """Generate up to *total_questions* unique questions for the best-matching entry.

    Loads DATA_PATH, finds the closest (context, answer) pair via
    find_best_match_from_context, and samples questions from the global T5
    model in batches, filtering near-duplicates and too-short outputs.
    Prints the numbered questions and returns them.

    Args:
        user_context: Text the user wants questions about.
        total_questions: Target number of unique questions.
        batch_size: Sequences sampled per generation call.
        top_k / top_p / temperature: Sampling hyperparameters.
        max_input_len: Token truncation limit for the encoder input.
        max_new_tokens: Generation length cap per question.
        max_rounds: Safety cap on generation rounds (new, default keeps the
            old call signature working). The original looped until enough
            unique questions accumulated, which never terminates when the
            model keeps emitting duplicates or too-short strings.

    Returns:
        The list of unique questions (possibly shorter than total_questions
        when max_rounds is exhausted), or None when no matching data exists.
    """
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        squad_data = json.load(f)

    best_entry = find_best_match_from_context(user_context, squad_data)
    if best_entry is None:
        print("Không tìm thấy dữ liệu phù hợp trong file JSON.")
        return None

    context, answer, _ = best_entry

    # Prompt format must match the fine-tuning format of the checkpoint.
    input_text = f"answer: {answer}\ncontext: {context}\nquestion:"
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_len
    )

    unique_questions = []
    rounds = 0
    # Bounded loop: stop after max_rounds even if the model cannot produce
    # enough unique questions (fixes potential infinite loop).
    while len(unique_questions) < total_questions and rounds < max_rounds:
        rounds += 1
        n = min(batch_size, total_questions - len(unique_questions))
        outputs = model.generate(
            **inputs,
            do_sample=True,
            top_k=top_k,
            top_p=top_p,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            num_return_sequences=n,
            no_repeat_ngram_size=3,
            repetition_penalty=1.12
        )

        for out in outputs:
            q = tokenizer.decode(out, skip_special_tokens=True).strip()
            if len(q) < 5:
                continue  # discard degenerate/empty generations
            if not _near_duplicate(q, unique_questions, thr=0.90):
                unique_questions.append(q)

    unique_questions = unique_questions[:total_questions]

    print("Các câu hỏi mới được sinh ra:")
    for i, q in enumerate(unique_questions, 1):
        if not q.endswith("?"):
            q += "?"  # cosmetic: ensure every printed question ends with "?"
        print(f"{i}. {q}")
    return unique_questions

if __name__ == "__main__":
    # Interactive entry point: read the passage and the desired question count.
    user_context = input("\nNhập đoạn văn bản:\n ").strip()

    raw_n = input("\nNhập vào số lượng câu hỏi bạn cần:").strip()
    try:
        total_questions = int(raw_n) if raw_n else 20
    except ValueError:
        print("Giá trị không hợp lệ. Dùng mặc định 20.")
        total_questions = 20

    # Clamp the requested count to [1, 200].
    total_questions = max(1, min(200, total_questions))

    # Equivalent to the original branch: never sample more than 20 per call,
    # and never more than the total requested.
    batch_size = min(20, total_questions)

    print("\nĐang phân tích dữ liệu...\n")

    generate_questions(
        user_context=user_context,
        total_questions=total_questions,
        batch_size=batch_size,
        top_k=60,
        top_p=0.95,
        temperature=0.9,
        max_input_len=512,
        max_new_tokens=64,
    )