Kuberwastaken committed
Commit ff1ed9c · 1 Parent(s): a7b8952

Limited Maximum tokens

Files changed (1):
model/analyzer.py +3 -3
model/analyzer.py CHANGED
@@ -88,7 +88,7 @@ class ContentAnalyzer:
 
         print(f"\nAnalyzing for {mapped_name}...")
         prompt = f"""
-        Check this text for any indication of {mapped_name} ({description}).
+        Check this text for any clear indication of {mapped_name} ({description}).
         Be sensitive to subtle references or implications, make sure the text is not metaphorical.
         Respond concisely and ONLY with: YES, NO, or MAYBE.
         Text: {chunk}
@@ -104,9 +104,9 @@ class ContentAnalyzer:
         print("Generating response...")
         outputs = self.model.generate(
             **inputs,
-            max_new_tokens=10,
+            max_new_tokens=2,
             do_sample=True,
-            temperature=0.4,
+            temperature=0.7,
             top_p=0.9,
             pad_token_id=self.tokenizer.eos_token_id
         )
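
For context, a minimal standalone sketch of the tightened generation call, assuming a Hugging Face transformers causal LM; the model name "gpt2" and the prompt text below are placeholders for illustration, not the repository's actual model or prompt:

# Sketch of the updated generation settings; "gpt2" and the prompt are assumptions,
# not the repository's code.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "Respond concisely and ONLY with: YES, NO, or MAYBE.\nText: example chunk"
inputs = tokenizer(prompt, return_tensors="pt")

outputs = model.generate(
    **inputs,
    max_new_tokens=2,      # cap the reply length, as in this commit
    do_sample=True,
    temperature=0.7,       # raised from 0.4 in this commit
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)

# Decode only the newly generated tokens to read the short YES/NO/MAYBE reply.
reply = tokenizer.decode(
    outputs[0][inputs["input_ids"].shape[-1]:],
    skip_special_tokens=True,
).strip()
print(reply)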