Joekd608 committed
Commit a80f109 · unverified · 1 parent: a42c63e
Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -22,7 +22,7 @@ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 login(token=access_token_read)
 
 # Initialize the text generation pipeline with GPT-2 model
-pipe = pipeline("text-generation", model="gpt2", device=-1) # Using CPU
+pipe = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct", device=-1) # Using CPU
 
 # Input from the user
 text = st.text_input("Ask a Random Question")
@@ -32,4 +32,4 @@ if text:
     response = pipe(f"Answer the question: {text}", max_length=150, num_return_sequences=1)
 
     # Display the generated response
-    st.write(f"Answer: {response[0]['generated_text']}")
+    st.write(f"Answer: {response[0]['generated_text']}")
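For context, here is a minimal sketch of app.py as it stands after this commit, reconstructed from the diff hunks above. Only the lines shown in the diff are confirmed; the imports, the source of access_token_read (read here from a hypothetical HF_TOKEN Streamlit secret), and the surrounding layout are assumptions.

```python
# Sketch of app.py after this commit (assumptions noted below; only the diffed
# lines and hunk-header context are taken verbatim from the commit).
import os

import streamlit as st
from huggingface_hub import login
from transformers import pipeline

# From the hunk header: lets the CUDA allocator grow segments (a no-op on CPU-only runs).
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Assumption: the token is read from a Streamlit secret named HF_TOKEN.
access_token_read = st.secrets["HF_TOKEN"]
login(token=access_token_read)

# Initialize the text generation pipeline with the SmolLM2 instruct model on CPU.
pipe = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct", device=-1) # Using CPU

# Input from the user
text = st.text_input("Ask a Random Question")

if text:
    # Generate a single completion, capped at 150 tokens total.
    response = pipe(f"Answer the question: {text}", max_length=150, num_return_sequences=1)

    # Display the generated response
    st.write(f"Answer: {response[0]['generated_text']}")
```

Note that `device=-1` pins the pipeline to the CPU, and `max_length=150` caps prompt plus completion combined; `max_new_tokens` would bound only the generated portion.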