|
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

st.set_page_config(
    page_title="Fake News Detection App",
    page_icon="⚠️",
    layout="wide",
    initial_sidebar_state="auto",
)

@st.cache_resource
def load_classifier():
    """Load the pretrained fake-news model once and cache it across app reruns."""
    tokenizer = AutoTokenizer.from_pretrained("jy46604790/Fake-News-Bert-Detect")
    model = AutoModelForSequenceClassification.from_pretrained("jy46604790/Fake-News-Bert-Detect")
    return pipeline("text-classification", model=model, tokenizer=tokenizer)

st.markdown(
    """
    <style>
    body {
        background-color: #f5f5f5;
    }
    h1 {
        color: #4e79a7;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

def detect_fake_news(text):
    """Run the cached classifier on the text and return its label and confidence score."""
    classifier = load_classifier()
    prediction = classifier(text)
    label = prediction[0]["label"]
    score = prediction[0]["score"]
    return label, score
|
st.markdown("<h1 style='text-align: center;margin-top:0px;'>A fake news detection app</h1>", |
|
unsafe_allow_html=True) |
|
|
|
st.markdown("<h1 style='text-align: center;'>Welcome</h1>", |
|
unsafe_allow_html=True) |
|
st.markdown("<p style='text-align: center;'>This is a Fake News Detection App.</p>", |
|
unsafe_allow_html=True) |
|
|
|
|
|
text = st.text_input("Enter some text and we'll tell you if it's likely to be fake news or not!") |
|
|
|

if st.button('Predict'):
    if text:
        with st.spinner('Checking whether the news is fake...'):
            label, score = detect_fake_news(text)
        # The model card maps LABEL_0 to fake news and LABEL_1 to real news
        if label == "LABEL_0":
            st.error(f"The text is likely to be fake news with a confidence score of {score*100:.2f}%!")
        else:
            st.success(f"The text is likely to be genuine with a confidence score of {score*100:.2f}%!")
    else:
        st.warning("Please enter some text to detect fake news.")