# import part
import streamlit as st
from transformers import pipeline
# function part
# image2text: generate a caption for the uploaded image
def img2text(img):
    # image-captioning pipeline (ViT encoder + GPT-2 decoder)
    image_to_text_model = pipeline("image-to-text",
                                   model="nlpconnect/vit-gpt2-image-captioning")
    text = image_to_text_model(img)[0]["generated_text"]
    return text
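# Example (not part of the app), assuming a local file "example.jpg" exists:
# the raw pipeline output is a list of dicts such as
# [{"generated_text": "a dog sitting on a couch"}]; img2text returns just the string.
#   caption = img2text("example.jpg")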
# text2story: continue the caption into a short story
def text2story(text):
    # GPT-2 text-generation pipeline
    text_generation_model = pipeline("text-generation",
                                     model="openai-community/gpt2")
    # seed the generation with a story-style opening built from the caption
    story_text = f"Once upon a time in a land far, far away, {text}"
    generated_story = text_generation_model(story_text,
                                            max_length=100,
                                            num_return_sequences=1)
    return generated_story[0]['generated_text']
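# Example (not part of the app): the generator returns one dict per sequence,
# e.g. [{"generated_text": "Once upon a time in a land far, far away, ..."}];
# the caption string below is a placeholder.
#   story = text2story("a dog sitting on a couch")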
# text2audio: read the story aloud with a TTS model
def text2audio(story_text):
    # text-to-speech pipeline (Meta MMS English voice)
    text_to_speech_model = pipeline("text-to-speech", model="facebook/mms-tts-eng")
    speech_output = text_to_speech_model(story_text)
    return speech_output
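# Example (not part of the app): the TTS pipeline returns a dict holding the raw
# waveform and its sampling rate, which is what st.audio consumes below:
#   speech = text2audio("Once upon a time ...")
#   speech["audio"]          # numpy array of audio samples
#   speech["sampling_rate"]  # e.g. 16000 for facebook/mms-tts-eng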
# main part
st.title("Storytelling Application")
st.write("🎉Turn an image into a short story with audio.🎉---From ✨LING Yunhan 21010943 ISOM5240 (L2)✨")
uploaded_file = st.file_uploader("Choose an image📷...", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    print(uploaded_file)  # debug: logs the UploadedFile object to the terminal
    # save the upload to disk so the captioning pipeline can read it by filename
    bytes_data = uploaded_file.getvalue()
    with open(uploaded_file.name, "wb") as file:
        file.write(bytes_data)
    st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)
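    # Note (optional alternative, a sketch assuming Pillow is installed): the
    # image-to-text pipeline also accepts a PIL image directly, so the upload
    # could be captioned without saving it to disk:
    #   from PIL import Image
    #   scenario = img2text(Image.open(uploaded_file))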
    # stage 1: image -> caption
    st.text('Generating caption✍...')
    scenario = img2text(uploaded_file.name)
    st.write(scenario)
    # stage 2: caption -> story (uses the scenario from img2text)
    st.text('Generating story📚...')
    generated_story = text2story(scenario)
    st.write(generated_story)
    # stage 3: story -> speech
    st.text('Generating audio💽...')
    audio_data = text2audio(generated_story)
    if st.button("Play Audio🍩"):
        st.audio(audio_data['audio'],
                 format="audio/wav",
                 start_time=0,
                 sample_rate=audio_data['sampling_rate'])
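# To run locally (a sketch; the filename "app.py" is an assumption, and the
# streamlit, transformers, and torch packages must be installed):
#   streamlit run app.py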