# import part
import streamlit as st
from transformers import pipeline
# function part
# image2text
def img2text(img):
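    # the image-to-text pipeline takes an image path (or PIL image) and returns
    # a list of dicts; the first result's "generated_text" is used as the caption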
    image_to_text_model = pipeline("image-to-text",
                                   model="nlpconnect/vit-gpt2-image-captioning")
    text = image_to_text_model(img)[0]["generated_text"]
    return text
# text2story
def text2story(text):
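    # wrap the caption in a story-style prompt and let GPT-2 continue it;
    # max_length counts prompt + generated tokens, and the returned
    # "generated_text" includes the prompt itself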
    text_generation_model = pipeline("text-generation",
                                     model="openai-community/gpt2")
    story_text = f"Once upon a time in a land far, far away, {text}"
    generated_story = text_generation_model(story_text,
                                            max_length=100,
                                            num_return_sequences=1)
    return generated_story[0]['generated_text']
# text2audio
def text2audio(story_text):
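    # the text-to-speech pipeline returns a dict with an "audio" waveform array
    # and its "sampling_rate", both of which st.audio needs to play raw samples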
    text_to_speech_model = pipeline("text-to-speech", model="facebook/mms-tts-eng")
    speech_output = text_to_speech_model(story_text)
    return speech_output
# main part
st.title("Storytelling Application")
st.write("🎉Transfer an image to a short story with audio.🎉---From ✨LING Yunhan 21010943 ISOM5240 (L2)✨")
uploaded_file = st.file_uploader("Choose an image📷...", type=["jpg", "png", "jpeg"])
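# file_uploader returns None until the user picks a file, so everything below only runs after an upload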
if uploaded_file is not None:
    print(uploaded_file)
    # save the upload to disk so img2text can read it back by file name
    bytes_data = uploaded_file.getvalue()
    with open(uploaded_file.name, "wb") as file:
        file.write(bytes_data)
    st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)

    # stage 1
    st.text('Generating caption✍...')
    scenario = img2text(uploaded_file.name)
    st.write(scenario)

    # stage 2
    st.text('Generating story📚...')
    # use the caption (scenario) from img2text as the story prompt
    generated_story = text2story(scenario)
    st.write(generated_story)

    # stage 3
    st.text('Generating audio💽...')
    audio_data = text2audio(generated_story)
    # st.button returns True only on the rerun triggered by the click
    if st.button("Play Audio🍩"):
        st.audio(audio_data['audio'],
                 format="audio/wav",
                 start_time=0,
                 sample_rate=audio_data['sampling_rate'])