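# Tabibu: a mental health chatbot that serves a pre-trained tflearn intent model through a Gradio UI.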
# Notebook-style dependency install; run `pip install --upgrade tensorflow` in a shell if executing this as a script.
import nltk
nltk.download('punkt')
from nltk.stem.lancaster import LancasterStemmer
import numpy as np
import tflearn
import tensorflow
import random
import json
import pandas as pd
import pickle
import gradio as gr
stemmer = LancasterStemmer()
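# Load the intent definitions and the preprocessed vocabulary, labels, and training arrays saved during training.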
with open("intents.json") as file:
    data = json.load(file)

with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)
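# Rebuild the same network architecture used at training time, then restore the saved weights.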
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
model.load("MentalHealthChatBotmodel.tflearn")
# print('model loaded successfully')
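# Encode a sentence as a bag-of-words vector over the training vocabulary (1 if the stemmed word is present).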
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
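# Predict the intent tag for a message and return a random response from the matching intent.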
def chat(message):
    message = message.lower()
    results = model.predict([bag_of_words(message, words)])
    results_index = np.argmax(results)
    tag = labels[results_index]
    for tg in data["intents"]:
        if tg['tag'] == tag:
            responses = tg['responses']
    return random.choice(responses)
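# Gradio UI: plain text in, text out. The Chatbot component below is declared but not wired into the Interface.
# Example: chat("hello") returns a random response string from the matched intent.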
chatbot = gr.Chatbot(label="Chat")
demo = gr.Interface(
    chat,
    inputs="text",
    outputs="text",
    title="Tabibu | Mental Health Bot",
)
if __name__ == "__main__":
    demo.launch()