import gradio as gr
import openai
import os
import tiktoken

# Set your OpenAI API key (this app uses the legacy openai<1.0 client interface)
openai.api_key = os.getenv('OPENAI_API_KEY')

# gpt-3.5-turbo pricing in USD per token
INPUT_COST_PER_TOKEN = 0.50 / 1_000_000
OUTPUT_COST_PER_TOKEN = 1.50 / 1_000_000
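# Example: a request with 1,000 input tokens and 200 output tokens costs
# 1_000 * INPUT_COST_PER_TOKEN + 200 * OUTPUT_COST_PER_TOKEN = $0.0008.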

def print_like_dislike(x: gr.LikeData):
    # Callback for the chatbot's built-in like/dislike buttons (wired up via chatbot.like below).
    print(x.index, x.value, x.liked)

def add_text(history, text):
    history.append((text, "**That's cool!**"))
    return history

def add_file(history, file):
    # Assuming you want to display the name of the uploaded file
    file_info = (f"Uploaded file: {file.name}", "")
    history.append(file_info)
    return history
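
# NOTE: add_text and add_file are not wired into the interface below; they look
# like leftovers from a Gradio chatbot template and are kept here unused.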

def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    """Estimate prompt tokens using the tiktoken heuristic from the OpenAI cookbook."""
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 0
    for message in messages:
        num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":  # if there's a name, the role is omitted
                num_tokens -= 1  # role is always required and always 1 token
    num_tokens += 2  # every reply is primed with <im_start>assistant
    return num_tokens
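
# Rough illustration of the heuristic above (token counts are approximate and
# may vary by tiktoken version):
#   num_tokens_from_messages([{"role": "user", "content": "Hi"}])
#   -> 4 (message overhead) + 1 ("user") + 1 ("Hi") + 2 (reply priming) = 8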

def initialize_chat(initial_question):
    # This function initializes the chat with the user-provided initial question.
    chat_history = [(None, initial_question)]
    response, follow_up_questions, token_info = generate_response(initial_question, 0)
    chat_history.append((None, response))
    
    # Extract follow-up questions as examples
    follow_up_questions_formatted = [q.strip() for q in follow_up_questions.split('\n') if q.strip()]
    examples_state = [[q] for q in follow_up_questions_formatted]
    
    return chat_history, follow_up_questions, token_info, examples_state
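
# generate_response makes two chat-completion calls per prompt: one for the
# answer itself and a second that drafts three suggested follow-up questions
# for the user to pick from.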

def generate_response(prompt, token_count=0):
    print(f"Received prompt: {prompt}")
    messages = [
        {"role": "system", "content": "You are a friendly and helpful chatbot."},
        {"role": "user", "content": prompt}
    ]
    
    try:
        input_tokens = num_tokens_from_messages(messages, model="gpt-3.5-turbo")
        
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.7,
        )
        
        output_text = response.choices[0].message['content'].strip()
       
        output_tokens = response.usage['completion_tokens']
        
        follow_up_prompt = f"Based on the following response, suggest three follow-up questions that a young person should ask in first person: {output_text}"
        follow_up_messages = [
            {"role": "system", "content": "You are a friendly and helpful chatbot."},
            {"role": "user", "content": follow_up_prompt}
        ]
        follow_up_response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=follow_up_messages,
            max_tokens=100,
            temperature=0.7,
        )

        follow_up_questions = follow_up_response.choices[0].message['content'].strip().split('\n')
        follow_up_questions = "\n".join(follow_up_questions)
        print(f"Follow up questions: {follow_up_questions}")
        topics_str = "Topic analysis not available"
        
        # Calculate the total tokens used
        total_input_tokens = input_tokens + num_tokens_from_messages(follow_up_messages, model="gpt-3.5-turbo")
        total_output_tokens = output_tokens + follow_up_response.usage['completion_tokens']
        
        # Calculate cost
        input_cost = total_input_tokens * INPUT_COST_PER_TOKEN
        output_cost = total_output_tokens * OUTPUT_COST_PER_TOKEN
        total_cost = input_cost + output_cost

        # Adjusted to return the response and follow-up questions
        new_response = output_text + "\n\nTopics: " + topics_str
        token_info = f"### Token Usage:\n\n* Input Tokens: {total_input_tokens}\n* Output Tokens: {total_output_tokens}\n* Total Cost: ${total_cost:.4f}"
    except Exception as e:
        new_response = f"Error generating response: {e}"
        follow_up_questions = ""  # keep the same type as the success path so callers can split('\n')
        token_info = "### Token Usage:\n\n* Input Tokens: 0\n* Output Tokens: 0\n* Total Cost: $0.0000"

    return new_response, follow_up_questions, token_info

def process_response(prompt, chat_history, token_count, examples_state):
    response, new_follow_up_questions, token_info = generate_response(prompt, token_count)
    chat_history.append((prompt, response))

    # Split and format the new follow-up questions, keeping at most three
    new_follow_up_questions_formatted = [q.strip() for q in new_follow_up_questions.split('\n') if q.strip()][:3]
    # Pad to exactly three entries so downstream consumers always see three slots
    while len(new_follow_up_questions_formatted) < 3:
        new_follow_up_questions_formatted.append("")

    # Update the examples state with the new follow-up questions
    examples_state = [[q] for q in new_follow_up_questions_formatted]

    follow_up_questions_md = "\n".join(new_follow_up_questions_formatted)

    return chat_history, token_info, follow_up_questions_md, examples_state

# CSS for the phone layout and background
css = """
#chat-container {
    max-width: 400px !important;
    margin: auto;
    border: 1px solid #ccc;
    border-radius: 20px;
    overflow: hidden;
    background: url('https://path-to-your-phone-background-image.png') no-repeat center center;
    background-size: cover;

    padding: 20px;
    box-sizing: border-box;
    display: flex;
    flex-direction: column;
}

#chatbot {
    height: calc(100% - 50px);
    overflow-y: auto;
    background: transparent;
    width: 100%;
}
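
/* NOTE: #component-10 targets an auto-generated Gradio element id, which can
   change across Gradio versions; setting elem_id on the component in Python
   would be more robust. */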

#component-10 {
    font-size: 7px;
    padding: 5px;
    margin: 5px 0;
    width: 100%;
}

#example-container .gr-examples {
    font-size: 0.9em;
    padding: 5px;
    margin: 5px 0;
}
"""

# Initialize the chat history and suggested questions
#chat_history, follow_up_questions, initial_token_info = initialize_chat("I'm 14 years old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?")


# Initial example questions shown before the chat is initialized
questions = [
    ["I'm a 14-year-old girl living in Uttar Pradesh. Show me where I can get the HPV vaccine and find more information about HPV."],
    ["I'm a 14-year-old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?"],
    ["I'm a 15-year-old boy living in New Delhi. How can I learn more about climate change, and what can I do myself?"],
]

with gr.Blocks(css=css) as demo:
    examples_state = gr.State([])
    chat_history = gr.State([])
    token_info = gr.State("")
    follow_up_questions_md = gr.State("")
    
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(
                """
                # Child-safe chatbot project
                This demo explores a chatbot that keeps conversations focused by guiding users to pick from a curated list of suggested follow-up questions rather than typing free-form input, reducing the risk of drifting into the off-topic dialogue that is a common pitfall of conventional chatbots.
                """
            )
            token_info_display = gr.Markdown(
                value="",
                elem_id="token-info"
            )
            follow_up_questions_display = gr.Markdown(
                value="",
                elem_id="follow-up-questions",
                label="Follow up Questions"
            )
            initial_question_input = gr.Textbox(
                placeholder="Type your initial question here...", 
                label="Initial Question"
            )
            initialize_button = gr.Button("Initialize Chat")

            question_examples = gr.Examples(
                examples=questions,
                inputs=initial_question_input,
                label="Initial Questions"
            )
        
        with gr.Column(scale=1, elem_id="chat-container"):
            chatbot = gr.Chatbot(
                value=[],
                elem_id="chatbot",
                bubble_full_width=False,
                label="Safe Chatbot v1"
            )
            
            with gr.Row():
                txt = gr.Textbox(scale=4, show_label=False, placeholder="Select question...", container=False, interactive=False)
                btn = gr.Button("Submit")

                btn.click(
                    fn=process_response,
                    inputs=[txt, chat_history, gr.State(0), examples_state],
                    outputs=[chatbot, token_info_display, follow_up_questions_display, examples_state]
                )
                # NOTE: gr.Examples is rendered once at build time, so seeding it
                # from the (initially empty) examples_state would leave it blank;
                # start from the same initial questions instead. Later updates flow
                # through examples_state, not through this component.
                examples_component = gr.Examples(
                    examples=questions,
                    inputs=[txt],
                    label="Questions"
                )
            
            chatbot.like(print_like_dislike, None, None)

    initialize_button.click(
        fn=initialize_chat,
        inputs=[initial_question_input],
        outputs=[chat_history, follow_up_questions_display, token_info_display, examples_state]
    ).then(
        fn=lambda chat_history: chat_history,
        inputs=[chat_history],
        outputs=[chatbot]
    )

if __name__ == "__main__":
    demo.launch(share=False)
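
# share=False keeps the app local; for quick testing with a temporary public
# link, demo.launch(share=True) can be used instead.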