Dev8709 committed
Commit 81e6a94 · 1 Parent(s): 4cfe630

Add application file

Files changed (1)
  1. app.py +176 -0
app.py ADDED
import gradio as gr
import json
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

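# Dependencies (an assumption; the commit itself does not pin them):
#   pip install gradio llama-cpp-python huggingface_hub
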
# Global variable to store the model
llm = None

def load_model():
    """Download a GGUF model and load it with llama.cpp."""
    global llm
    try:
        # Any GGUF model from Hugging Face can be used here; this small
        # chat model is only an example and can be swapped for another repo.
        print("Loading llama.cpp model...")
        model_path = hf_hub_download(
            repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
            filename="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
        )

        # Initialize with basic settings
        llm = Llama(
            model_path=model_path,
            n_ctx=2048,     # context window size
            n_threads=2,    # CPU threads used for inference
            verbose=False,
        )

        print("Model loaded successfully!")
        return "Model loaded successfully!"

    except Exception as e:
        print(f"Error loading model: {e}")
        return f"Error loading model: {e}"

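# Alternative: newer llama-cpp-python releases can bundle the download step
# (a sketch; the repo id and filename pattern are examples, not requirements):
#
#     llm = Llama.from_pretrained(
#         repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
#         filename="*Q4_K_M.gguf",
#         n_ctx=2048,
#     )
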
def text_to_json(input_text, max_tokens=512, temperature=0.7):
    """Convert plain text to structured JSON using llama.cpp."""
    global llm

    if llm is None:
        return json.dumps(
            {"error": "Model not loaded. Please load the model first."}, indent=2
        )

    try:
        # Create a prompt for JSON generation
        prompt = f"""Convert the following text into a structured JSON format. Extract key information and organize it logically:

Text: {input_text}

JSON:"""

        # Generate a completion using llama.cpp
        response = llm(
            prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            stop=["```", "\n\n\n"],
            echo=False,
        )

        generated_text = response["choices"][0]["text"].strip()

        # Validate the output by round-tripping it through the json module
        try:
            parsed_json = json.loads(generated_text)
            return json.dumps(parsed_json, indent=2)
        except json.JSONDecodeError:
            # Not valid JSON; return the raw attempt so the user can inspect it
            return generated_text

    except Exception as e:
        return f"Error generating JSON: {str(e)}"

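# Note: recent llama-cpp-python builds can constrain decoding to valid JSON
# directly, which would make the validation fallback above unnecessary
# (a sketch, assuming a chat-tuned GGUF model is loaded):
#
#     response = llm.create_chat_completion(
#         messages=[{"role": "user", "content": f"Summarize as JSON: {input_text}"}],
#         response_format={"type": "json_object"},
#         max_tokens=max_tokens,
#     )
#     generated_text = response["choices"][0]["message"]["content"]
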
def demo_without_model(input_text):
    """Demo function that works without loading a model."""
    try:
        # Simple rule-based JSON conversion for demonstration
        words = input_text.strip().split()

        # Create a basic JSON structure from surface features of the text
        result = {
            "input_text": input_text,
            "word_count": len(words),
            "words": words,
            "character_count": len(input_text),
            "sentences": [s.strip() for s in input_text.split(".") if s.strip()],
            "metadata": {
                "processed_by": "llama.cpp demo",
                "timestamp": "demo_mode",
            },
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        return f"Error processing text: {str(e)}"

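# For example, demo_without_model("Gradio apps are simple.") returns JSON like:
#   {"input_text": "Gradio apps are simple.", "word_count": 4, ...}
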
# Create Gradio interface
with gr.Blocks(title="Plain Text to JSON with llama.cpp") as demo:
    gr.Markdown("# Plain Text to JSON Converter")
    gr.Markdown("Convert plain text into structured JSON format using llama.cpp")

    with gr.Tab("Text to JSON"):
        with gr.Row():
            with gr.Column():
                input_text = gr.Textbox(
                    label="Input Text",
                    placeholder="Enter your text here...",
                    lines=5,
                )

                with gr.Row():
                    max_tokens = gr.Slider(
                        minimum=50,
                        maximum=1000,
                        value=512,
                        label="Max Tokens",
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.7,
                        label="Temperature",
                    )

                convert_btn = gr.Button("Convert to JSON", variant="primary")
                demo_btn = gr.Button("Demo (No Model)", variant="secondary")

            with gr.Column():
                output_json = gr.Textbox(
                    label="Generated JSON",
                    lines=10,
                    interactive=False,
                )

    with gr.Tab("Model Management"):
        load_btn = gr.Button("Load Model", variant="primary")
        model_status = gr.Textbox(
            label="Model Status",
            value="Model not loaded",
            interactive=False,
        )

        gr.Markdown("""
        ### Instructions:
        1. Click "Load Model" to download and initialize the example GGUF model
        2. Use "Demo (No Model)" for basic functionality without loading a model
        3. To use a different model, change the GGUF repo and filename in load_model()

        ### Notes:
        - This space uses llama.cpp for efficient CPU inference
        - Models should be in GGUF format
        - Adjust max_tokens and temperature for different outputs
        """)

    # Event handlers
    convert_btn.click(
        fn=text_to_json,
        inputs=[input_text, max_tokens, temperature],
        outputs=output_json,
    )

    demo_btn.click(
        fn=demo_without_model,
        inputs=input_text,
        outputs=output_json,
    )

    load_btn.click(
        fn=load_model,
        outputs=model_status,
    )

if __name__ == "__main__":
    demo.launch()
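# Note: on Hugging Face Spaces the bare demo.launch() above is sufficient;
# for self-hosting, standard Gradio options such as
# demo.launch(server_name="0.0.0.0", server_port=7860) can expose the app.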