akhaliq (HF Staff) committed
Commit 9bfb0e4 · verified · 1 parent: 5569d41

Update app.py

Files changed (1): app.py (+300 -1)
app.py CHANGED
@@ -40,4 +40,303 @@ Act as an agentic assistant, if a user asks for a long task, break it down and d
 
 When you want to commit changes, you will always use the 'git commit' bash command. It will always
 be suffixed with a line telling it was generated by Mistral Vibe with the appropriate co-authoring information.
-The format you will always uses is the following heredoc.
+The format you will always use is the following heredoc:
+
+```bash
+git commit -m "<Commit message here>
+
+Generated by Mistral Vibe.
+Co-Authored-By: Mistral Vibe <vibe@mistral.ai>"
+```"""
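The hunk starts at line 40, so the imports and model setup that `chat_function_gpu` and `chat_function_cpu` rely on (`spaces`, `torch`, `gr`, `tokenizer`, `model`) live in the unchanged lines above it. A minimal sketch of what that setup plausibly looks like; the checkpoint ID and dtype are assumptions, not read from the commit:

```python
import spaces  # ZeroGPU helper; import before any CUDA initialization
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical checkpoint: any chat model whose template supports tool calling.
MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # assumed half precision to fit the GPU slice
    device_map="auto",
)
```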
+
+# Tools configuration
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "add_number",
+            "description": "Add two numbers.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "a": {"type": "string", "description": "The first number."},
+                    "b": {"type": "string", "description": "The second number."},
+                },
+                "required": ["a", "b"],
+            },
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "multiply_number",
+            "description": "Multiply two numbers.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "a": {"type": "string", "description": "The first number."},
+                    "b": {"type": "string", "description": "The second number."},
+                },
+                "required": ["a", "b"],
+            },
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "subtract_number",
+            "description": "Subtract two numbers.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "a": {"type": "string", "description": "The first number."},
+                    "b": {"type": "string", "description": "The second number."},
+                },
+                "required": ["a", "b"],
+            },
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "write_a_story",
+            "description": "Write a story about science fiction and people with badass laser sabers.",
+            "parameters": {},
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "terminal",
+            "description": "Perform operations from the terminal.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "command": {
+                        "type": "string",
+                        "description": "The command you wish to launch, e.g. `ls`, `rm`, ...",
+                    },
+                    "args": {
+                        "type": "string",
+                        "description": "The arguments to pass to the command.",
+                    },
+                },
+                "required": ["command"],
+            },
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "python",
+            "description": "Call a Python interpreter with some Python code that will be run.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "code": {
+                        "type": "string",
+                        "description": "The Python code to run.",
+                    },
+                    "result_variable": {
+                        "type": "string",
+                        "description": "Variable containing the result you'd like to retrieve from the execution.",
+                    },
+                },
+                "required": ["code", "result_variable"],
+            },
+        },
+    },
+]
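The schemas above only advertise tools to the chat template; nothing in this hunk defines or executes the underlying functions. A minimal sketch, not part of the commit, of how the arithmetic tools could be implemented and dispatched once the model requests one:

```python
# Hypothetical implementations matching the string-typed schemas above.
def add_number(a: str, b: str) -> str:
    return str(float(a) + float(b))

def multiply_number(a: str, b: str) -> str:
    return str(float(a) * float(b))

def subtract_number(a: str, b: str) -> str:
    return str(float(a) - float(b))

TOOL_REGISTRY = {
    "add_number": add_number,
    "multiply_number": multiply_number,
    "subtract_number": subtract_number,
}

def dispatch_tool_call(name: str, arguments: dict) -> str:
    # Look up the requested tool and call it with the model-supplied arguments.
    fn = TOOL_REGISTRY.get(name)
    if fn is None:
        return f"Unknown tool: {name}"
    return fn(**arguments)

# Example: dispatch_tool_call("add_number", {"a": "2", "b": "3"}) -> "5.0"
```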
+
+@spaces.GPU(duration=60)  # Use ZeroGPU with a 60-second duration
+def chat_function_gpu(message, history):
+    try:
+        # Prepare input messages
+        messages = [
+            {
+                "role": "system",
+                "content": SP,
+            },
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": message,
+                    }
+                ],
+            },
+        ]
+
+        # Tokenize input
+        tokenized = tokenizer.apply_chat_template(
+            conversation=messages,
+            tools=tools,
+            add_generation_prompt=True,  # prompt the model for an assistant turn
+            return_tensors="pt",
+            return_dict=True,
+        )
+
+        input_ids = tokenized["input_ids"].to(device="cuda")
+
+        # Generate output with GPU acceleration
+        output = model.generate(
+            input_ids,
+            max_new_tokens=200,
+            do_sample=True,
+            temperature=0.7,
+            top_p=0.9,
+            num_return_sequences=1,
+        )[0]
+
+        # Decode only the newly generated tokens
+        decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]):])
+        return decoded_output
+
+    except Exception as e:
+        return f"Error processing your request: {str(e)}"
+
+# Fallback CPU function for when a GPU is not available
+def chat_function_cpu(message, history):
+    try:
+        # Prepare input messages
+        messages = [
+            {
+                "role": "system",
+                "content": SP,
+            },
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": message,
+                    }
+                ],
+            },
+        ]
+
+        # Tokenize input with CPU configuration
+        tokenized = tokenizer.apply_chat_template(
+            conversation=messages,
+            tools=tools,
+            add_generation_prompt=True,
+            return_tensors="pt",
+            return_dict=True,
+        )
+
+        input_ids = tokenized["input_ids"].to(device="cpu")
+
+        # Generate output with CPU-optimized settings
+        output = model.generate(
+            input_ids,
+            max_new_tokens=100,  # Reduced for CPU performance
+            do_sample=True,
+            temperature=0.7,
+            top_p=0.9,
+            num_return_sequences=1,
+        )[0]
+
+        # Decode only the newly generated tokens
+        decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]):])
+        return decoded_output
+
+    except Exception as e:
+        return f"Error processing your request: {str(e)}"
+
+# Create custom theme optimized for ZeroGPU
+custom_theme = gr.themes.Soft(
+    primary_hue="blue",
+    secondary_hue="indigo",
+    neutral_hue="slate",
+    font=gr.themes.GoogleFont("Inter"),
+    text_size="lg",
+    spacing_size="lg",
+    radius_size="md",
+).set(
+    button_primary_background_fill="*primary_600",
+    button_primary_background_fill_hover="*primary_700",
+    block_title_text_weight="600",
+)
+
+# Create Gradio interface with ZeroGPU support (the custom theme is applied here)
+with gr.Blocks(theme=custom_theme) as demo:
+    chatbot = gr.Chatbot(height=600)
+    msg = gr.Textbox(
+        label="Your Message",
+        placeholder="Type your message here...",
+        lines=3,
+    )
+
+    # Clear button
+    clear_btn = gr.ClearButton([msg, chatbot])
+
+    # Submit button with loading indicator
+    submit_btn = gr.Button("Send", variant="primary")
+
+    # Status indicator
+    status_text = gr.Markdown("Ready for your input...")
+
+    # Event handler with status updates. Gradio components update through
+    # returned outputs, so this generator yields (chat history, status) pairs.
+    def handle_submit(message, history):
+        history = history or []
+        if torch.cuda.is_available():
+            yield history, "Processing with ZeroGPU acceleration..."
+            response = chat_function_gpu(message, history)
+        else:
+            yield history, "Processing with CPU (ZeroGPU quota may be exhausted)..."
+            response = chat_function_cpu(message, history)
+        # gr.Chatbot expects the full list of (user, assistant) pairs, not a bare string.
+        yield history + [(message, response)], "Ready for your input..."
+
+    msg.submit(
+        fn=handle_submit,
+        inputs=[msg, chatbot],
+        outputs=[chatbot, status_text],
+    )
+
+    submit_btn.click(
+        fn=handle_submit,
+        inputs=[msg, chatbot],
+        outputs=[chatbot, status_text],
+    )
+
+    # Examples with ZeroGPU information
+    gr.Examples(
+        examples=[
+            "Can you implement in Python a method to compute the Fibonacci sequence at the nth element, with n a parameter passed to the function?",
+            "What are the available tools I can use?",
+            "Can you write a story about science fiction with laser sabers?",
+        ],
+        inputs=msg,
+        label="Example Prompts (Powered by ZeroGPU when available)",
+    )
+
+    # Footer links
+    gr.Markdown(
+        "[Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder) · "
+        "[Mistral AI](https://mistral.ai) · "
+        "[Hugging Face ZeroGPU](https://huggingface.co/docs/hub/spaces-zerogpu) · "
+        "[Hugging Face Spaces](https://huggingface.co/spaces)"
+    )
+
+# Queue events (needed for the generator handler), then launch
+demo.queue().launch(
+    share=False,  # Disable share for Spaces deployment
+    max_threads=4,  # Allow more threads for GPU processing
+    show_error=True,
+)