akhaliq committed
Commit f796e9b · verified · 1 Parent(s): eadb7bf

Update app.py

Files changed (1):
  1. app.py +320 -1

app.py CHANGED
@@ -40,4 +40,323 @@ Act as an agentic assistant, if a user asks for a long task, break it down and d
 
 When you want to commit changes, you will always use the 'git commit' bash command. It will always
 be suffixed with a line telling it was generated by Mistral Vibe with the appropriate co-authoring information.
- The format you will always uses is the following heredoc.
+ The format you will always use is the following heredoc.
+
+ ```bash
+ git commit -m "<Commit message here>
+
+ Generated by Mistral Vibe.
+ Co-Authored-By: Mistral Vibe <vibe@mistral.ai>"
+ ```"""
+
+ # Tools configuration
+ tools = [
+     {
+         "type": "function",
+         "function": {
+             "name": "add_number",
+             "description": "Add two numbers.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "a": {"type": "string", "description": "The first number."},
+                     "b": {"type": "string", "description": "The second number."},
+                 },
+                 "required": ["a", "b"],
+             },
+         },
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "multiply_number",
+             "description": "Multiply two numbers.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "a": {"type": "string", "description": "The first number."},
+                     "b": {"type": "string", "description": "The second number."},
+                 },
+                 "required": ["a", "b"],
+             },
+         },
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "substract_number",
+             "description": "Subtract two numbers.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "a": {"type": "string", "description": "The first number."},
+                     "b": {"type": "string", "description": "The second number."},
+                 },
+                 "required": ["a", "b"],
+             },
+         },
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "write_a_story",
+             "description": "Write a story about science fiction and people with badass laser sabers.",
+             "parameters": {},
+         },
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "terminal",
+             "description": "Perform operations from the terminal.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "command": {
+                         "type": "string",
+                         "description": "The command you wish to launch, e.g. `ls`, `rm`, ...",
+                     },
+                     "args": {
+                         "type": "string",
+                         "description": "The arguments to pass to the command.",
+                     },
+                 },
+                 "required": ["command"],
+             },
+         },
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "python",
+             "description": "Call a Python interpreter with some Python code that will be run.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "code": {
+                         "type": "string",
+                         "description": "The Python code to run.",
+                     },
+                     "result_variable": {
+                         "type": "string",
+                         "description": "Variable containing the result you'd like to retrieve from the execution.",
+                     },
+                 },
+                 "required": ["code", "result_variable"],
+             },
+         },
+     },
+ ]
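Note: these schemas only advertise the tools to the model; nothing in this hunk parses or executes the resulting tool calls. A minimal dispatch sketch, assuming the call has already been parsed into a name plus a JSON arguments dict — the `TOOL_IMPLS` table and `run_tool_call` helper are illustrations, not part of the committed app.py:

```python
# Hypothetical host-side dispatch for the arithmetic tools declared above.
# Not part of this commit: names and helpers here are illustrative only.
TOOL_IMPLS = {
    "add_number": lambda a, b: str(float(a) + float(b)),
    "multiply_number": lambda a, b: str(float(a) * float(b)),
    "substract_number": lambda a, b: str(float(a) - float(b)),  # name kept as declared in the schema
}

def run_tool_call(name: str, arguments: dict) -> str:
    """Run one parsed tool call and return its result as a string."""
    impl = TOOL_IMPLS.get(name)
    if impl is None:
        return f"Unknown tool: {name}"
    return impl(**arguments)

# e.g. run_tool_call("add_number", {"a": "15", "b": "27"}) returns "42.0"
```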
+
+ @spaces.GPU(duration=60)  # Use ZeroGPU with 60 second duration
+ def chat_function_gpu(message, history):
+     try:
+         # Prepare input messages
+         messages = [
+             {
+                 "role": "system",
+                 "content": SP,
+             },
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": message,
+                     }
+                 ],
+             },
+         ]
+
+         # Tokenize input
+         tokenized = tokenizer.apply_chat_template(
+             conversation=messages,
+             tools=tools,
+             return_tensors="pt",
+             return_dict=True,
+         )
+
+         input_ids = tokenized["input_ids"].to(device="cuda")
+
+         # Generate output with GPU acceleration
+         output = model.generate(
+             input_ids,
+             max_new_tokens=200,
+             do_sample=True,
+             temperature=0.7,
+             top_p=0.9,
+             num_return_sequences=1,
+         )[0]
+
+         # Decode and return response
+         decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]):])
+
+         # Return in tuple format for Gradio chatbot
+         return history + [[message, decoded_output]]
+
+     except Exception as e:
+         error_msg = f"Error processing your request: {str(e)}"
+         return history + [[message, error_msg]]
+
+ # Fallback CPU function for when GPU is not available
+ def chat_function_cpu(message, history):
+     try:
+         # Prepare input messages
+         messages = [
+             {
+                 "role": "system",
+                 "content": SP,
+             },
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": message,
+                     }
+                 ],
+             },
+         ]
+
+         # Tokenize input with CPU configuration
+         tokenized = tokenizer.apply_chat_template(
+             conversation=messages,
+             tools=tools,
+             return_tensors="pt",
+             return_dict=True,
+         )
+
+         input_ids = tokenized["input_ids"].to(device="cpu")
+
+         # Generate output with CPU-optimized settings
+         output = model.generate(
+             input_ids,
+             max_new_tokens=100,  # Reduced for CPU performance
+             do_sample=True,
+             temperature=0.7,
+             top_p=0.9,
+             num_return_sequences=1,
+         )[0]
+
+         # Decode and return response
+         decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]):])
+
+         # Return in tuple format for Gradio chatbot
+         return history + [[message, decoded_output]]
+
+     except Exception as e:
+         error_msg = f"Error processing your request: {str(e)}"
+         return history + [[message, error_msg]]
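Both chat functions decode everything the model produced after the prompt, special tokens included. If only the visible reply is wanted, `tokenizer.decode` accepts `skip_special_tokens=True`; a small variant of the decode step (an optional tweak, not what this commit does):

```python
# Optional variant of the decode step in chat_function_gpu/chat_function_cpu:
# skip_special_tokens=True drops EOS and other control tokens from the reply.
# Not part of this commit.
prompt_len = len(tokenized["input_ids"][0])
decoded_output = tokenizer.decode(output[prompt_len:], skip_special_tokens=True)
```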
+
+ # Create custom theme optimized for ZeroGPU
+ custom_theme = gr.themes.Soft(
+     primary_hue="blue",
+     secondary_hue="indigo",
+     neutral_hue="slate",
+     font=gr.themes.GoogleFont("Inter"),
+     text_size="lg",
+     spacing_size="lg",
+     radius_size="md",
+ ).set(
+     button_primary_background_fill="*primary_600",
+     button_primary_background_fill_hover="*primary_700",
+     block_title_text_weight="600",
+ )
+
+ # Create Gradio interface with ZeroGPU support - Gradio 6 syntax
+ with gr.Blocks(fill_height=True) as demo:
+     gr.Markdown("""
+     # 🚀 Mistral Vibe - AI Coding Assistant
+
+     Powered by Mistral AI's Devstral-Small-2-24B with ZeroGPU acceleration
+
+     [Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
+     """)
+
+     chatbot = gr.Chatbot(
+         height=600,
+         label="Chat with Mistral Vibe"
+     )
+
+     with gr.Row():
+         msg = gr.Textbox(
+             label="Your Message",
+             placeholder="Type your message here...",
+             lines=3,
+             scale=4
+         )
+         with gr.Column(scale=1):
+             submit_btn = gr.Button("Send", variant="primary", size="lg")
+             clear_btn = gr.ClearButton([msg, chatbot], value="Clear Chat")
+
+     # Status indicator
+     status_text = gr.Markdown("✅ Ready for your input...")
+
+     # Event handlers with status updates
+     def handle_submit(message, history):
+         if not message.strip():
+             return history, ""
+
+         if torch.cuda.is_available():
+             response = chat_function_gpu(message, history if history else [])
+         else:
+             response = chat_function_cpu(message, history if history else [])
+
+         return response, ""
+
+     # Gradio 6 event handlers
+     msg.submit(
+         fn=handle_submit,
+         inputs=[msg, chatbot],
+         outputs=[chatbot, msg],
+         api_visibility="public"
+     )
+
+     submit_btn.click(
+         fn=handle_submit,
+         inputs=[msg, chatbot],
+         outputs=[chatbot, msg],
+         api_visibility="public"
+     )
+
+     # Examples with ZeroGPU information
+     gr.Examples(
+         examples=[
+             "Can you implement in Python a method to compute the fibonacci sequence at the nth element with n a parameter passed to the function?",
+             "What are the available tools I can use?",
+             "Can you write a story about science fiction with laser sabers?",
+             "Add 15 and 27 using the add_number tool",
+             "Multiply 8 by 9"
+         ],
+         inputs=msg,
+         label="Example Prompts (Powered by ZeroGPU when available)"
+     )
+
+     gr.Markdown("""
+     ### ℹ️ About
+     This space uses Mistral AI's Devstral model with ZeroGPU acceleration for fast inference.
+     The model has access to various tools including math operations, terminal commands, and Python execution.
+     """)
+
+ # Launch with custom theme and ZeroGPU settings - Gradio 6 syntax
+ demo.queue()  # Enable queue separately in Gradio 6
+ demo.launch(
+     theme=custom_theme,
+     footer_links=[
+         {
+             "label": "Built with anycoder",
+             "url": "https://huggingface.co/spaces/akhaliq/anycoder"
+         },
+         {
+             "label": "Mistral AI",
+             "url": "https://mistral.ai"
+         },
+         {
+             "label": "Hugging Face ZeroGPU",
+             "url": "https://huggingface.co/docs/hub/spaces-zerogpu"
+         }
+     ],
+     share=False,
+     max_threads=4,
+     show_error=True
+ )
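This hunk leans on names defined above line 40 of app.py, outside the diff: `model`, `tokenizer`, `SP`, `spaces`, `torch`, and `gr`. For orientation, the setup typically looks roughly like the sketch below; the checkpoint id, dtype, and abbreviated system prompt are assumptions, not taken from this commit.

```python
# Rough sketch of the setup this hunk assumes (defined earlier in app.py,
# outside the diff). Checkpoint id and dtype are placeholders, not the commit's.
import gradio as gr
import spaces  # ZeroGPU decorator used by chat_function_gpu
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "mistralai/Devstral-Small-2505"  # placeholder; the Space's actual checkpoint may differ

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # assumed precision to fit GPU memory
    device_map="auto",
)

SP = "..."  # system prompt ending with the commit-format instructions shown in the hunk
```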