AiCoderv2 committed on
Commit
7fdac37
·
verified ·
1 Parent(s): b16db00

Update app.py from anycoder

Browse files
Files changed (1) hide show
  1. app.py +89 -33
app.py CHANGED
@@ -2,32 +2,54 @@ import gradio as gr
2
  import random
3
  import time
4
  from datetime import datetime
 
 
5
 
6
- # Simple AI Chatbot Model
7
- class SimpleChatbot:
8
  def __init__(self):
9
- self.responses = {
10
- "greetings": ["Hello!", "Hi there!", "Greetings!", "Nice to meet you!"],
11
- "farewells": ["Goodbye!", "See you later!", "Take care!", "Farewell!"],
12
- "thanks": ["You're welcome!", "No problem!", "Happy to help!", "Anytime!"],
13
- "questions": ["That's an interesting question!", "Let me think about that...", "I'll get back to you on that!"],
14
- "default": ["I see.", "Interesting!", "Tell me more.", "Go on...", "I'm listening."]
15
- }
 
 
 
 
16
 
17
  def respond(self, message, history):
18
- message = message.lower()
19
-
20
- # Simple intent detection
21
- if any(word in message for word in ["hi", "hello", "hey", "greetings"]):
22
- response = random.choice(self.responses["greetings"])
23
- elif any(word in message for word in ["bye", "goodbye", "farewell", "see you"]):
24
- response = random.choice(self.responses["farewells"])
25
- elif any(word in message for word in ["thank", "thanks", "appreciate"]):
26
- response = random.choice(self.responses["thanks"])
27
- elif "?" in message:
28
- response = random.choice(self.responses["questions"])
29
- else:
30
- response = random.choice(self.responses["default"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  # Add timestamp to response
33
  timestamp = datetime.now().strftime("%H:%M:%S")
@@ -36,7 +58,7 @@ class SimpleChatbot:
36
  return full_response
37
 
38
  # Create chatbot instance
39
- chatbot = SimpleChatbot()
40
 
41
  # Custom theme for modern look
42
  custom_theme = gr.themes.Soft(
@@ -55,8 +77,10 @@ custom_theme = gr.themes.Soft(
55
 
56
  # Create Gradio interface
57
  with gr.Blocks() as demo:
58
- gr.Markdown("# 🤖 AI Chatbot")
59
- gr.Markdown("Built with anycoder - A simple AI chatbot for conversation")
 
 
60
 
61
  with gr.Row():
62
  with gr.Column(scale=3):
@@ -79,20 +103,29 @@ with gr.Blocks() as demo:
79
  with gr.Column(scale=1):
80
  gr.Markdown("## Features")
81
  gr.Markdown("""
82
- - ✅ Simple AI responses
83
- - ✅ Conversation history
84
  - ✅ Timestamped messages
85
- - ✅ User-friendly interface
 
86
  """)
87
 
88
  gr.Markdown("## How to Use")
89
  gr.Markdown("""
90
  1. Type your message in the input box
91
  2. Press Enter or click Send
92
- 3. The AI will respond automatically
93
  4. Continue the conversation naturally
94
  """)
95
 
 
 
 
 
 
 
 
 
96
  clear_btn = gr.Button("🔄 Clear Chat", variant="secondary")
97
 
98
  # Chatbot logic
@@ -129,8 +162,31 @@ demo.launch(
129
  theme=custom_theme,
130
  footer_links=[
131
  {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
132
- {"label": "Gradio Docs", "url": "https://gradio.app/docs"}
 
133
  ],
134
- title="AI Chatbot",
135
- description="A simple AI chatbot for conversation using Gradio"
136
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import random
3
  import time
4
  from datetime import datetime
5
+ from transformers import AutoTokenizer, AutoModelForCausalLM
6
+ import torch
7
 
8
+ # Load a small Hugging Face text generation model
9
+ class HuggingFaceChatbot:
10
  def __init__(self):
11
+ # Using a small model for demonstration
12
+ self.model_name = "microsoft/DialoGPT-small"
13
+ self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
14
+ self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
15
+
16
+ # Move model to GPU if available
17
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
18
+ self.model.to(self.device)
19
+
20
+ # Chat history
21
+ self.chat_history_ids = None
22
 
23
  def respond(self, message, history):
24
+ # Encode the new user input and add the eos_token
25
+ new_user_input_ids = self.tokenizer.encode(
26
+ message + self.tokenizer.eos_token,
27
+ return_tensors='pt'
28
+ ).to(self.device)
29
+
30
+ # Append the new user input tokens to the chat history
31
+ bot_input_ids = torch.cat([
32
+ self.chat_history_ids,
33
+ new_user_input_ids
34
+ ], dim=-1) if self.chat_history_ids is not None else new_user_input_ids
35
+
36
+ # Generate a response
37
+ self.chat_history_ids = self.model.generate(
38
+ bot_input_ids,
39
+ max_length=1000,
40
+ pad_token_id=self.tokenizer.eos_token_id,
41
+ no_repeat_ngram_size=3,
42
+ do_sample=True,
43
+ top_k=50,
44
+ top_p=0.95,
45
+ temperature=0.7
46
+ )
47
+
48
+ # Decode the response
49
+ response = self.tokenizer.decode(
50
+ self.chat_history_ids[:, bot_input_ids.shape[-1]:][0],
51
+ skip_special_tokens=True
52
+ )
53
 
54
  # Add timestamp to response
55
  timestamp = datetime.now().strftime("%H:%M:%S")
 
58
  return full_response
59
 
60
  # Create chatbot instance
61
+ chatbot = HuggingFaceChatbot()
62
 
63
  # Custom theme for modern look
64
  custom_theme = gr.themes.Soft(
 
77
 
78
  # Create Gradio interface
79
  with gr.Blocks() as demo:
80
+ gr.Markdown("# 🤖 AI Chatbot with Hugging Face")
81
+ gr.Markdown("""
82
+ **Built with anycoder** - A conversational AI chatbot using Hugging Face's DialoGPT-small model
83
+ """)
84
 
85
  with gr.Row():
86
  with gr.Column(scale=3):
 
103
  with gr.Column(scale=1):
104
  gr.Markdown("## Features")
105
  gr.Markdown("""
106
+ - ✅ Powered by Hugging Face DialoGPT-small
107
+ - ✅ Context-aware conversation
108
  - ✅ Timestamped messages
109
+ - ✅ Modern, user-friendly interface
110
+ - ✅ GPU acceleration (if available)
111
  """)
112
 
113
  gr.Markdown("## How to Use")
114
  gr.Markdown("""
115
  1. Type your message in the input box
116
  2. Press Enter or click Send
117
+ 3. The AI will respond using the Hugging Face model
118
  4. Continue the conversation naturally
119
  """)
120
 
121
+ gr.Markdown("## Model Info")
122
+ gr.Markdown(f"""
123
+ - **Model**: DialoGPT-small
124
+ - **Device**: {chatbot.device}
125
+ - **Max Response Length**: 1000 tokens
126
+ - **Temperature**: 0.7
127
+ """)
128
+
129
  clear_btn = gr.Button("🔄 Clear Chat", variant="secondary")
130
 
131
  # Chatbot logic
 
162
  theme=custom_theme,
163
  footer_links=[
164
  {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
165
+ {"label": "Gradio Docs", "url": "https://gradio.app/docs"},
166
+ {"label": "Hugging Face", "url": "https://huggingface.co/"}
167
  ],
168
+ title="Hugging Face AI Chatbot",
169
+ description="A conversational AI chatbot using Hugging Face's DialoGPT-small model"
170
+ )
171
+ Key changes made:
172
+
173
+ 1. **Replaced the simple chatbot with Hugging Face model**:
174
+ - Added `transformers` import for AutoTokenizer and AutoModelForCausalLM
175
+ - Created `HuggingFaceChatbot` class using DialoGPT-small model
176
+ - Added proper model loading and GPU support
177
+
178
+ 2. **Enhanced the interface**:
179
+ - Updated title to reflect Hugging Face integration
180
+ - Added model information section showing device and parameters
181
+ - Added Hugging Face link to footer
182
+
183
+ 3. **Improved response generation**:
184
+ - Uses proper tokenization and generation
185
+ - Maintains conversation context with chat history
186
+ - Better response quality with temperature and sampling
187
+
188
+ 4. **Added technical details**:
189
+ - Shows which device (CPU/GPU) is being used
190
+ - Displays model parameters in the sidebar
191
+
192
+ The application now uses a real AI model from Hugging Face while maintaining the same user-friendly interface and conversation flow. The DialoGPT-small model is a good choice as it's relatively lightweight but provides much better conversational abilities than the simple rule-based chatbot.