sebinxj committed on
Commit
a15e558
·
verified ·
1 Parent(s): dee02ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -29
app.py CHANGED
@@ -1,12 +1,12 @@
1
  import gradio as gr
2
  import re
3
  from datetime import datetime
4
- from transformers import pipeline, Conversation
5
 
6
- # Initialize the conversational AI
7
  chatbot = pipeline(
8
  "conversational",
9
- model="facebook/blenderbot-400M-distill", # More conversational model
10
  max_length=200
11
  )
12
 
@@ -33,8 +33,11 @@ def initialize_state():
33
  "stage": ChatState.NORMAL,
34
  "return_product": None,
35
  "receipt": None,
36
- "conversation": Conversation(),
37
- "error_count": 0
 
 
 
38
  }
39
 
40
  # Helper functions
@@ -42,14 +45,25 @@ def process_return(product):
42
  # Simulate return processing
43
  return f"✅ Return processed for your {product}!\nConfirmation sent to your email."
44
 
45
- def handle_return_flow(user_input, state, conversation):
 
 
 
 
 
 
 
 
 
 
 
46
  """Natural conversation handling for return process"""
47
  response = ""
48
  new_stage = state["stage"]
49
 
50
  # Detect return request naturally
51
  if state["stage"] == ChatState.NORMAL:
52
- if re.search(r'\b(return|refund|exchange|send back)\b', user_input.lower()):
53
  state["stage"] = ChatState.RETURN_DETECTED
54
  response = "I'd be happy to help with your return! Could you tell me which product you'd like to return?"
55
 
@@ -114,7 +128,6 @@ def handle_return_flow(user_input, state, conversation):
114
  response += "\n\nIs there anything else I can help you with today?"
115
  # Reset conversation but keep context
116
  state = initialize_state()
117
- state["conversation"] = conversation # Maintain conversation history
118
 
119
  # Disengagement
120
  elif state["stage"] == ChatState.DISENGAGED:
@@ -124,34 +137,22 @@ def handle_return_flow(user_input, state, conversation):
124
 
125
  def chat_fn(user_input, chat_history, state):
126
  """Main chat function with natural conversation handling"""
127
- # Initialize conversation if needed
128
- if not state["conversation"].past_user_inputs:
129
- state["conversation"].add_user_input("Hello!")
130
- state["conversation"] = chatbot(state["conversation"])
131
-
132
- # Add user message to conversation
133
- state["conversation"].add_user_input(user_input)
134
 
135
- # Handle return flow naturally within conversation
136
- return_response, state = handle_return_flow(user_input, state, state["conversation"])
137
-
138
- # If we have a specific response from return flow, use it
139
  if return_response:
 
140
  response = return_response
141
- state["conversation"].append_response(response)
142
  else:
143
- # Generate conversational response
144
- state["conversation"] = chatbot(state["conversation"])
145
- response = state["conversation"].generated_responses[-1]
 
146
 
147
- # Update chat history
 
148
  chat_history.append((user_input, response))
149
 
150
- # Reset state if we completed return process
151
- if state["stage"] == ChatState.RETURN_PROCESSING:
152
- state = initialize_state()
153
- state["conversation"].append_response(response) # Maintain context
154
-
155
  return "", chat_history, state
156
 
157
  # Gradio interface with natural conversation design
 
1
  import gradio as gr
2
  import re
3
  from datetime import datetime
4
+ from transformers import pipeline
5
 
6
+ # Initialize the conversational AI with a model that works in Hugging Face Spaces
7
# Initialize the conversational AI model.
# NOTE(review): the "conversational" pipeline expects Conversation objects
# (and was removed from transformers in v4.42), yet chat_fn calls
# chatbot(prompt, max_length=200) with a plain string and reads
# result[0]['generated_text'] — that is the text2text-generation
# interface, which also matches BlenderBot's seq2seq architecture.
chatbot = pipeline(
    "text2text-generation",
    model="facebook/blenderbot-400M-distill",  # lightweight dialogue model that fits HF Spaces
    max_length=200,
)
12
 
 
33
  "stage": ChatState.NORMAL,
34
  "return_product": None,
35
  "receipt": None,
36
+ "name": None,
37
+ "purchase_date": None,
38
+ "email": None,
39
+ "error_count": 0,
40
+ "conversation_history": [] # Store conversation as tuples
41
  }
42
 
43
  # Helper functions
 
45
  # Simulate return processing
46
  return f"✅ Return processed for your {product}!\nConfirmation sent to your email."
47
 
48
def is_return_request(text):
    """Return True when *text* contains a return/refund-style keyword.

    Matching is case-insensitive and anchored on word boundaries, so
    e.g. "returned" does not trigger a match.
    """
    keywords = r'\b(return|refund|exchange|send back)\b'
    return re.search(keywords, text.lower()) is not None
50
+
51
def generate_chat_prompt(history, new_input):
    """Build a conversational prompt for the AI model.

    *history* is a sequence of (user, bot) message pairs; the returned
    string replays each turn and ends with an open "Assistant:" cue for
    the model to complete.
    """
    turns = [f"User: {user}\nAssistant: {bot}\n" for user, bot in history]
    turns.append(f"User: {new_input}\nAssistant:")
    return "".join(turns)
58
+
59
+ def handle_return_flow(user_input, state):
60
  """Natural conversation handling for return process"""
61
  response = ""
62
  new_stage = state["stage"]
63
 
64
  # Detect return request naturally
65
  if state["stage"] == ChatState.NORMAL:
66
+ if is_return_request(user_input):
67
  state["stage"] = ChatState.RETURN_DETECTED
68
  response = "I'd be happy to help with your return! Could you tell me which product you'd like to return?"
69
 
 
128
  response += "\n\nIs there anything else I can help you with today?"
129
  # Reset conversation but keep context
130
  state = initialize_state()
 
131
 
132
  # Disengagement
133
  elif state["stage"] == ChatState.DISENGAGED:
 
137
 
138
  def chat_fn(user_input, chat_history, state):
139
  """Main chat function with natural conversation handling"""
140
+ # First check if we're in a return flow
141
+ return_response, state = handle_return_flow(user_input, state)
 
 
 
 
 
142
 
 
 
 
 
143
  if return_response:
144
+ # We have a response from the return flow
145
  response = return_response
 
146
  else:
147
+ # Generate conversational response using AI
148
+ prompt = generate_chat_prompt(state["conversation_history"], user_input)
149
+ result = chatbot(prompt, max_length=200)
150
+ response = result[0]['generated_text'].split("Assistant:")[-1].strip()
151
 
152
+ # Update conversation history
153
+ state["conversation_history"].append((user_input, response))
154
  chat_history.append((user_input, response))
155
 
 
 
 
 
 
156
  return "", chat_history, state
157
 
158
  # Gradio interface with natural conversation design