youssefleb committed (verified)
Commit 414c707 · 1 Parent(s): 407dc12

Update mcp_servers.py

Files changed (1)
  1. mcp_servers.py +11 -5
mcp_servers.py CHANGED
@@ -1,4 +1,4 @@
-# mcp_servers.py (FIXED: Guarantees 4-value return tuple)
+# mcp_servers.py (FIXED: Added OpenAI & Nebius Support to get_llm_response)
 import asyncio
 import json
 import re
@@ -94,7 +94,6 @@ class AgentCalibrator:
         if not self.sponsor_llms:
             raise Exception("AgentCalibrator cannot run: No LLM clients are configured.")

-        # If only one model, return default plan + empty lists for details/usage
         if len(self.sponsor_llms) == 1:
             default_llm = self.sponsor_llms[0]
             print("Only one LLM available. Skipping calibration.")
@@ -103,7 +102,6 @@ class AgentCalibrator:
                 "Implementer": {"persona": config.CALIBRATION_CONFIG["roles_to_test"]["Implementer"], "llm": default_llm},
                 "Monitor": {"persona": config.CALIBRATION_CONFIG["roles_to_test"]["Monitor"], "llm": default_llm}
             }
-            # MUST RETURN 4 VALUES
             return plan, error_log, [], []

         roles_to_test = {role: PERSONAS_DATA[key]["description"] for role, key in config.CALIBRATION_CONFIG["roles_to_test"].items()}
@@ -154,7 +152,6 @@ class AgentCalibrator:
             "Monitor": {"persona": config.CALIBRATION_CONFIG["roles_to_test"]["Monitor"], "llm": best_llms["Monitor"]}
         }
         print(f"Calibration complete (live). Team plan: {team_plan}")
-        # MUST RETURN 4 VALUES
         return team_plan, error_log, detailed_results, all_usage_stats

     async def run_calibration_test(self, problem, role, llm_name, persona, test_problem):
@@ -169,6 +166,7 @@ class AgentCalibrator:
             "role": role, "llm": llm_name, "score": score, "output": solution, "usage_gen": gen_usage, "usage_eval": eval_usage
         }

+# --- UPDATED: Handles OpenAI and Nebius ---
 async def get_llm_response(client_name: str, client, system_prompt: str, user_prompt: str) -> Tuple[str, dict]:
     """Returns (text_response, usage_dict)"""
     usage = {"model": client_name, "input": 0, "output": 0}
@@ -191,10 +189,18 @@ async def get_llm_response(client_name: str, client, system_prompt: str, user_pr
         usage["output"] = response.usage.output_tokens
         return response.content[0].text, usage

+    # --- THIS IS THE PART THAT WAS MISSING OR INCOMPLETE ---
     elif client_name in ["SambaNova", "OpenAI", "Nebius"]:
+
+        # Dynamically get the correct model ID from config.py
         model_id = config.MODELS.get(client_name, {}).get("default", "gpt-4o-mini")
+
         completion = await client.chat.completions.create(
-            model=model_id, messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+            model=model_id,
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt}
+            ]
         )
         if hasattr(completion, "usage"):
             usage["input"] = completion.usage.prompt_tokens