Commit 62da328 · initial update
Parent(s):

This view is limited to 50 files because it contains too many changes.
- .gitignore +5 -0
- README.md +45 -0
- deep-swarm/.env_template +25 -0
- deep-swarm/camel/__init__.py +25 -0
- deep-swarm/camel/__pycache__/__init__.cpython-311.pyc +0 -0
- deep-swarm/camel/__pycache__/generators.cpython-311.pyc +0 -0
- deep-swarm/camel/__pycache__/human.cpython-311.pyc +0 -0
- deep-swarm/camel/__pycache__/logger.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__init__.py +44 -0
- deep-swarm/camel/agents/__pycache__/__init__.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/base.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/chat_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/critic_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/embodied_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/knowledge_graph_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/role_assignment_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/search_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/__pycache__/task_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/base.py +29 -0
- deep-swarm/camel/agents/chat_agent.py +1423 -0
- deep-swarm/camel/agents/critic_agent.py +202 -0
- deep-swarm/camel/agents/deductive_reasoner_agent.py +303 -0
- deep-swarm/camel/agents/embodied_agent.py +201 -0
- deep-swarm/camel/agents/knowledge_graph_agent.py +259 -0
- deep-swarm/camel/agents/role_assignment_agent.py +141 -0
- deep-swarm/camel/agents/search_agent.py +133 -0
- deep-swarm/camel/agents/task_agent.py +410 -0
- deep-swarm/camel/agents/tool_agents/__init__.py +20 -0
- deep-swarm/camel/agents/tool_agents/__pycache__/__init__.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/tool_agents/__pycache__/base.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/tool_agents/__pycache__/hugging_face_tool_agent.cpython-311.pyc +0 -0
- deep-swarm/camel/agents/tool_agents/base.py +39 -0
- deep-swarm/camel/agents/tool_agents/hugging_face_tool_agent.py +206 -0
- deep-swarm/camel/benchmarks/__init__.py +17 -0
- deep-swarm/camel/benchmarks/__pycache__/__init__.cpython-311.pyc +0 -0
- deep-swarm/camel/benchmarks/__pycache__/base.cpython-311.pyc +0 -0
- deep-swarm/camel/benchmarks/base.py +152 -0
- deep-swarm/camel/bots/__init__.py +34 -0
- deep-swarm/camel/bots/discord_app.py +138 -0
- deep-swarm/camel/bots/slack/__init__.py +30 -0
- deep-swarm/camel/bots/slack/models.py +158 -0
- deep-swarm/camel/bots/slack/slack_app.py +255 -0
- deep-swarm/camel/bots/telegram_bot.py +82 -0
- deep-swarm/camel/configs/__init__.py +76 -0
- deep-swarm/camel/configs/__pycache__/__init__.cpython-311.pyc +0 -0
- deep-swarm/camel/configs/__pycache__/anthropic_config.cpython-311.pyc +0 -0
- deep-swarm/camel/configs/__pycache__/base_config.cpython-311.pyc +0 -0
- deep-swarm/camel/configs/__pycache__/cohere_config.cpython-311.pyc +0 -0
- deep-swarm/camel/configs/__pycache__/deepseek_config.cpython-311.pyc +0 -0
- deep-swarm/camel/configs/__pycache__/gemini_config.cpython-311.pyc +0 -0
.gitignore
ADDED
@@ -0,0 +1,5 @@
+.dist
+deep-swarm/data
+deep-swarm/tmp
+deep-swarm/.env
+deep-swarm/utils/__pycache__/
README.md
ADDED
@@ -0,0 +1,45 @@
+# DeepSwarm
+
+## Overview
+
+DeepSwarm is a multi-agent framework based on [camel](https://github.com/camel-ai/camel/). It achieved open-source state-of-the-art performance on the [GAIA](https://huggingface.co/datasets/gaia-benchmark/GAIA) benchmark.
+
+## Quickstart
+
+It is recommended to run the code in a Linux environment.
+To get started, follow these steps:
+
+1. **Clone the GitHub repository:**
+
+   ```bash
+   $ git clone xxx
+   ```
+
+2. **Set up the Python environment:**
+
+   ```bash
+   $ conda create -n deepswarm python=3.11
+   $ conda activate deepswarm
+   ```
+
+3. **Install dependencies:**
+
+   ```bash
+   $ pip install -r requirements.txt
+   ```
+
+4. **Set API keys:** We use `dotenv` to manage API keys. Copy the `.env.example` file to `.env` and fill in the necessary API keys.
+
+5. **Run the demo code:**
+
+   ```bash
+   $ python run.py
+   ```
+
+## Reproduce the Results on GAIA
+
+We provide a script to reproduce the results on GAIA. Check the `run_gaia_roleplaying.py` file and run the following command:
+
+```bash
+$ python run_gaia_roleplaying.py
+```
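Step 4 of the Quickstart relies on `dotenv` but shows no code; below is a minimal sketch (assuming the `python-dotenv` package, which provides `load_dotenv`) of how the keys end up in the process environment at startup:

```python
# Minimal sketch of the Quickstart's dotenv step (assumes python-dotenv).
import os

from dotenv import load_dotenv

# Reads key=value pairs from ./.env into os.environ without overriding
# variables that are already set in the shell.
load_dotenv()

if not os.getenv("OPENAI_API_KEY"):
    raise SystemExit("OPENAI_API_KEY is not set; copy the template to .env")
```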
deep-swarm/.env_template
ADDED
@@ -0,0 +1,25 @@
+
+# OPENAI API
+OPENAI_API_KEY=""
+
+# Hugging Face API (https://huggingface.co/join)
+HF_TOKEN=""
+
+# Qwen API (https://help.aliyun.com/document_detail/611472.html)
+QWEN_API_KEY=""
+
+
+#===========================================
+# Tools & Services API
+#===========================================
+
+# Google Search API (https://developers.google.com/custom-search/v1/overview)
+GOOGLE_API_KEY=""
+SEARCH_ENGINE_ID=""
+
+# Chunkr API (https://chunkr.ai/)
+CHUNKR_API_KEY=""
+
+# Firecrawl API (https://www.firecrawl.dev/)
+FIRECRAWL_API_KEY=""
+
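The template declares every key the bundled tools expect; a small helper (hypothetical, not part of the commit) can fail fast when a key a run depends on was left blank. Which keys are mandatory depends on the tools you enable, so the calls below are only examples:

```python
# Hypothetical startup check (not in the commit): verify that the keys this
# run depends on are non-empty before any agent is constructed.
import os


def require_keys(*names: str) -> None:
    missing = [name for name in names if not os.getenv(name)]
    if missing:
        raise SystemExit(f"Missing API keys: {', '.join(missing)}")


require_keys("OPENAI_API_KEY")                      # core model backend
require_keys("GOOGLE_API_KEY", "SEARCH_ENGINE_ID")  # only if search is used
```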
deep-swarm/camel/__init__.py
ADDED
@@ -0,0 +1,25 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from camel.logger import disable_logging, enable_logging, set_log_level
+
+__version__ = '0.2.11'
+
+__all__ = [
+    '__version__',
+    'camel',
+    'disable_logging',
+    'enable_logging',
+    'set_log_level',
+]
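`camel/__init__.py` re-exports the logging switches from `camel.logger`; a usage sketch follows (the names come from the import above, their exact semantics are assumed):

```python
# Usage sketch for the re-exported logging controls; names are taken from
# the diff above, the behavior of camel.logger is assumed.
import camel

print(camel.__version__)      # -> '0.2.11'

camel.set_log_level("DEBUG")  # more verbose output while developing
camel.disable_logging()       # silence the framework
camel.enable_logging()        # turn it back on
```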
deep-swarm/camel/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (393 Bytes).

deep-swarm/camel/__pycache__/generators.cpython-311.pyc
ADDED
Binary file (18 kB).

deep-swarm/camel/__pycache__/human.cpython-311.pyc
ADDED
Binary file (6.13 kB).

deep-swarm/camel/__pycache__/logger.cpython-311.pyc
ADDED
Binary file (5.4 kB).
deep-swarm/camel/agents/__init__.py
ADDED
@@ -0,0 +1,44 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .base import BaseAgent
+from .chat_agent import ChatAgent
+from .critic_agent import CriticAgent
+from .embodied_agent import EmbodiedAgent
+from .knowledge_graph_agent import KnowledgeGraphAgent
+from .role_assignment_agent import RoleAssignmentAgent
+from .search_agent import SearchAgent
+from .task_agent import (
+    TaskCreationAgent,
+    TaskPlannerAgent,
+    TaskPrioritizationAgent,
+    TaskSpecifyAgent,
+)
+from .tool_agents.base import BaseToolAgent
+from .tool_agents.hugging_face_tool_agent import HuggingFaceToolAgent
+
+__all__ = [
+    'BaseAgent',
+    'ChatAgent',
+    'TaskSpecifyAgent',
+    'TaskPlannerAgent',
+    'TaskCreationAgent',
+    'TaskPrioritizationAgent',
+    'CriticAgent',
+    'BaseToolAgent',
+    'HuggingFaceToolAgent',
+    'EmbodiedAgent',
+    'RoleAssignmentAgent',
+    'SearchAgent',
+    'KnowledgeGraphAgent',
+]
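Because the package re-exports its agents in `__all__`, callers can import them flatly instead of reaching into submodules; a quick usage example:

```python
# The __all__ list above makes these top-level imports equivalent to the
# deeper submodule paths used inside the package.
from camel.agents import ChatAgent, CriticAgent, TaskSpecifyAgent
```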
deep-swarm/camel/agents/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (1.13 kB).

deep-swarm/camel/agents/__pycache__/base.cpython-311.pyc
ADDED
Binary file (1.12 kB).

deep-swarm/camel/agents/__pycache__/chat_agent.cpython-311.pyc
ADDED
Binary file (52.1 kB).

deep-swarm/camel/agents/__pycache__/critic_agent.cpython-311.pyc
ADDED
Binary file (8.66 kB).

deep-swarm/camel/agents/__pycache__/embodied_agent.cpython-311.pyc
ADDED
Binary file (8.93 kB).

deep-swarm/camel/agents/__pycache__/knowledge_graph_agent.cpython-311.pyc
ADDED
Binary file (10.1 kB).

deep-swarm/camel/agents/__pycache__/role_assignment_agent.cpython-311.pyc
ADDED
Binary file (6.47 kB).

deep-swarm/camel/agents/__pycache__/search_agent.cpython-311.pyc
ADDED
Binary file (5.37 kB).

deep-swarm/camel/agents/__pycache__/task_agent.cpython-311.pyc
ADDED
Binary file (16.9 kB).
deep-swarm/camel/agents/base.py
ADDED
@@ -0,0 +1,29 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from abc import ABC, abstractmethod
+from typing import Any
+
+
+class BaseAgent(ABC):
+    r"""An abstract base class for all CAMEL agents."""
+
+    @abstractmethod
+    def reset(self, *args: Any, **kwargs: Any) -> Any:
+        r"""Resets the agent to its initial state."""
+        pass
+
+    @abstractmethod
+    def step(self, *args: Any, **kwargs: Any) -> Any:
+        r"""Performs a single step of the agent."""
+        pass
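`BaseAgent` only fixes the `reset`/`step` interface; a minimal sketch of a concrete subclass (illustrative only, not from the commit) shows what every CAMEL agent must provide before it can be instantiated:

```python
# Illustrative subclass of BaseAgent (not part of the commit): both abstract
# methods must be overridden before the class can be instantiated.
from typing import Any

from camel.agents.base import BaseAgent


class EchoAgent(BaseAgent):
    """Toy agent that records inputs and echoes them back."""

    def __init__(self) -> None:
        self.history: list[str] = []

    def reset(self, *args: Any, **kwargs: Any) -> None:
        """Drop all recorded inputs."""
        self.history.clear()

    def step(self, message: str) -> str:
        """Record the input and return it unchanged."""
        self.history.append(message)
        return message
```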
deep-swarm/camel/agents/chat_agent.py
ADDED
@@ -0,0 +1,1423 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+import json
+# import logging
+import re
+import uuid
+from collections import defaultdict
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    Union,
+)
+
+from loguru import logger
+
+from openai.types.chat import ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_message_tool_call import Function
+from pydantic import BaseModel
+
+from camel.agents.base import BaseAgent
+from camel.memories import (
+    AgentMemory,
+    ChatHistoryMemory,
+    MemoryRecord,
+    ScoreBasedContextCreator,
+)
+from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
+from camel.models import (
+    BaseModelBackend,
+    ModelFactory,
+    ModelManager,
+    ModelProcessingError,
+)
+from camel.responses import ChatAgentResponse
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelPlatformType,
+    ModelType,
+    OpenAIBackendRole,
+    RoleType,
+)
+from camel.utils import (
+    func_string_to_callable,
+    get_model_encoding,
+    get_pydantic_object_schema,
+    json_to_function_code,
+)
+
+if TYPE_CHECKING:
+    from openai import Stream
+
+    from camel.terminators import ResponseTerminator
+    from camel.toolkits import FunctionTool
+
+
+# logger = logging.getLogger(__name__)
+
+# AgentOps decorator setting
+try:
+    import os
+
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+class FunctionCallingRecord(BaseModel):
+    r"""Historical records of functions called in the conversation.
+
+    Attributes:
+        func_name (str): The name of the function being called.
+        args (Dict[str, Any]): The dictionary of arguments passed to
+            the function.
+        result (Any): The execution result of calling this function.
+    """
+
+    func_name: str
+    args: Dict[str, Any]
+    result: Any
+
+    def __str__(self) -> str:
+        r"""Overridden version of the string function.
+
+        Returns:
+            str: Modified string to represent the function calling.
+        """
+        return (
+            f"Function Execution: {self.func_name}\n"
+            f"\tArgs: {self.args}\n"
+            f"\tResult: {self.result}"
+        )
+
+    def as_dict(self) -> dict[str, Any]:
+        r"""Returns the function calling record as a dictionary.
+
+        Returns:
+            dict[str, Any]: The function calling record as a dictionary.
+        """
+        return self.model_dump()
+
+
+@track_agent(name="ChatAgent")
+class ChatAgent(BaseAgent):
+    r"""Class for managing conversations of CAMEL Chat Agents.
+
+    Args:
+        system_message (Union[BaseMessage, str], optional): The system message
+            for the chat agent.
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`ModelPlatformType.DEFAULT`
+            with `ModelType.DEFAULT`)
+        memory (AgentMemory, optional): The agent memory for managing chat
+            messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
+            (default: :obj:`None`)
+        message_window_size (int, optional): The maximum number of previous
+            messages to include in the context window. If `None`, no windowing
+            is performed. (default: :obj:`None`)
+        token_limit (int, optional): The maximum number of tokens in a context.
+            The context will be automatically pruned to fulfill the limitation.
+            If `None`, it will be set according to the backend model.
+            (default: :obj:`None`)
+        output_language (str, optional): The language to be output by the
+            agent. (default: :obj:`None`)
+        tools (List[FunctionTool], optional): List of available
+            :obj:`FunctionTool`. (default: :obj:`None`)
+        external_tools (List[FunctionTool], optional): List of external tools
+            (:obj:`FunctionTool`) bound to one chat agent. When these tools
+            are called, the agent will directly return the request instead of
+            processing it. (default: :obj:`None`)
+        response_terminators (List[ResponseTerminator], optional): List of
+            :obj:`ResponseTerminator` bound to one chat agent.
+            (default: :obj:`None`)
+        scheduling_strategy (str): Name of the function that defines how to
+            select the next model in ModelManager. (default: :str:`round_robin`)
+    """
+
+    def __init__(
+        self,
+        system_message: Optional[Union[BaseMessage, str]] = None,
+        model: Optional[
+            Union[BaseModelBackend, List[BaseModelBackend]]
+        ] = None,
+        memory: Optional[AgentMemory] = None,
+        message_window_size: Optional[int] = None,
+        token_limit: Optional[int] = None,
+        output_language: Optional[str] = None,
+        tools: Optional[List[FunctionTool]] = None,
+        external_tools: Optional[List[FunctionTool]] = None,
+        response_terminators: Optional[List[ResponseTerminator]] = None,
+        scheduling_strategy: str = "round_robin",
+    ) -> None:
+        from copy import deepcopy
+        if isinstance(system_message, str):
+            system_message = BaseMessage.make_assistant_message(
+                role_name='Assistant', content=system_message
+            )
+
+        self.orig_sys_message: Optional[BaseMessage] = system_message
+        self._system_message: Optional[BaseMessage] = system_message
+        self.role_name: str = (
+            getattr(system_message, 'role_name', None) or "assistant"
+        )
+        self.role_type: RoleType = (
+            getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
+        )
+        self.model_backend = ModelManager(
+            model
+            if model is not None
+            else ModelFactory.create(
+                model_platform=ModelPlatformType.DEFAULT,
+                model_type=ModelType.DEFAULT,
+            ),
+            scheduling_strategy=scheduling_strategy,
+        )
+
+        self.model_type = self.model_backend.model_type
+
+        # Tool registration
+        external_tools = external_tools or []
+        tools = tools or []
+        all_tools = tools + external_tools
+        self.external_tool_names = [
+            tool.get_function_name() for tool in external_tools
+        ]
+        self.func_dict = {
+            tool.get_function_name(): tool.func for tool in all_tools
+        }
+        self.tool_dict = {tool.get_function_name(): tool for tool in all_tools}
+        self._all_tools = all_tools
+
+        # If the user set tools from `ChatAgent`, it will override the
+        # configured tools in `BaseModelBackend`.
+        if all_tools:
+            # logger.warning(
+            #     "Overriding the configured tools in `BaseModelBackend` with the tools from `ChatAgent`."
+            # )
+            tool_schema_list = [
+                tool.get_openai_tool_schema() for tool in all_tools
+            ]
+            self.model_backend.model_config_dict['tools'] = tool_schema_list
+            self.tool_schema_list = tool_schema_list
+
+        from copy import deepcopy
+        self.model_config_dict = deepcopy(self.model_backend.model_config_dict)
+
+        self.model_token_limit = token_limit or self.model_backend.token_limit
+        context_creator = ScoreBasedContextCreator(
+            self.model_backend.token_counter,
+            self.model_token_limit,
+        )
+        self.memory: AgentMemory = memory or ChatHistoryMemory(
+            context_creator, window_size=message_window_size
+        )
+
+        self.output_language: Optional[str] = output_language
+        if self.output_language is not None:
+            self.set_output_language(self.output_language)
+
+        self.terminated: bool = False
+        self.response_terminators = response_terminators or []
+        self.init_messages()
+
+        self.tool_prompt_added = False
+
+    # ruff: noqa: E501
+    def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
+        r"""Generates a tool prompt based on the provided tool schema list.
+
+        Args:
+            tool_schema_list (List[Dict]): A list of dictionaries, each
+                containing a tool schema.
+
+        Returns:
+            str: A string representing the tool prompt.
+        """
+        tool_prompts = []
+
+        for tool in tool_schema_list:
+            tool_info = tool['function']
+            tool_name = tool_info['name']
+            tool_description = tool_info['description']
+            tool_json = json.dumps(tool_info, indent=4)
+
+            prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n"
+            tool_prompts.append(prompt)
+
+        tool_prompt_str = "\n".join(tool_prompts)
+
+        final_prompt = f'''
+# Tool prompt
+TOOL_PROMPT = f"""
+You have access to the following functions:
+
+{tool_prompt_str}
+
+If you choose to call a function ONLY reply in the following format with no
+prefix or suffix:
+
+<function=example_function_name>{{"example_name": "example_value"}}
+</function>
+
+Reminder:
+- Function calls MUST follow the specified format, start with <function=
+  and end with </function>
+- Required parameters MUST be specified
+- Only call one function at a time
+- Put the entire function call reply on one line
+- If there is no function call available, answer the question like normal
+  with your current knowledge and do not tell the user about function calls
+"""
+'''
+        return final_prompt
+
+    def _parse_tool_response(self, response: str):
+        r"""Parses the tool response to extract the function name and
+        arguments.
+
+        Args:
+            response (str): The response from the model containing the
+                function call.
+
+        Returns:
+            Optional[Dict[str, Any]]: The parsed function name and arguments
+                if found, otherwise :obj:`None`.
+        """
+        function_regex = r"<function=(\w+)>(.*?)</function>"
+        match = re.search(function_regex, response)
+
+        if match:
+            function_name, args_string = match.groups()
+            try:
+                args = json.loads(args_string)
+                return {"function": function_name, "arguments": args}
+            except json.JSONDecodeError as error:
+                print(f"Error parsing function arguments: {error}")
+                return None
+        return None
+
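(Editor's aside: the `<function=...>` text format above is what `_generate_tool_prompt` instructs models without native tool calling to emit, and what `_parse_tool_response` extracts. A standalone sketch using the same regex; `search_wiki` is a made-up tool name:)

```python
# Standalone illustration of the tool-call wire format parsed above; the
# regex is copied from _parse_tool_response.
import json
import re

FUNCTION_REGEX = r"<function=(\w+)>(.*?)</function>"

reply = '<function=search_wiki>{"query": "GAIA benchmark"}</function>'
match = re.search(FUNCTION_REGEX, reply)
if match:
    name, args_string = match.groups()
    print(name, json.loads(args_string))
    # -> search_wiki {'query': 'GAIA benchmark'}
```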
| 322 |
+
def reset(self):
|
| 323 |
+
r"""Resets the :obj:`ChatAgent` to its initial state."""
|
| 324 |
+
self.terminated = False
|
| 325 |
+
self.init_messages()
|
| 326 |
+
for terminator in self.response_terminators:
|
| 327 |
+
terminator.reset()
|
| 328 |
+
|
| 329 |
+
@property
|
| 330 |
+
def system_message(self) -> Optional[BaseMessage]:
|
| 331 |
+
r"""The getter method for the property :obj:`system_message`.
|
| 332 |
+
|
| 333 |
+
Returns:
|
| 334 |
+
Optional[BaseMessage]: The system message of this agent if set,
|
| 335 |
+
else :obj:`None`.
|
| 336 |
+
"""
|
| 337 |
+
return self._system_message
|
| 338 |
+
|
| 339 |
+
@system_message.setter
|
| 340 |
+
def system_message(self, message: BaseMessage) -> None:
|
| 341 |
+
r"""The setter method for the property :obj:`system_message`.
|
| 342 |
+
|
| 343 |
+
Args:
|
| 344 |
+
message (BaseMessage): The message to be set as the
|
| 345 |
+
new system message of this agent.
|
| 346 |
+
"""
|
| 347 |
+
self._system_message = message
|
| 348 |
+
|
| 349 |
+
def is_tools_added(self) -> bool:
|
| 350 |
+
r"""Whether OpenAI function calling is enabled for this agent.
|
| 351 |
+
|
| 352 |
+
Returns:
|
| 353 |
+
bool: Whether OpenAI function calling is enabled for this
|
| 354 |
+
agent, determined by whether the dictionary of tools
|
| 355 |
+
is empty.
|
| 356 |
+
"""
|
| 357 |
+
return len(self.func_dict) > 0
|
| 358 |
+
|
| 359 |
+
def update_memory(
|
| 360 |
+
self, message: BaseMessage, role: OpenAIBackendRole
|
| 361 |
+
) -> None:
|
| 362 |
+
r"""Updates the agent memory with a new message.
|
| 363 |
+
|
| 364 |
+
Args:
|
| 365 |
+
message (BaseMessage): The new message to add to the stored
|
| 366 |
+
messages.
|
| 367 |
+
role (OpenAIBackendRole): The backend role type.
|
| 368 |
+
"""
|
| 369 |
+
self.memory.write_record(
|
| 370 |
+
MemoryRecord(message=message, role_at_backend=role)
|
| 371 |
+
)
|
| 372 |
+
|
| 373 |
+
def set_output_language(self, output_language: str) -> BaseMessage:
|
| 374 |
+
r"""Sets the output language for the system message. This method
|
| 375 |
+
updates the output language for the system message. The output
|
| 376 |
+
language determines the language in which the output text should be
|
| 377 |
+
generated.
|
| 378 |
+
|
| 379 |
+
Args:
|
| 380 |
+
output_language (str): The desired output language.
|
| 381 |
+
|
| 382 |
+
Returns:
|
| 383 |
+
BaseMessage: The updated system message object.
|
| 384 |
+
"""
|
| 385 |
+
self.output_language = output_language
|
| 386 |
+
language_prompt = (
|
| 387 |
+
"\nRegardless of the input language, "
|
| 388 |
+
f"you must output text in {output_language}."
|
| 389 |
+
)
|
| 390 |
+
if self.orig_sys_message is not None:
|
| 391 |
+
content = self.orig_sys_message.content + language_prompt
|
| 392 |
+
self._system_message = self.orig_sys_message.create_new_instance(
|
| 393 |
+
content
|
| 394 |
+
)
|
| 395 |
+
else:
|
| 396 |
+
self._system_message = BaseMessage.make_assistant_message(
|
| 397 |
+
role_name="Assistant",
|
| 398 |
+
content=language_prompt,
|
| 399 |
+
)
|
| 400 |
+
|
| 401 |
+
system_record = MemoryRecord(
|
| 402 |
+
message=self._system_message,
|
| 403 |
+
role_at_backend=OpenAIBackendRole.SYSTEM,
|
| 404 |
+
)
|
| 405 |
+
self.memory.clear()
|
| 406 |
+
self.memory.write_record(system_record)
|
| 407 |
+
return self._system_message
|
| 408 |
+
|
| 409 |
+
def get_info(
|
| 410 |
+
self,
|
| 411 |
+
session_id: Optional[str],
|
| 412 |
+
usage: Optional[Dict[str, int]],
|
| 413 |
+
termination_reasons: List[str],
|
| 414 |
+
num_tokens: int,
|
| 415 |
+
tool_calls: List[FunctionCallingRecord],
|
| 416 |
+
external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
|
| 417 |
+
) -> Dict[str, Any]:
|
| 418 |
+
r"""Returns a dictionary containing information about the chat session.
|
| 419 |
+
|
| 420 |
+
Args:
|
| 421 |
+
session_id (str, optional): The ID of the chat session.
|
| 422 |
+
usage (Dict[str, int], optional): Information about the usage of
|
| 423 |
+
the LLM model.
|
| 424 |
+
termination_reasons (List[str]): The reasons for the termination
|
| 425 |
+
of the chat session.
|
| 426 |
+
num_tokens (int): The number of tokens used in the chat session.
|
| 427 |
+
tool_calls (List[FunctionCallingRecord]): The list of function
|
| 428 |
+
calling records, containing the information of called tools.
|
| 429 |
+
external_tool_request
|
| 430 |
+
(Optional[ChatCompletionMessageToolCall], optional):
|
| 431 |
+
The tool calling request of external tools from the model.
|
| 432 |
+
These requests are directly returned to the user instead of
|
| 433 |
+
being processed by the agent automatically.
|
| 434 |
+
(default: :obj:`None`)
|
| 435 |
+
|
| 436 |
+
Returns:
|
| 437 |
+
Dict[str, Any]: The chat session information.
|
| 438 |
+
"""
|
| 439 |
+
return {
|
| 440 |
+
"id": session_id,
|
| 441 |
+
"usage": usage,
|
| 442 |
+
"termination_reasons": termination_reasons,
|
| 443 |
+
"num_tokens": num_tokens,
|
| 444 |
+
"tool_calls": tool_calls,
|
| 445 |
+
"external_tool_request": external_tool_request,
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
def init_messages(self) -> None:
|
| 449 |
+
r"""Initializes the stored messages list with the current system
|
| 450 |
+
message.
|
| 451 |
+
"""
|
| 452 |
+
if self._system_message is not None:
|
| 453 |
+
system_record = MemoryRecord(
|
| 454 |
+
message=self._system_message,
|
| 455 |
+
role_at_backend=OpenAIBackendRole.SYSTEM,
|
| 456 |
+
)
|
| 457 |
+
self.memory.clear()
|
| 458 |
+
self.memory.write_record(system_record)
|
| 459 |
+
else:
|
| 460 |
+
self.memory.clear()
|
| 461 |
+
|
| 462 |
+
def _transform_function_calling_format(self, openai_messages: List[dict]):
|
| 463 |
+
r"""Used in deepseek-chat backend. It can modify function calling records' format to match the deepseek-chat backend's format."""
|
| 464 |
+
from copy import deepcopy
|
| 465 |
+
_messages = deepcopy(openai_messages)
|
| 466 |
+
modified_messages = []
|
| 467 |
+
for message in _messages:
|
| 468 |
+
if message['role'] == 'function':
|
| 469 |
+
new_message = {
|
| 470 |
+
'role': 'tool',
|
| 471 |
+
'tool_call_id': message['name'],
|
| 472 |
+
'content': message['content']
|
| 473 |
+
}
|
| 474 |
+
modified_messages.append(new_message)
|
| 475 |
+
else:
|
| 476 |
+
modified_messages.append(message)
|
| 477 |
+
|
| 478 |
+
return modified_messages
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def record_message(self, message: BaseMessage) -> None:
|
| 482 |
+
r"""Records the externally provided message into the agent memory as if
|
| 483 |
+
it were an answer of the :obj:`ChatAgent` from the backend. Currently,
|
| 484 |
+
the choice of the critic is submitted with this method.
|
| 485 |
+
|
| 486 |
+
Args:
|
| 487 |
+
message (BaseMessage): An external message to be recorded in the
|
| 488 |
+
memory.
|
| 489 |
+
"""
|
| 490 |
+
self.update_memory(message, OpenAIBackendRole.ASSISTANT)
|
| 491 |
+
|
| 492 |
+
def step(
|
| 493 |
+
self,
|
| 494 |
+
input_message: Union[BaseMessage, str],
|
| 495 |
+
response_format: Optional[Type[BaseModel]] = None,
|
| 496 |
+
) -> ChatAgentResponse:
|
| 497 |
+
r"""Performs a single step in the chat session by generating a response
|
| 498 |
+
to the input message.
|
| 499 |
+
|
| 500 |
+
Args:
|
| 501 |
+
input_message (Union[BaseMessage, str]): The input message to the
|
| 502 |
+
agent. For BaseMessage input, its `role` field that specifies
|
| 503 |
+
the role at backend may be either `user` or `assistant` but it
|
| 504 |
+
will be set to `user` anyway since for the self agent any
|
| 505 |
+
incoming message is external. For str input, the `role_name` would be `User`.
|
| 506 |
+
response_format (Optional[Type[BaseModel]], optional): A pydantic
|
| 507 |
+
model class that includes value types and field descriptions
|
| 508 |
+
used to generate a structured response by LLM. This schema
|
| 509 |
+
helps in defining the expected output format. (default:
|
| 510 |
+
:obj:`None`)
|
| 511 |
+
|
| 512 |
+
Returns:
|
| 513 |
+
ChatAgentResponse: A struct containing the output messages,
|
| 514 |
+
a boolean indicating whether the chat session has terminated,
|
| 515 |
+
and information about the chat session.
|
| 516 |
+
"""
|
| 517 |
+
from copy import deepcopy
|
| 518 |
+
self.model_backend.model_config_dict = deepcopy(self.model_config_dict)
|
| 519 |
+
self.tool_dict = {tool.get_function_name(): tool for tool in self._all_tools}
|
| 520 |
+
if (
|
| 521 |
+
self.model_backend.model_config_dict.get("response_format")
|
| 522 |
+
and response_format
|
| 523 |
+
):
|
| 524 |
+
raise ValueError(
|
| 525 |
+
"The `response_format` parameter cannot be set both in "
|
| 526 |
+
"the model configuration and in the ChatAgent step."
|
| 527 |
+
)
|
| 528 |
+
|
| 529 |
+
if isinstance(input_message, str):
|
| 530 |
+
input_message = BaseMessage.make_user_message(
|
| 531 |
+
role_name='User', content=input_message
|
| 532 |
+
)
|
| 533 |
+
|
| 534 |
+
if "llama" in self.model_type.lower():
|
| 535 |
+
if (
|
| 536 |
+
self.model_backend.model_config_dict.get("tools", None)
|
| 537 |
+
and not self.tool_prompt_added
|
| 538 |
+
):
|
| 539 |
+
tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
|
| 540 |
+
|
| 541 |
+
tool_sys_msg = BaseMessage.make_assistant_message(
|
| 542 |
+
role_name="Assistant",
|
| 543 |
+
content=tool_prompt,
|
| 544 |
+
)
|
| 545 |
+
|
| 546 |
+
self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
|
| 547 |
+
self.tool_prompt_added = True
|
| 548 |
+
|
| 549 |
+
self.update_memory(input_message, OpenAIBackendRole.USER)
|
| 550 |
+
|
| 551 |
+
tool_call_records: List[FunctionCallingRecord] = []
|
| 552 |
+
while True:
|
| 553 |
+
# Check if token has exceeded
|
| 554 |
+
try:
|
| 555 |
+
openai_messages, num_tokens = self.memory.get_context()
|
| 556 |
+
except RuntimeError as e:
|
| 557 |
+
return self._step_token_exceed(
|
| 558 |
+
e.args[1], tool_call_records, "max_tokens_exceeded"
|
| 559 |
+
)
|
| 560 |
+
(
|
| 561 |
+
response,
|
| 562 |
+
output_messages,
|
| 563 |
+
finish_reasons,
|
| 564 |
+
usage_dict,
|
| 565 |
+
response_id,
|
| 566 |
+
) = self._step_model_response(openai_messages, num_tokens)
|
| 567 |
+
# If the model response is not a function call, meaning the
|
| 568 |
+
# model has generated a message response, break the loop
|
| 569 |
+
if (
|
| 570 |
+
not self.is_tools_added()
|
| 571 |
+
or not isinstance(response, ChatCompletion)
|
| 572 |
+
or "</function>" not in response.choices[0].message.content # type: ignore[operator]
|
| 573 |
+
):
|
| 574 |
+
break
|
| 575 |
+
|
| 576 |
+
parsed_content = self._parse_tool_response(
|
| 577 |
+
response.choices[0].message.content # type: ignore[arg-type]
|
| 578 |
+
)
|
| 579 |
+
|
| 580 |
+
response.choices[0].message.tool_calls = [
|
| 581 |
+
ChatCompletionMessageToolCall(
|
| 582 |
+
id=str(uuid.uuid4()),
|
| 583 |
+
function=Function(
|
| 584 |
+
arguments=str(parsed_content["arguments"]).replace(
|
| 585 |
+
"'", '"'
|
| 586 |
+
),
|
| 587 |
+
name=str(parsed_content["function"]),
|
| 588 |
+
),
|
| 589 |
+
type="function",
|
| 590 |
+
)
|
| 591 |
+
]
|
| 592 |
+
|
| 593 |
+
# Check for external tool call
|
| 594 |
+
tool_call_request = response.choices[0].message.tool_calls[0]
|
| 595 |
+
if tool_call_request.function.name in self.external_tool_names:
|
| 596 |
+
# if model calls an external tool, directly return the
|
| 597 |
+
# request
|
| 598 |
+
info = self._step_get_info(
|
| 599 |
+
output_messages,
|
| 600 |
+
finish_reasons,
|
| 601 |
+
usage_dict,
|
| 602 |
+
response_id,
|
| 603 |
+
tool_call_records,
|
| 604 |
+
num_tokens,
|
| 605 |
+
tool_call_request,
|
| 606 |
+
)
|
| 607 |
+
return ChatAgentResponse(
|
| 608 |
+
msgs=output_messages,
|
| 609 |
+
terminated=self.terminated,
|
| 610 |
+
info=info,
|
| 611 |
+
)
|
| 612 |
+
|
| 613 |
+
# Normal function calling
|
| 614 |
+
tool_call_records.append(
|
| 615 |
+
self._step_tool_call_and_update(response)
|
| 616 |
+
)
|
| 617 |
+
|
| 618 |
+
if response_format is not None:
|
| 619 |
+
(
|
| 620 |
+
output_messages,
|
| 621 |
+
finish_reasons,
|
| 622 |
+
usage_dict,
|
| 623 |
+
response_id,
|
| 624 |
+
tool_call,
|
| 625 |
+
num_tokens,
|
| 626 |
+
) = self._structure_output_with_function(response_format)
|
| 627 |
+
tool_call_records.append(tool_call)
|
| 628 |
+
|
| 629 |
+
info = self._step_get_info(
|
| 630 |
+
output_messages,
|
| 631 |
+
finish_reasons,
|
| 632 |
+
usage_dict,
|
| 633 |
+
response_id,
|
| 634 |
+
tool_call_records,
|
| 635 |
+
num_tokens,
|
| 636 |
+
)
|
| 637 |
+
|
| 638 |
+
if len(output_messages) == 1:
|
| 639 |
+
# Auto record if the output result is a single message
|
| 640 |
+
self.record_message(output_messages[0])
|
| 641 |
+
else:
|
| 642 |
+
logger.warning(
|
| 643 |
+
"Multiple messages returned in `step()`, message won't be "
|
| 644 |
+
"recorded automatically. Please call `record_message()` "
|
| 645 |
+
"to record the selected message manually."
|
| 646 |
+
)
|
| 647 |
+
|
| 648 |
+
return ChatAgentResponse(
|
| 649 |
+
msgs=output_messages, terminated=self.terminated, info=info
|
| 650 |
+
)
|
| 651 |
+
|
| 652 |
+
else:
|
| 653 |
+
self.update_memory(input_message, OpenAIBackendRole.USER)
|
| 654 |
+
# try:
|
| 655 |
+
|
| 656 |
+
tool_call_records: List[FunctionCallingRecord] = [] # type: ignore[no-redef]
|
| 657 |
+
while True:
|
| 658 |
+
# Check if token has exceeded
|
| 659 |
+
try:
|
| 660 |
+
openai_messages, num_tokens = self.memory.get_context()
|
| 661 |
+
except RuntimeError as e:
|
| 662 |
+
return self._step_token_exceed(
|
| 663 |
+
e.args[1], tool_call_records, "max_tokens_exceeded"
|
| 664 |
+
)
|
| 665 |
+
|
| 666 |
+
(
|
| 667 |
+
response,
|
| 668 |
+
output_messages,
|
| 669 |
+
finish_reasons,
|
| 670 |
+
usage_dict,
|
| 671 |
+
response_id,
|
| 672 |
+
) = self._step_model_response(openai_messages, num_tokens)
|
| 673 |
+
# If the model response is not a function call, meaning the
|
| 674 |
+
# model has generated a message response, break the loop
|
| 675 |
+
if (
|
| 676 |
+
not self.is_tools_added()
|
| 677 |
+
or not isinstance(response, ChatCompletion)
|
| 678 |
+
or not response.choices[0].message.tool_calls
|
| 679 |
+
):
|
| 680 |
+
break
|
| 681 |
+
|
| 682 |
+
# Check for external tool call
|
| 683 |
+
tool_call_request = response.choices[0].message.tool_calls[0]
|
| 684 |
+
|
| 685 |
+
if tool_call_request.function.name in self.external_tool_names:
|
| 686 |
+
# if model calls an external tool, directly return the
|
| 687 |
+
# request
|
| 688 |
+
info = self._step_get_info(
|
| 689 |
+
output_messages,
|
| 690 |
+
finish_reasons,
|
| 691 |
+
usage_dict,
|
| 692 |
+
response_id,
|
| 693 |
+
tool_call_records,
|
| 694 |
+
num_tokens,
|
| 695 |
+
tool_call_request,
|
| 696 |
+
)
|
| 697 |
+
return ChatAgentResponse(
|
| 698 |
+
msgs=output_messages,
|
| 699 |
+
terminated=self.terminated,
|
| 700 |
+
info=info,
|
| 701 |
+
)
|
| 702 |
+
|
| 703 |
+
# Normal function calling
|
| 704 |
+
tool_call_records.append(
|
| 705 |
+
self._step_tool_call_and_update(response)
|
| 706 |
+
)
|
| 707 |
+
|
| 708 |
+
if (
|
| 709 |
+
response_format is not None
|
| 710 |
+
and self.model_type.support_native_tool_calling
|
| 711 |
+
):
|
| 712 |
+
(
|
| 713 |
+
output_messages,
|
| 714 |
+
finish_reasons,
|
| 715 |
+
usage_dict,
|
| 716 |
+
response_id,
|
| 717 |
+
tool_call,
|
| 718 |
+
num_tokens,
|
| 719 |
+
) = self._structure_output_with_function(response_format)
|
| 720 |
+
tool_call_records.append(tool_call)
|
| 721 |
+
|
| 722 |
+
info = self._step_get_info(
|
| 723 |
+
output_messages,
|
| 724 |
+
finish_reasons,
|
| 725 |
+
usage_dict,
|
| 726 |
+
response_id,
|
| 727 |
+
tool_call_records,
|
| 728 |
+
num_tokens,
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
if len(output_messages) == 1:
|
| 732 |
+
# Auto record if the output result is a single message
|
| 733 |
+
self.record_message(output_messages[0])
|
| 734 |
+
else:
|
| 735 |
+
logger.warning(
|
| 736 |
+
"Multiple messages returned in `step()`, message won't be "
|
| 737 |
+
"recorded automatically. Please call `record_message()` "
|
| 738 |
+
"to record the selected message manually."
|
| 739 |
+
)
|
| 740 |
+
|
| 741 |
+
return ChatAgentResponse(
|
| 742 |
+
msgs=output_messages, terminated=self.terminated, info=info
|
| 743 |
+
)
|
| 744 |
+
|
| 745 |
+
# except Exception as e:
|
| 746 |
+
# logger.error(e)
|
| 747 |
+
# breakpoint()
|
| 748 |
+
# raise e
|
| 749 |
+
|
| 750 |
+
async def step_async(
|
| 751 |
+
self,
|
| 752 |
+
input_message: Union[BaseMessage, str],
|
| 753 |
+
response_format: Optional[Type[BaseModel]] = None,
|
| 754 |
+
) -> ChatAgentResponse:
|
| 755 |
+
r"""Performs a single step in the chat session by generating a response
|
| 756 |
+
to the input message. This agent step can call async function calls.
|
| 757 |
+
|
| 758 |
+
Args:
|
| 759 |
+
input_message (Union[BaseMessage, str]): The input message to the
|
| 760 |
+
agent. For BaseMessage input, its `role` field that specifies
|
| 761 |
+
the role at backend may be either `user` or `assistant` but it
|
| 762 |
+
will be set to `user` anyway since for the self agent any
|
| 763 |
+
incoming message is external. For str input, the `role_name` would be `User`.
|
| 764 |
+
response_format (Optional[Type[BaseModel]], optional): A pydantic
|
| 765 |
+
model class that includes value types and field descriptions
|
| 766 |
+
used to generate a structured response by LLM. This schema
|
| 767 |
+
helps in defining the expected output format. (default:
|
| 768 |
+
:obj:`None`)
|
| 769 |
+
|
| 770 |
+
Returns:
|
| 771 |
+
ChatAgentResponse: A struct containing the output messages,
|
| 772 |
+
a boolean indicating whether the chat session has terminated,
|
| 773 |
+
and information about the chat session.
|
| 774 |
+
"""
|
| 775 |
+
if isinstance(input_message, str):
|
| 776 |
+
input_message = BaseMessage.make_user_message(
|
| 777 |
+
role_name='User', content=input_message
|
| 778 |
+
)
|
| 779 |
+
|
| 780 |
+
self.update_memory(input_message, OpenAIBackendRole.USER)
|
| 781 |
+
|
| 782 |
+
tool_call_records: List[FunctionCallingRecord] = []
|
| 783 |
+
while True:
|
| 784 |
+
try:
|
| 785 |
+
openai_messages, num_tokens = self.memory.get_context()
|
| 786 |
+
except RuntimeError as e:
|
| 787 |
+
return self._step_token_exceed(
|
| 788 |
+
e.args[1], tool_call_records, "max_tokens_exceeded"
|
| 789 |
+
)
|
| 790 |
+
|
| 791 |
+
(
|
| 792 |
+
response,
|
| 793 |
+
output_messages,
|
| 794 |
+
finish_reasons,
|
| 795 |
+
usage_dict,
|
| 796 |
+
response_id,
|
| 797 |
+
) = self._step_model_response(openai_messages, num_tokens)
|
| 798 |
+
|
| 799 |
+
if (
|
| 800 |
+
not self.is_tools_added()
|
| 801 |
+
or not isinstance(response, ChatCompletion)
|
| 802 |
+
or response.choices[0].message.tool_calls is None
|
| 803 |
+
):
|
| 804 |
+
break
|
| 805 |
+
|
| 806 |
+
# Check for external tool call
|
| 807 |
+
tool_call_request = response.choices[0].message.tool_calls[0]
|
| 808 |
+
if tool_call_request.function.name in self.external_tool_names:
|
| 809 |
+
# if model calls an external tool, directly return the request
|
| 810 |
+
info = self._step_get_info(
|
| 811 |
+
output_messages,
|
| 812 |
+
finish_reasons,
|
| 813 |
+
usage_dict,
|
| 814 |
+
response_id,
|
| 815 |
+
tool_call_records,
|
| 816 |
+
num_tokens,
|
| 817 |
+
tool_call_request,
|
| 818 |
+
)
|
| 819 |
+
return ChatAgentResponse(
|
| 820 |
+
msgs=output_messages, terminated=self.terminated, info=info
|
| 821 |
+
)
|
| 822 |
+
|
| 823 |
+
# Normal function calling
|
| 824 |
+
tool_call_records.append(
|
| 825 |
+
await self._step_tool_call_and_update_async(response)
|
| 826 |
+
)
|
| 827 |
+
|
| 828 |
+
if (
|
| 829 |
+
response_format is not None
|
| 830 |
+
and self.model_type.support_native_tool_calling
|
| 831 |
+
):
|
| 832 |
+
(
|
| 833 |
+
output_messages,
|
| 834 |
+
finish_reasons,
|
| 835 |
+
usage_dict,
|
| 836 |
+
response_id,
|
| 837 |
+
tool_call_record,
|
| 838 |
+
num_tokens,
|
| 839 |
+
) = self._structure_output_with_function(response_format)
|
| 840 |
+
tool_call_records.append(tool_call_record)
|
| 841 |
+
|
| 842 |
+
info = self._step_get_info(
|
| 843 |
+
output_messages,
|
| 844 |
+
finish_reasons,
|
| 845 |
+
usage_dict,
|
| 846 |
+
response_id,
|
| 847 |
+
tool_call_records,
|
| 848 |
+
num_tokens,
|
| 849 |
+
)
|
| 850 |
+
|
| 851 |
+
if len(output_messages) == 1:
|
| 852 |
+
# Auto record if the output result is a single message
|
| 853 |
+
self.record_message(output_messages[0])
|
| 854 |
+
else:
|
| 855 |
+
logger.warning(
|
| 856 |
+
"Multiple messages returned in `step()`, message won't be "
|
| 857 |
+
"recorded automatically. Please call `record_message()` to "
|
| 858 |
+
"record the selected message manually."
|
| 859 |
+
)
|
| 860 |
+
|
| 861 |
+
return ChatAgentResponse(
|
| 862 |
+
msgs=output_messages, terminated=self.terminated, info=info
|
| 863 |
+
)
|
| 864 |
+
|
| 865 |
+
def _step_tool_call_and_update(
|
| 866 |
+
self, response: ChatCompletion
|
| 867 |
+
) -> FunctionCallingRecord:
|
| 868 |
+
r"""Processes a function call within the chat completion response,
|
| 869 |
+
records the function call in the provided list of tool calls and
|
| 870 |
+
updates the memory of the current agent.
|
| 871 |
+
|
| 872 |
+
Args:
|
| 873 |
+
response (ChatCompletion): The response object from the chat
|
| 874 |
+
completion.
|
| 875 |
+
|
| 876 |
+
Returns:
|
| 877 |
+
FunctionCallingRecord: The record of calling the function.
|
| 878 |
+
"""
|
| 879 |
+
|
| 880 |
+
# Perform function calling
|
| 881 |
+
func_assistant_msg, func_result_msg, tool_call_record = (
|
| 882 |
+
self.step_tool_call(response)
|
| 883 |
+
)
|
| 884 |
+
|
| 885 |
+
# Update the messages
|
| 886 |
+
self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT)
|
| 887 |
+
self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
|
| 888 |
+
|
| 889 |
+
return tool_call_record
|
| 890 |
+
|
| 891 |
+
async def _step_tool_call_and_update_async(
|
| 892 |
+
self, response: ChatCompletion
|
| 893 |
+
) -> FunctionCallingRecord:
|
| 894 |
+
(
|
| 895 |
+
func_assistant_msg,
|
| 896 |
+
func_result_msg,
|
| 897 |
+
func_record,
|
| 898 |
+
) = await self.step_tool_call_async(response)
|
| 899 |
+
|
| 900 |
+
self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT)
|
| 901 |
+
self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
|
| 902 |
+
|
| 903 |
+
return func_record
|
| 904 |
+
|
| 905 |
+
    def _structure_output_with_function(
        self, response_format: Type[BaseModel]
    ) -> Tuple[
        List[BaseMessage],
        List[str],
        Dict[str, int],
        str,
        FunctionCallingRecord,
        int,
    ]:
        r"""Internal function of structuring the output of the agent based on
        the given output schema.

        Args:
            response_format (Type[BaseModel]): The output schema to use for
                structuring the output.

        Returns:
            Tuple[List[BaseMessage], List[str], Dict[str, int], str,
                FunctionCallingRecord, int]:
                A tuple containing the output messages, finish reasons, usage
                dictionary, response ID, function calling record, and number
                of tokens.
        """
        from camel.toolkits import FunctionTool

        schema_json = get_pydantic_object_schema(response_format)
        func_str = json_to_function_code(schema_json)
        func_callable = func_string_to_callable(func_str)
        func = FunctionTool(func_callable)

        original_func_dict = self.func_dict
        original_model_dict = self.model_backend.model_config_dict

        # Replace the original tools with the structuring function
        self.func_dict = {func.get_function_name(): func.func}
        self.tool_dict = {func.get_function_name(): func}
        self.model_backend.model_config_dict = original_model_dict.copy()
        self.model_backend.model_config_dict["tools"] = [
            func.get_openai_tool_schema()
        ]
        self.model_backend.model_config_dict["tool_choice"] = "required"

        openai_messages, num_tokens = self.memory.get_context()
        (
            response,
            output_messages,
            finish_reasons,
            usage_dict,
            response_id,
        ) = self._step_model_response(openai_messages, num_tokens)

        if isinstance(response, ChatCompletion):
            tool_call_record = self._step_tool_call_and_update(response)
        else:
            raise ValueError(
                "Structured output is not supported for stream responses."
            )

        for base_message_item in output_messages:
            base_message_item.content = str(tool_call_record.result)

        # Recover the original tools
        self.func_dict = original_func_dict
        self.model_backend.model_config_dict = original_model_dict

        return (
            output_messages,
            finish_reasons,
            usage_dict,
            response_id,
            tool_call_record,
            num_tokens,
        )
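A standalone sketch of the schema-to-tool conversion performed above. It assumes Pydantic v2 and uses illustrative names (ReviewSummary, build_structuring_tool); the real helpers (get_pydantic_object_schema, json_to_function_code, func_string_to_callable) additionally synthesize a Python callable, but the forced tool_choice idea is the same:

from pydantic import BaseModel


class ReviewSummary(BaseModel):
    sentiment: str
    score: int


def build_structuring_tool(schema_cls) -> dict:
    # Derive an OpenAI-style tool schema from the Pydantic model so the
    # backend can be forced to emit arguments matching the schema.
    return {
        "type": "function",
        "function": {
            "name": schema_cls.__name__,
            "description": f"Return a {schema_cls.__name__} object.",
            "parameters": schema_cls.model_json_schema(),
        },
    }


tool_schema = build_structuring_tool(ReviewSummary)
# Passing tools=[tool_schema] with tool_choice="required" makes the model
# call this single function, so its arguments parse back into ReviewSummary.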
    def _step_model_response(
        self,
        openai_messages: List[OpenAIMessage],
        num_tokens: int,
    ) -> tuple[
        Union[ChatCompletion, Stream],
        List[BaseMessage],
        List[str],
        Dict[str, int],
        str,
    ]:
        r"""Internal function for agent step model response."""

        response = None
        # Obtain the model's response, falling back to the next configured
        # model whenever the current one raises.
        for _ in range(len(self.model_backend.models)):
            try:
                response = self.model_backend.run(openai_messages)
                break
            except Exception as exc:
                logger.error(
                    f"An error occurred while running model "
                    f"{self.model_backend.model_type}, "
                    f"index: {self.model_backend.current_model_index}",
                    exc_info=exc,
                )
                continue
        if not response:
            raise ModelProcessingError(
                "Unable to process messages: none of the provided models "
                "ran successfully."
            )

        # logger.debug(
        #     f"Model {self.model_backend.model_type}, "
        #     f"index {self.model_backend.current_model_index}, "
        #     f"processed these messages: {openai_messages}"
        # )

        if isinstance(response, ChatCompletion):
            output_messages, finish_reasons, usage_dict, response_id = (
                self.handle_batch_response(response)
            )
        else:
            output_messages, finish_reasons, usage_dict, response_id = (
                self.handle_stream_response(response, num_tokens)
            )
        return (
            response,
            output_messages,
            finish_reasons,
            usage_dict,
            response_id,
        )
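Stripped of logging, the retry loop above is a generic "first backend that answers wins" pattern. A self-contained sketch under that reading (names are illustrative, not part of the codebase):

from typing import Callable, Iterable, Optional, TypeVar

T = TypeVar("T")


def first_success(attempts: Iterable[Callable[[], T]]) -> T:
    # Try each callable in order and return the first result; re-raise only
    # after every attempt has failed.
    last_error: Optional[Exception] = None
    for attempt in attempts:
        try:
            return attempt()
        except Exception as exc:
            last_error = exc  # a real implementation would log here
    raise RuntimeError("no backend succeeded") from last_error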
    def _step_get_info(
        self,
        output_messages: List[BaseMessage],
        finish_reasons: List[str],
        usage_dict: Dict[str, int],
        response_id: str,
        tool_calls: List[FunctionCallingRecord],
        num_tokens: int,
        external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
    ) -> Dict[str, Any]:
        r"""Process the output of a chat step and gather information about the
        step.

        This method checks for termination conditions, updates the agent's
        state, and collects information about the chat step, including tool
        calls and termination reasons.

        Args:
            output_messages (List[BaseMessage]): The messages generated in
                this step.
            finish_reasons (List[str]): The reasons for finishing the
                generation for each message.
            usage_dict (Dict[str, int]): Dictionary containing token usage
                information.
            response_id (str): The ID of the response from the model.
            tool_calls (List[FunctionCallingRecord]): Records of function
                calls made during this step.
            num_tokens (int): The number of tokens used in this step.
            external_tool_request (Optional[ChatCompletionMessageToolCall]):
                Any external tool request made during this step.
                (default: :obj:`None`)

        Returns:
            Dict[str, Any]: A dictionary containing information about the chat
                step, including termination status, reasons, and tool call
                information.

        Note:
            This method iterates over all response terminators and checks if
            any of them signal termination. If a terminator signals
            termination, the agent's state is updated accordingly, and the
            termination reason is recorded.
        """
        termination = [
            terminator.is_terminated(output_messages)
            for terminator in self.response_terminators
        ]
        # Terminate the agent if any of the terminators fires
        self.terminated, termination_reason = next(
            (
                (terminated, termination_reason)
                for terminated, termination_reason in termination
                if terminated
            ),
            (False, None),
        )
        # For now only retain the first termination reason
        if self.terminated and termination_reason is not None:
            finish_reasons = [termination_reason] * len(finish_reasons)

        info = self.get_info(
            response_id,
            usage_dict,
            finish_reasons,
            num_tokens,
            tool_calls,
            external_tool_request,
        )
        return info
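The terminator scan relies on next() with a default value: it yields the first (terminated, reason) pair that fired, or (False, None) when nothing did. The same idiom in isolation, with fabricated terminator results:

checks = [(False, None), (True, "max_tokens_exceeded"), (True, "user_stop")]
terminated, reason = next(
    ((flag, why) for flag, why in checks if flag),
    (False, None),
)
assert terminated and reason == "max_tokens_exceeded"  # first hit wins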
    def handle_batch_response(
        self, response: ChatCompletion
    ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
        r"""Process a batch response from the model and extract the necessary
        information.

        Args:
            response (ChatCompletion): Model response.

        Returns:
            tuple: A tuple of the list of output `ChatMessage`s, the list of
                finish reasons, the usage dictionary, and the response id.
        """
        output_messages: List[BaseMessage] = []
        for choice in response.choices:
            chat_message = BaseMessage(
                role_name=self.role_name,
                role_type=self.role_type,
                meta_dict=dict(),
                content=choice.message.content or "",
                parsed=getattr(choice.message, 'parsed', None),
            )
            # Process log probabilities and append them to the message meta
            # information
            if choice.logprobs is not None:
                tokens_logprobs = choice.logprobs.content

                if tokens_logprobs is not None:
                    # Extract and structure logprob information
                    logprobs_info = [
                        {
                            "token": token_logprob.token,
                            "logprob": token_logprob.logprob,
                            "top_logprobs": [
                                (top_logprob.token, top_logprob.logprob)
                                for top_logprob in token_logprob.top_logprobs
                            ],
                        }
                        for token_logprob in tokens_logprobs
                    ]
                    # Ensure meta_dict exists before adding logprobs info
                    if chat_message.meta_dict is None:
                        chat_message.meta_dict = {}
                    chat_message.meta_dict["logprobs_info"] = logprobs_info
            # Append the processed chat message to output
            output_messages.append(chat_message)

        finish_reasons = [
            str(choice.finish_reason) for choice in response.choices
        ]
        usage = (
            self._safe_model_dump(response.usage)
            if response.usage is not None
            else {}
        )
        return (
            output_messages,
            finish_reasons,
            usage,
            response.id,
        )
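For reference, a logprobs_info entry built by the loop above has this shape (toy numbers; each token records its own log probability plus the top alternatives):

logprobs_info = [
    {
        "token": "Hi",
        "logprob": -0.12,
        "top_logprobs": [("Hi", -0.12), ("Hello", -2.31)],
    },
]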
    def _safe_model_dump(self, obj) -> dict:
        r"""Safely dump a Pydantic model to a dictionary.

        This method attempts to use the `model_dump` method if available,
        otherwise it falls back to the `dict` method.

        Args:
            obj: The Pydantic model instance to be dumped.

        Returns:
            dict: A dictionary representation of the Pydantic model.
        """
        # Check if the `model_dump` method exists (Pydantic v2)
        if hasattr(obj, 'model_dump'):
            return obj.model_dump()
        # Fallback to `dict()` method (Pydantic v1)
        elif hasattr(obj, 'dict'):
            return obj.dict()
        else:
            raise TypeError("The object is not a Pydantic model")
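The v2/v1 branching matters because Pydantic v2 renamed dict() to model_dump(). A quick check of the same fallback, assuming Pydantic (either major version) is installed; the Usage model is an illustrative stand-in:

from pydantic import BaseModel


class Usage(BaseModel):
    prompt_tokens: int
    completion_tokens: int


u = Usage(prompt_tokens=10, completion_tokens=3)
# Same branch order as _safe_model_dump above.
data = u.model_dump() if hasattr(u, "model_dump") else u.dict()
assert data == {"prompt_tokens": 10, "completion_tokens": 3}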
    def handle_stream_response(
        self,
        response: Stream[ChatCompletionChunk],
        prompt_tokens: int,
    ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
        r"""Process a stream response from the model and extract the necessary
        information.

        Args:
            response (Stream[ChatCompletionChunk]): Model response.
            prompt_tokens (int): Number of input prompt tokens.

        Returns:
            tuple: A tuple of the list of output `ChatMessage`s, the list of
                finish reasons, the usage dictionary, and the response id.
        """
        content_dict: defaultdict = defaultdict(lambda: "")
        finish_reasons_dict: defaultdict = defaultdict(lambda: "")
        output_messages: List[BaseMessage] = []
        response_id: str = ""
        # All choices in one response share one role
        for chunk in response:
            response_id = chunk.id
            for choice in chunk.choices:
                index = choice.index
                delta = choice.delta
                if delta.content is not None:
                    # When the response has not been stopped
                    # Notice that only the first chunk_dict has the "role"
                    content_dict[index] += delta.content
                if choice.finish_reason:
                    finish_reasons_dict[index] = choice.finish_reason
                    chat_message = BaseMessage(
                        role_name=self.role_name,
                        role_type=self.role_type,
                        meta_dict=dict(),
                        content=content_dict[index],
                    )
                    output_messages.append(chat_message)
        finish_reasons = [
            finish_reasons_dict[i] for i in range(len(finish_reasons_dict))
        ]
        usage_dict = self.get_usage_dict(output_messages, prompt_tokens)
        return output_messages, finish_reasons, usage_dict, response_id
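The per-index accumulation can be pictured with plain dicts: every chunk carries a content delta for one choice index, and deltas concatenate in arrival order until a finish_reason closes that choice. The chunks below are hand-written stand-ins, not real API objects:

from collections import defaultdict

fake_chunks = [
    {"index": 0, "content": "Hel", "finish_reason": None},
    {"index": 0, "content": "lo", "finish_reason": "stop"},
]
content: defaultdict = defaultdict(str)
finish: dict = {}
for chunk in fake_chunks:
    if chunk["content"] is not None:
        content[chunk["index"]] += chunk["content"]
    if chunk["finish_reason"]:
        finish[chunk["index"]] = chunk["finish_reason"]
assert content[0] == "Hello" and finish[0] == "stop"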
    def _step_token_exceed(
        self,
        num_tokens: int,
        tool_calls: List[FunctionCallingRecord],
        termination_reason: str,
    ) -> ChatAgentResponse:
        r"""Return a trivial response containing the number of tokens and
        information about called functions when the token count exceeds the
        limit.

        Args:
            num_tokens (int): Number of tokens in the messages.
            tool_calls (List[FunctionCallingRecord]): List of information
                objects of functions called in the current step.
            termination_reason (str): String of termination reason.

        Returns:
            ChatAgentResponse: The struct containing trivial outputs and
                information about token number and called functions.
        """
        self.terminated = True
        output_messages: List[BaseMessage] = []

        info = self.get_info(
            None,
            None,
            [termination_reason],
            num_tokens,
            tool_calls,
        )

        return ChatAgentResponse(
            msgs=output_messages,
            terminated=self.terminated,
            info=info,
        )

    def step_tool_call(
        self,
        response: ChatCompletion,
    ) -> Tuple[
        FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
    ]:
        r"""Execute the function with arguments following the model's
        response.

        Args:
            response (ChatCompletion): The response obtained by calling the
                model.

        Returns:
            tuple: A tuple consisting of two :obj:`FunctionCallingMessage`
                objects, one about the arguments and the other about the
                execution result, and a struct for logging information about
                this function call.
        """
        choice = response.choices[0]
        if choice.message.tool_calls is None:
            raise RuntimeError("Tool call is None")
        func_name = choice.message.tool_calls[0].function.name

        args = json.loads(choice.message.tool_calls[0].function.arguments)
        tool = self.tool_dict[func_name]

        # ! If the agent calls the advanced reasoning tool, provide the chat
        # history so the reasoner can infer the user's intention.
        if func_name == "make_advanced_reasoning":
            reformed_question = f"""
            Please help an assistant to solve reasoning tasks.
            Here is the chat history between the assistant and the user, which may help you understand the intention of the user and the question:
            <chat_history>{self.memory.get_context()}</chat_history>

            Now please answer the following question:
            <question>{args['question']}</question>
            """
            args["question"] = reformed_question

        result = tool(**args)

        assist_msg = FunctionCallingMessage(
            role_name=self.role_name,
            role_type=self.role_type,
            meta_dict=None,
            content="",
            func_name=func_name,
            args=args,
        )
        func_msg = FunctionCallingMessage(
            role_name=self.role_name,
            role_type=self.role_type,
            meta_dict=None,
            content="",
            func_name=func_name,
            result=result,
        )

        # Record information about this function call
        func_record = FunctionCallingRecord(
            func_name=func_name, args=args, result=result
        )
        return assist_msg, func_msg, func_record
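Tool-call arguments always arrive as a JSON string, so the json.loads step above is load-bearing. The same dispatch in isolation, with a hand-written payload standing in for response.choices[0].message.tool_calls[0] and an illustrative tool registry:

import json

raw_arguments = '{"a": 1, "b": 2}'  # what the model actually sends
args = json.loads(raw_arguments)
tool_dict = {"add": lambda a, b: a + b}  # stand-in for self.tool_dict
result = tool_dict["add"](**args)
assert result == 3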
    async def step_tool_call_async(
        self,
        response: ChatCompletion,
    ) -> Tuple[
        FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
    ]:
        r"""Execute the async function with arguments following the model's
        response.

        Args:
            response (ChatCompletion): The response obtained by calling the
                model.

        Returns:
            tuple: A tuple consisting of two :obj:`FunctionCallingMessage`
                objects, one about the arguments and the other about the
                execution result, and a struct for logging information about
                this function call.
        """
        # Note that when function calling is enabled, `n` is set to 1.
        choice = response.choices[0]
        if choice.message.tool_calls is None:
            raise RuntimeError("Tool call is None")
        func_name = choice.message.tool_calls[0].function.name

        args = json.loads(choice.message.tool_calls[0].function.arguments)
        tool = self.tool_dict[func_name]
        result = await tool(**args)

        assist_msg = FunctionCallingMessage(
            role_name=self.role_name,
            role_type=self.role_type,
            meta_dict=None,
            content="",
            func_name=func_name,
            args=args,
        )
        func_msg = FunctionCallingMessage(
            role_name=self.role_name,
            role_type=self.role_type,
            meta_dict=None,
            content="",
            func_name=func_name,
            result=result,
        )

        # Record information about this function call
        func_record = FunctionCallingRecord(
            func_name=func_name, args=args, result=result
        )
        return assist_msg, func_msg, func_record

    def get_usage_dict(
        self, output_messages: List[BaseMessage], prompt_tokens: int
    ) -> Dict[str, int]:
        r"""Get usage dictionary when using the stream mode.

        Args:
            output_messages (list): List of output messages.
            prompt_tokens (int): Number of input prompt tokens.

        Returns:
            dict: Usage dictionary.
        """
        encoding = get_model_encoding(self.model_type.value_for_tiktoken)
        completion_tokens = 0
        for message in output_messages:
            completion_tokens += len(encoding.encode(message.content))
        usage_dict = dict(
            completion_tokens=completion_tokens,
            prompt_tokens=prompt_tokens,
            total_tokens=completion_tokens + prompt_tokens,
        )
        return usage_dict
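A self-contained version of this stream-mode accounting, assuming the tiktoken package is installed; "cl100k_base" stands in for whatever encoding value_for_tiktoken resolves to:

import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
outputs = ["Hello there!", "Happy to help."]
completion_tokens = sum(len(encoding.encode(text)) for text in outputs)
prompt_tokens = 42  # in the agent this is counted from the input context
usage = {
    "completion_tokens": completion_tokens,
    "prompt_tokens": prompt_tokens,
    "total_tokens": completion_tokens + prompt_tokens,
}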
    def add_model_scheduling_strategy(self, name: str, strategy_fn: Callable):
        r"""Add a scheduling strategy method provided by the user to the
        ModelManager.

        Args:
            name (str): The name of the strategy.
            strategy_fn (Callable): The scheduling strategy function.
        """
        self.model_backend.add_strategy(name, strategy_fn)

    def __repr__(self) -> str:
        r"""Returns a string representation of the :obj:`ChatAgent`.

        Returns:
            str: The string representation of the :obj:`ChatAgent`.
        """
        return (
            f"ChatAgent({self.role_name}, {self.role_type}, {self.model_type})"
        )
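End-to-end, the class above is driven roughly like this sketch. It assumes the deep-swarm camel package is importable and a model API key is configured; the role names and prompts are placeholders:

from camel.agents import ChatAgent
from camel.messages import BaseMessage

agent = ChatAgent(
    BaseMessage.make_assistant_message(
        role_name="Assistant", content="You are a helpful assistant."
    )
)
response = agent.step(
    BaseMessage.make_user_message(role_name="User", content="Say hi.")
)
print(response.msg.content)
print(repr(agent))  # ChatAgent(Assistant, RoleType.ASSISTANT, ...)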
deep-swarm/camel/agents/critic_agent.py
ADDED
@@ -0,0 +1,202 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import random
import warnings
from typing import Any, Dict, Optional, Sequence

from colorama import Fore

from camel.agents.chat_agent import ChatAgent
from camel.memories import AgentMemory
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.responses import ChatAgentResponse
from camel.utils import get_first_int, print_text_animated

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="CriticAgent")
class CriticAgent(ChatAgent):
    r"""A class for the critic agent that assists in selecting an option.

    Args:
        system_message (BaseMessage): The system message for the critic
            agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        memory (AgentMemory, optional): The agent memory for managing chat
            messages. (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`6`)
        retry_attempts (int, optional): The number of retry attempts if the
            critic fails to return a valid option. (default: :obj:`2`)
        verbose (bool, optional): Whether to print the critic's messages.
        logger_color (Any): The color of the menu options displayed to the
            user. (default: :obj:`Fore.MAGENTA`)
    """

    def __init__(
        self,
        system_message: BaseMessage,
        model: Optional[BaseModelBackend] = None,
        memory: Optional[AgentMemory] = None,
        message_window_size: int = 6,
        retry_attempts: int = 2,
        verbose: bool = False,
        logger_color: Any = Fore.MAGENTA,
    ) -> None:
        super().__init__(
            system_message,
            model=model,
            memory=memory,
            message_window_size=message_window_size,
        )
        self.options_dict: Dict[str, str] = dict()
        self.retry_attempts = retry_attempts
        self.verbose = verbose
        self.logger_color = logger_color

    def flatten_options(self, messages: Sequence[BaseMessage]) -> str:
        r"""Flattens the options to the critic.

        Args:
            messages (Sequence[BaseMessage]): A list of `BaseMessage` objects.

        Returns:
            str: A string containing the flattened options to the critic.
        """
        options = [message.content for message in messages]
        flatten_options = (
            f"> Proposals from "
            f"{messages[0].role_name} ({messages[0].role_type}). "
            "Please choose an option:\n"
        )
        for index, option in enumerate(options):
            flatten_options += f"Option {index + 1}:\n{option}\n\n"
            self.options_dict[str(index + 1)] = option
        format = (
            f"Please first enter your choice ([1-{len(self.options_dict)}]) "
            "and then your explanation and comparison: "
        )
        return flatten_options + format

    def get_option(self, input_message: BaseMessage) -> str:
        r"""Gets the option selected by the critic.

        Args:
            input_message (BaseMessage): A `BaseMessage` object representing
                the input message.

        Returns:
            str: The option selected by the critic.
        """
        # TODO: Add support for editing options by the critic.
        msg_content = input_message.content
        i = 0
        while i < self.retry_attempts:
            critic_response = self.step(input_message)

            if critic_response.msgs is None or len(critic_response.msgs) == 0:
                raise RuntimeError("Got None critic messages.")
            if critic_response.terminated:
                raise RuntimeError("Critic step failed.")

            critic_msg = critic_response.msg
            if self.verbose:
                print_text_animated(
                    self.logger_color + "\n> Critic response: "
                    f"\x1b[3m{critic_msg.content}\x1b[0m\n"
                )
            choice = self.parse_critic(critic_msg)

            if choice in self.options_dict:
                return self.options_dict[choice]
            else:
                input_message = BaseMessage(
                    role_name=input_message.role_name,
                    role_type=input_message.role_type,
                    meta_dict=input_message.meta_dict,
                    content="> Invalid choice. Please choose again.\n"
                    + msg_content,
                )
            i += 1
        warnings.warn(
            "Critic failed to get a valid option "
            f"after {self.retry_attempts} attempts. "
            "Returning a random option."
        )
        return random.choice(list(self.options_dict.values()))

    def parse_critic(self, critic_msg: BaseMessage) -> Optional[str]:
        r"""Parses the critic's message and extracts the choice.

        Args:
            critic_msg (BaseMessage): A `BaseMessage` object representing the
                critic's response.

        Returns:
            Optional[str]: The critic's choice as a string, or None if the
                message could not be parsed.
        """
        choice = str(get_first_int(critic_msg.content))
        return choice

    def reduce_step(
        self,
        input_messages: Sequence[BaseMessage],
    ) -> ChatAgentResponse:
        r"""Performs one step of the conversation by flattening options to the
        critic, getting the option, and parsing the choice.

        Args:
            input_messages (Sequence[BaseMessage]): A list of BaseMessage
                objects.

        Returns:
            ChatAgentResponse: A `ChatAgentResponse` object that includes the
                critic's choice.
        """
        meta_chat_message = BaseMessage(
            role_name=input_messages[0].role_name,
            role_type=input_messages[0].role_type,
            meta_dict=input_messages[0].meta_dict,
            content="",
        )

        flatten_options = self.flatten_options(input_messages)
        if self.verbose:
            print_text_animated(
                self.logger_color + f"\x1b[3m{flatten_options}\x1b[0m\n"
            )
        input_msg = meta_chat_message.create_new_instance(flatten_options)

        option = self.get_option(input_msg)
        output_msg = meta_chat_message.create_new_instance(option)

        # TODO: The return `info` can be improved.
        return ChatAgentResponse(
            msgs=[output_msg],
            terminated=False,
            info={},
        )
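A hypothetical reduce_step round trip: two candidate proposals go in, and the critic's chosen option comes back as a single message. Prompts and role names are placeholders, and a configured model backend is required:

from camel.agents.critic_agent import CriticAgent
from camel.messages import BaseMessage

critic = CriticAgent(
    BaseMessage.make_assistant_message(
        role_name="Critic", content="Pick the more helpful option."
    ),
    verbose=True,
)
proposals = [
    BaseMessage.make_assistant_message(
        role_name="Planner", content="Option A: write tests first."
    ),
    BaseMessage.make_assistant_message(
        role_name="Planner", content="Option B: prototype quickly."
    ),
]
choice = critic.reduce_step(proposals)
print(choice.msg.content)  # the selected option's text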
deep-swarm/camel/agents/deductive_reasoner_agent.py
ADDED
@@ -0,0 +1,303 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import re
from typing import Dict, List, Optional, Union

from camel.agents.chat_agent import ChatAgent
from camel.logger import get_logger
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.types import RoleType

logger = get_logger(__name__)

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="DeductiveReasonerAgent")
class DeductiveReasonerAgent(ChatAgent):
    r"""An agent responsible for deductive reasoning. Model of deductive
    reasoning:
        - L: A ⊕ C -> q * B
        - A represents the known starting state.
        - B represents the known target state.
        - C represents the conditions required to transition from A to B.
        - Q represents the quality or effectiveness of the transition from
            A to B.
        - L represents the path or process from A to B.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Insight Agent",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You assign roles based on tasks.",
        )
        super().__init__(system_message, model=model)

    def deduce_conditions_and_quality(
        self,
        starting_state: str,
        target_state: str,
        role_descriptions_dict: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Union[List[str], Dict[str, str]]]:
        r"""Derives the conditions and quality from the starting state and the
        target state based on the model of the deductive reasoning and the
        knowledge base. It can optionally consider the roles involved in the
        scenario, which allows tailoring the output more closely to the AI
        agent's environment.

        Args:
            starting_state (str): The initial or starting state from which
                conditions are deduced.
            target_state (str): The target state of the task.
            role_descriptions_dict (Optional[Dict[str, str]], optional): A
                dictionary describing the roles involved in the scenario. This
                is optional and can be used to provide a context for the
                CAMEL's role-playing, enabling the generation of more relevant
                and tailored conditions and quality assessments. This could be
                generated using a `RoleAssignmentAgent()` or defined manually
                by the user. (default: :obj:`None`)

        Returns:
            Dict[str, Union[List[str], Dict[str, str]]]: A dictionary with the
                extracted data from the message. The dictionary contains three
                keys:
                - 'conditions': A dictionary where each key is a condition ID
                  and each value is the corresponding condition text.
                - 'labels': A list of label strings extracted from the
                  message.
                - 'evaluate_quality': A string of quality assessment extracted
                  from the message.
        """
        self.reset()

        deduce_prompt = """You are a deductive reasoner. You are tasked to
complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the
STARTING STATE A and the TARGET STATE B. You are given the CONTEXT
CONTENT to help you complete the TASK.
Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY
fill in the BLANKs, and DO NOT alter or modify any other part of the template.

===== MODELING OF DEDUCTIVE REASONING =====
You are tasked with understanding a mathematical model based on the components
${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
- $A$ represents the known starting state.
- $B$ represents the known target state.
- $C$ represents the conditions required to transition from $A$ to $B$.
- $Q$ represents the quality or effectiveness of the transition from $A$ to
$B$.
- $L$ represents the path or process from $A$ to $B$.

===== THOUGHT OF DEDUCTIVE REASONING =====
1. Define the Parameters of A and B:
    - Characterization: Before delving into transitions, thoroughly understand
    the nature and boundaries of both $A$ and $B$. This includes the type,
    properties, constraints, and possible interactions between the two.
    - Contrast and Compare: Highlight the similarities and differences between
    $A$ and $B$. This comparative analysis will give an insight into what
    needs changing and what remains constant.
2. Historical & Empirical Analysis:
    - Previous Transitions according to the Knowledge Base of GPT: (if
    applicable) Extract conditions and patterns from the historical instances
    where a similar transition from a state comparable to $A$ moved towards
    $B$.
    - Scientific Principles: (if applicable) Consider the underlying
    scientific principles governing or related to the states and their
    transition. For example, if $A$ and $B$ are physical states, laws of
    physics might apply.
3. Logical Deduction of Conditions ($C$):
    - Direct Path Analysis: What are the immediate and direct conditions
    required to move from $A$ to $B$?
    - Intermediate States: Are there states between $A$ and $B$ that must be
    traversed or can be used to make the transition smoother or more
    efficient? If yes, what is the content?
    - Constraints & Limitations: Identify potential barriers or restrictions
    in moving from $A$ to $B$. These can be external (e.g., environmental
    factors) or internal (properties of $A$ or $B$).
    - Resource and Information Analysis: What resources and information are
    required for the transition? This could be time, entity, factor, code
    language, software platform, unknowns, etc.
    - External Influences: Consider socio-economic, political, or
    environmental factors (if applicable) that could influence the transition
    conditions.
    - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s,
    no matter how unconventional they might seem. Utilize analogies,
    metaphors, or brainstorming techniques to envision possible conditions or
    paths from $A$ to $B$.
    - The conditions $C$ should be multiple but in one sentence. And each
    condition should be concerned with one aspect/entity.
4. Entity/Label Recognition of Conditions ($C$):
    - Identify and categorize entities of Conditions ($C$) such as the names,
    locations, dates, specific technical terms or contextual parameters that
    might be associated with events, innovations post-2022.
    - The output of the entities/labels will be used as tags or labels for
    semantic similarity searches. The entities/labels may be the words, or
    phrases, each of them should contain valuable, high information entropy
    information, and should be independent.
    - Ensure that the identified entities are formatted in a manner suitable
    for database indexing and retrieval. Organize the entities into
    categories, and combine the category with its instance into a continuous
    phrase, without using colons or other separators.
    - Format these entities for database indexing: output the category rather
    than its instance/content into a continuous phrase. For example, instead
    of "Jan. 02", identify it as "Event time".
5. Quality Assessment ($Q$):
    - Efficiency: How efficient is the transition from $A$ to $B$, which
    measures the resources used versus the desired outcome?
    - Effectiveness: Did the transition achieve the desired outcome or was the
    target state achieved as intended?
    - Safety & Risks: Assess any risks associated with the transition and the
    measures to mitigate them.
    - Feedback Mechanisms: Incorporate feedback loops to continuously monitor
    and adjust the quality of transition, making it more adaptive.
6. Iterative Evaluation:
    - Test & Refine: Based on the initially deduced conditions and assessed
    quality, iterate the process to refine and optimize the transition. This
    might involve tweaking conditions, employing different paths, or changing
    resources.
    - Feedback Integration: Use feedback to make improvements and increase the
    quality of the transition.
7. Real-world scenarios often present challenges that may not be captured by
models and frameworks. While using the model, maintain an adaptive mindset:
    - Scenario Exploration: Continuously imagine various possible scenarios,
    both positive and negative, to prepare for unexpected events.
    - Flexibility: Be prepared to modify conditions ($C$) or alter the path/
    process ($L$) if unforeseen challenges arise.
    - Feedback Integration: Rapidly integrate feedback from actual
    implementations to adjust the model's application, ensuring relevancy and
    effectiveness.

===== TASK =====
Given the starting state $A$ and the target state $B$, assuming that a path
$L$ always exists between $A$ and $B$, how can one deduce or identify the
necessary conditions $C$ and the quality $Q$ of the transition?

===== STARTING STATE $A$ =====
{starting_state}

===== TARGET STATE $B$ =====
{target_state}

{role_with_description_prompt}
===== ANSWER TEMPLATE =====
- Characterization and comparison of $A$ and $B$:\n<BLANK>
- Historical & Empirical Analysis:\n<BLANK>/None
- Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
    condition <NUM>:
        <BLANK>.
- Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include
square brackets)
- Quality Assessment ($Q$) (do not use symbols):
    <BLANK>.
- Iterative Evaluation:\n<BLANK>/None"""

        if role_descriptions_dict is not None:
            role_names = role_descriptions_dict.keys()
            role_with_description_prompt = (
                "===== ROLES WITH DESCRIPTIONS =====\n"
                + "\n".join(
                    f"{role_name}:\n{role_descriptions_dict[role_name]}\n"
                    for role_name in role_names
                )
                + "\n\n"
            )
        else:
            role_with_description_prompt = ""
        deduce_prompt = TextPrompt(deduce_prompt)

        deduce = deduce_prompt.format(
            starting_state=starting_state,
            target_state=target_state,
            role_with_description_prompt=role_with_description_prompt,
        )

        conditions_and_quality_generation_msg = BaseMessage.make_user_message(
            role_name="Deductive Reasoner", content=deduce
        )

        response = self.step(
            input_message=conditions_and_quality_generation_msg
        )

        if response.terminated:
            raise RuntimeError(
                "Deduction failed. Error:\n" + f"{response.info}"
            )
        msg: BaseMessage = response.msg
        logger.info(f"Message content:\n{msg.content}")

        # Extract the conditions from the message
        conditions_dict = {
            f"condition {i}": cdt.replace("<", "")
            .replace(">", "")
            .strip()
            .strip('\n')
            for i, cdt in re.findall(
                r"condition (\d+):\s*(.+?)(?=condition \d+|- Entity)",
                msg.content,
                re.DOTALL,
            )
        }

        # Extract the labels from the message
        labels = [
            label.strip().strip('\n').strip("\"'")
            for label in re.findall(
                r"Entity/Label Recognition of Conditions:\n\[(.+?)\]",
                msg.content,
                re.DOTALL,
            )[0].split(",")
        ]

        # Extract the quality from the message
        quality = next(
            q.strip().strip('\n')
            for q in re.findall(
                r"Quality Assessment \(\$Q\$\) \(do not use symbols\):"
                r"\n(.+?)- Iterative",
                msg.content,
                re.DOTALL,
            )
        )

        # Convert them into JSON format
        conditions_and_quality_json: Dict[
            str, Union[List[str], Dict[str, str]]
        ] = {}
        conditions_and_quality_json["conditions"] = conditions_dict
        conditions_and_quality_json["labels"] = labels
        conditions_and_quality_json["evaluate_quality"] = quality

        return conditions_and_quality_json
deep-swarm/camel/agents/embodied_agent.py
ADDED
@@ -0,0 +1,201 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Any, List, Optional

from colorama import Fore

from camel.agents.chat_agent import ChatAgent
from camel.agents.tool_agents.base import BaseToolAgent
from camel.interpreters import (
    BaseInterpreter,
    InternalPythonInterpreter,
    SubprocessInterpreter,
)
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.responses import ChatAgentResponse
from camel.utils import print_text_animated

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="EmbodiedAgent")
class EmbodiedAgent(ChatAgent):
    r"""Class for managing conversations of CAMEL Embodied Agents.

    Args:
        system_message (BaseMessage): The system message for the chat agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        tool_agents (List[BaseToolAgent], optional): The tool agents to use in
            the embodied agent. (default: :obj:`None`)
        code_interpreter (BaseInterpreter, optional): The code interpreter to
            execute codes. If `code_interpreter` and `tool_agents` are both
            `None`, default to `SubprocessInterpreter`. If `code_interpreter`
            is `None` and `tool_agents` is not `None`, default to
            `InternalPythonInterpreter`. (default: :obj:`None`)
        verbose (bool, optional): Whether to print the agent's messages.
        logger_color (Any): The color of the logger displayed to the user.
            (default: :obj:`Fore.MAGENTA`)
    """

    def __init__(
        self,
        system_message: BaseMessage,
        model: Optional[BaseModelBackend] = None,
        message_window_size: Optional[int] = None,
        tool_agents: Optional[List[BaseToolAgent]] = None,
        code_interpreter: Optional[BaseInterpreter] = None,
        verbose: bool = False,
        logger_color: Any = Fore.MAGENTA,
    ) -> None:
        self.tool_agents = tool_agents
        self.code_interpreter: BaseInterpreter
        if code_interpreter is not None:
            self.code_interpreter = code_interpreter
        elif self.tool_agents:
            self.code_interpreter = InternalPythonInterpreter()
        else:
            self.code_interpreter = SubprocessInterpreter()

        if self.tool_agents:
            system_message = self._set_tool_agents(system_message)
        self.verbose = verbose
        self.logger_color = logger_color
        super().__init__(
            system_message=system_message,
            model=model,
            message_window_size=message_window_size,
        )

    def _set_tool_agents(self, system_message: BaseMessage) -> BaseMessage:
        action_space_prompt = self._get_tool_agents_prompt()
        result_message = system_message.create_new_instance(
            content=system_message.content.format(
                action_space=action_space_prompt
            )
        )
        if self.tool_agents is not None:
            self.code_interpreter.update_action_space(
                {tool.name: tool for tool in self.tool_agents}
            )
        return result_message

    def _get_tool_agents_prompt(self) -> str:
        r"""Returns the action space prompt.

        Returns:
            str: The action space prompt.
        """
        if self.tool_agents is not None:
            return "\n".join(
                [
                    f"*** {tool.name} ***:\n {tool.description}"
                    for tool in self.tool_agents
                ]
            )
        else:
            return ""

    def get_tool_agent_names(self) -> List[str]:
        r"""Returns the names of tool agents.

        Returns:
            List[str]: The names of tool agents.
        """
        if self.tool_agents is not None:
            return [tool.name for tool in self.tool_agents]
        else:
            return []

    # ruff: noqa: E501
    def step(self, input_message: BaseMessage) -> ChatAgentResponse:  # type: ignore[override]
        r"""Performs a step in the conversation.

        Args:
            input_message (BaseMessage): The input message.

        Returns:
            ChatAgentResponse: A struct containing the output messages,
                a boolean indicating whether the chat session has terminated,
                and information about the chat session.
        """
        response = super().step(input_message)

        if response.msgs is None or len(response.msgs) == 0:
            raise RuntimeError("Got None output messages.")
        if response.terminated:
            raise RuntimeError(f"{self.__class__.__name__} step failed.")

        # NOTE: Only single output messages are supported
        explanations, codes = response.msg.extract_text_and_code_prompts()

        if self.verbose:
            for explanation, code in zip(explanations, codes):
                print_text_animated(
                    self.logger_color + f"> Explanation:\n{explanation}"
                )
                print_text_animated(self.logger_color + f"> Code:\n{code}")

            if len(explanations) > len(codes):
                print_text_animated(
                    self.logger_color + f"> Explanation:\n{explanations[-1]}"
                )

        content = response.msg.content

        if codes is not None:
            try:
                content = "\n> Executed Results:\n"
                for block_idx, code in enumerate(codes):
                    executed_output = self.code_interpreter.run(
                        code, code.code_type
                    )
                    content += (
                        f"Executing code block {block_idx}: {{\n"
                        + executed_output
                        + "}\n"
                    )
            except InterruptedError as e:
                content = (
                    f"\n> Running code failed: {e}\n"
                    "Please regenerate the code."
                )

        # TODO: Handle errors
        content = input_message.content + f"\n> Embodied Actions:\n{content}"
        message = BaseMessage(
            input_message.role_name,
            input_message.role_type,
            input_message.meta_dict,
            content,
        )
        return ChatAgentResponse(
            msgs=[message],
            terminated=response.terminated,
            info=response.info,
        )
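An illustrative EmbodiedAgent setup. Since no tool_agents are passed here, code runs through SubprocessInterpreter and the system prompt needs no {action_space} placeholder; the prompts are placeholders and a model backend must be configured:

from camel.agents.embodied_agent import EmbodiedAgent
from camel.messages import BaseMessage

agent = EmbodiedAgent(
    BaseMessage.make_assistant_message(
        role_name="Coder", content="You write and run Python code."
    ),
    verbose=True,
)
response = agent.step(
    BaseMessage.make_user_message(role_name="User", content="Print 1 + 1.")
)
print(response.msg.content)  # includes the "> Embodied Actions" trace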
deep-swarm/camel/agents/knowledge_graph_agent.py
ADDED
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import TYPE_CHECKING, Optional, Union

if TYPE_CHECKING:
    from unstructured.documents.elements import Element

from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.storages.graph_storages.graph_element import (
    GraphElement,
    Node,
    Relationship,
)
from camel.types import RoleType

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


text_prompt = """
You are tasked with extracting nodes and relationships from given content and
structuring them into Node and Relationship objects. Here's the outline of
what you need to do:

Content Extraction:
You should be able to process input content and identify entities mentioned
within it.
Entities can be any noun phrases or concepts that represent distinct entities
in the context of the given content.

Node Extraction:
For each identified entity, you should create a Node object.
Each Node object should have a unique identifier (id) and a type (type).
Additional properties associated with the node can also be extracted and
stored.

Relationship Extraction:
You should identify relationships between entities mentioned in the content.
For each relationship, create a Relationship object.
A Relationship object should have a subject (subj) and an object (obj) which
are Node objects representing the entities involved in the relationship.
Each relationship should also have a type (type), and additional properties if
applicable.

Output Formatting:
The extracted nodes and relationships should be formatted as instances of the
provided Node and Relationship classes.
Ensure that the extracted data adheres to the structure defined by the classes.
Output the structured data in a format that can be easily validated against
the provided code.

Instructions for you:
Read the provided content thoroughly.
Identify distinct entities mentioned in the content and categorize them as
nodes.
Determine relationships between these entities and represent them as directed
relationships.
Provide the extracted nodes and relationships in the specified format below.
Example for you:

Example Content:
"John works at XYZ Corporation. He is a software engineer. The company is
located in New York City."

Expected Output:

Nodes:

Node(id='John', type='Person')
Node(id='XYZ Corporation', type='Organization')
Node(id='New York City', type='Location')

Relationships:

Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
Corporation', type='Organization'), type='WorksAt')
Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
type='Location'), type='ResidesIn')

===== TASK =====
Please extract nodes and relationships from the given content and structure
them into Node and Relationship objects.

{task}
"""


@track_agent(name="KnowledgeGraphAgent")
class KnowledgeGraphAgent(ChatAgent):
    r"""An agent that can extract node and relationship information for
    different entities from given `Element` content.

    Attributes:
        task_prompt (TextPrompt): A prompt for the agent to extract node and
            relationship information for different entities.
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        r"""Initialize the `KnowledgeGraphAgent`.

        Args:
            model (BaseModelBackend, optional): The model backend to use for
                generating responses. (default: :obj:`OpenAIModel` with
                `GPT_4O_MINI`)
        """
        system_message = BaseMessage(
            role_name="Graphify",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="Your mission is to transform unstructured content "
            "into structured graph data. Extract nodes and relationships with "
            "precision, and let the connections unfold. Your graphs will "
            "illuminate the hidden connections within the chaos of "
            "information.",
        )
        super().__init__(system_message, model=model)

    def run(
        self,
        element: "Element",
        parse_graph_elements: bool = False,
    ) -> Union[str, GraphElement]:
        r"""Run the agent to extract node and relationship information.

        Args:
            element (Element): The input element.
            parse_graph_elements (bool, optional): Whether to parse into
                `GraphElement`. Defaults to `False`.

        Returns:
            Union[str, GraphElement]: The extracted node and relationship
                information. If `parse_graph_elements` is `True` then return
                `GraphElement`, else return `str`.
        """
        self.reset()
        self.element = element

        knowledge_graph_prompt = TextPrompt(text_prompt)
        knowledge_graph_generation = knowledge_graph_prompt.format(
            task=str(element)
        )

        knowledge_graph_generation_msg = BaseMessage.make_user_message(
            role_name="Graphify", content=knowledge_graph_generation
        )

        response = self.step(input_message=knowledge_graph_generation_msg)

        content = response.msg.content

        if parse_graph_elements:
            content = self._parse_graph_elements(content)

        return content

    def _validate_node(self, node: Node) -> bool:
        r"""Validate if the object is a valid Node.

        Args:
            node (Node): Object to be validated.

        Returns:
            bool: True if the object is a valid Node, False otherwise.
        """
        return (
            isinstance(node, Node)
            and isinstance(node.id, (str, int))
            and isinstance(node.type, str)
        )

    def _validate_relationship(self, relationship: Relationship) -> bool:
        r"""Validate if the object is a valid Relationship.

        Args:
            relationship (Relationship): Object to be validated.

        Returns:
            bool: True if the object is a valid Relationship, False otherwise.
        """
        return (
            isinstance(relationship, Relationship)
            and self._validate_node(relationship.subj)
            and self._validate_node(relationship.obj)
            and isinstance(relationship.type, str)
        )

    def _parse_graph_elements(self, input_string: str) -> GraphElement:
        r"""Parses graph elements from given content.

        Args:
            input_string (str): The input content.

        Returns:
            GraphElement: The parsed graph elements.
        """
        import re

        # Regular expressions to extract nodes and relationships
        node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
        rel_pattern = (
            r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
        )

        nodes = {}
        relationships = []

        # Extract nodes
        for match in re.finditer(node_pattern, input_string):
            id, type = match.groups()
            properties = {'source': 'agent_created'}
            if id not in nodes:
                node = Node(id=id, type=type, properties=properties)
                if self._validate_node(node):
                    nodes[id] = node

        # Extract relationships
        for match in re.finditer(rel_pattern, input_string):
            subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
            properties = {'source': 'agent_created'}
            if subj_id in nodes and obj_id in nodes:
                subj = nodes[subj_id]
                obj = nodes[obj_id]
                relationship = Relationship(
                    subj=subj, obj=obj, type=rel_type, properties=properties
                )
                if self._validate_relationship(relationship):
                    relationships.append(relationship)

        return GraphElement(
            nodes=list(nodes.values()),
            relationships=relationships,
            source=self.element,
        )
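A minimal usage sketch for the agent above, not part of the commit. It assumes the optional `unstructured` dependency is installed; `Text` is one concrete `Element` subclass.

```python
# Illustrative sketch (not in the commit).
from unstructured.documents.elements import Text

from camel.agents import KnowledgeGraphAgent

agent = KnowledgeGraphAgent()  # default model backend
element = Text("John works at XYZ Corporation in New York City.")

raw_output = agent.run(element)  # plain string from the LLM
graph = agent.run(element, parse_graph_elements=True)  # GraphElement
print(graph.nodes)
print(graph.relationships)
```

Note that `_parse_graph_elements` only keeps relationships whose endpoints were already matched by `node_pattern`, so output that deviates from the `Node(id='…', type='…')` format is silently dropped.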
deep-swarm/camel/agents/role_assignment_agent.py
ADDED
@@ -0,0 +1,141 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import re
from typing import Dict, Optional, Union

from camel.agents.chat_agent import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.types import RoleType

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="RoleAssignmentAgent")
class RoleAssignmentAgent(ChatAgent):
    r"""An agent that generates role names based on the task prompt.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)

    Attributes:
        role_assignment_prompt (TextPrompt): A prompt for the agent to
            generate role names.
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Role Assigner",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You assign roles based on tasks.",
        )
        super().__init__(system_message, model=model)

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
        num_roles: int = 2,
    ) -> Dict[str, str]:
        r"""Generate role names based on the input task prompt.

        Args:
            task_prompt (Union[str, TextPrompt]): The prompt
                for the task based on which the roles are to be generated.
            num_roles (int, optional): The number of roles to generate.
                (default: :obj:`2`)

        Returns:
            Dict[str, str]: A dictionary mapping role names to their
                descriptions.
        """
        self.reset()

        expert_prompt = "===== ANSWER PROMPT =====\n" + "\n".join(
            f"Domain expert {i + 1}: <BLANK>\n"
            f"Associated competencies, characteristics, duties "
            f"and workflows: <BLANK>. End."
            for i in range(num_roles or 0)
        )
        role_assignment_generation_prompt = TextPrompt(
            "You are a role assignment agent, and you're in charge of "
            + "recruiting {num_roles} experts for the following task."
            + "\n==== TASK =====\n {task}\n\n"
            + "Identify the domain experts you'd recruit and detail their "
            + "associated competencies, characteristics, duties and workflows "
            + "to complete the task.\n "
            + "Your answer MUST adhere to the format of ANSWER PROMPT, and "
            + "ONLY answer the BLANKs.\n"
            + expert_prompt
        )
        role_assignment_generation = role_assignment_generation_prompt.format(
            num_roles=num_roles, task=task_prompt
        )

        role_assignment_generation_msg = BaseMessage.make_user_message(
            role_name="Role Assigner", content=role_assignment_generation
        )

        response = self.step(input_message=role_assignment_generation_msg)

        msg = response.msg  # type: BaseMessage
        terminated = response.terminated

        # Distribute the output completions into role names and descriptions
        role_names = [
            desc.replace("<|", "").replace("|>", "")
            for desc in re.findall(
                r"Domain expert \d: (.+?)\nAssociated competencies,",
                msg.content,
                re.DOTALL,
            )
        ]
        role_descriptions = [
            desc.replace("<|", "").replace("|>", "")
            for desc in re.findall(
                r"Associated competencies, characteristics, "
                r"duties and workflows: (.+?) End.",
                msg.content,
                re.DOTALL,
            )
        ]

        if len(role_names) != num_roles or len(role_descriptions) != num_roles:
            raise RuntimeError(
                "Got no or insufficient information about the roles."
            )
        if terminated:
            raise RuntimeError("Role assignment failed.")

        role_descriptions_dict = {
            role_name: description
            for role_name, description in zip(role_names, role_descriptions)
        }

        return role_descriptions_dict
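A minimal usage sketch for `RoleAssignmentAgent`, not part of the commit. It assumes the class is re-exported from `camel.agents`, as the package layout suggests.

```python
# Illustrative sketch (not in the commit).
from camel.agents import RoleAssignmentAgent

agent = RoleAssignmentAgent()
role_dict = agent.run(
    task_prompt="Develop a trading bot for the stock market.",
    num_roles=2,
)
for role_name, description in role_dict.items():
    print(f"{role_name}: {description}")
```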
deep-swarm/camel/agents/search_agent.py
ADDED
@@ -0,0 +1,133 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Optional

from camel.agents.chat_agent import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.types import RoleType
from camel.utils import create_chunks

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="SearchAgent")
class SearchAgent(ChatAgent):
    r"""An agent that summarizes text based on a query and evaluates the
    relevance of an answer.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Assistant",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful assistant.",
        )
        super().__init__(system_message, model=model)

    def summarize_text(self, text: str, query: str) -> str:
        r"""Summarize the information from the text, based on the query.

        Args:
            text (str): Text to summarize.
            query (str): What information you want.

        Returns:
            str: Strings with information.
        """
        self.reset()

        summary_prompt = TextPrompt(
            '''Gather information from this text that is relevant to the
question, but do not directly answer the question.\nquestion:
{query}\ntext '''
        )
        summary_prompt = summary_prompt.format(query=query)
        # Max length of each chunk
        max_len = 3000
        results = ""
        chunks = create_chunks(text, max_len)
        # Summarize
        for i, chunk in enumerate(chunks, start=1):
            prompt = summary_prompt + str(i) + ": " + chunk
            user_msg = BaseMessage.make_user_message(
                role_name="User",
                content=prompt,
            )
            result = self.step(user_msg).msg.content
            results += result + "\n"

        # Final summarization
        final_prompt = TextPrompt(
            '''Here are some summarized texts which were split from one text.
Use the information to answer the question. If you can't find the answer,
you must answer "I can not find the answer to the query" and
explain why.\n Query:\n{query}.\n\nText:\n'''
        )
        final_prompt = final_prompt.format(query=query)
        prompt = final_prompt + results

        user_msg = BaseMessage.make_user_message(
            role_name="User",
            content=prompt,
        )
        response = self.step(user_msg).msg.content

        return response

    def continue_search(self, query: str, answer: str) -> bool:
        r"""Ask whether to continue searching or not based on the provided
        answer.

        Args:
            query (str): The question.
            answer (str): The answer to the question.

        Returns:
            bool: `True` if the search should continue, `False`
                otherwise.
        """
        prompt = TextPrompt(
            "Do you think the ANSWER can answer the QUERY? "
            "Use only 'yes' or 'no' to answer.\n"
            "===== QUERY =====\n{query}\n\n"
            "===== ANSWER =====\n{answer}"
        )
        prompt = prompt.format(query=query, answer=answer)
        user_msg = BaseMessage.make_user_message(
            role_name="User",
            content=prompt,
        )
        response = self.step(user_msg).msg.content
        # If the answer already covers the query, there is no need to
        # continue searching.
        if "yes" in str(response).lower():
            return False
        return True
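A minimal usage sketch for `SearchAgent`, not part of the commit. The file name and query are hypothetical; `long_text` stands in for any retrieved document.

```python
# Illustrative sketch (not in the commit).
from camel.agents import SearchAgent

agent = SearchAgent()
query = "Who founded the company, and when?"
with open("retrieved_article.txt") as f:  # hypothetical input file
    long_text = f.read()

answer = agent.summarize_text(long_text, query)
if agent.continue_search(query, answer):
    print("Answer judged insufficient; fetch more sources and retry.")
else:
    print(answer)
```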
deep-swarm/camel/agents/task_agent.py
ADDED
@@ -0,0 +1,410 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Any, Dict, List, Optional, Union

from camel.agents.chat_agent import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import PromptTemplateGenerator, TextPrompt
from camel.types import RoleType, TaskType
from camel.utils import get_task_list

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="TaskSpecifyAgent")
class TaskSpecifyAgent(ChatAgent):
    r"""An agent that specifies a given task prompt by prompting the user to
    provide more details.

    Attributes:
        DEFAULT_WORD_LIMIT (int): The default word limit for the task prompt.
        task_specify_prompt (TextPrompt): The prompt for specifying the task.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        task_type (TaskType, optional): The type of task for which to generate
            a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
        task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
            specifying the task. (default: :obj:`None`)
        word_limit (int, optional): The word limit for the task prompt.
            (default: :obj:`50`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
    """

    DEFAULT_WORD_LIMIT = 50

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
        task_type: TaskType = TaskType.AI_SOCIETY,
        task_specify_prompt: Optional[Union[str, TextPrompt]] = None,
        word_limit: int = DEFAULT_WORD_LIMIT,
        output_language: Optional[str] = None,
    ) -> None:
        self.task_specify_prompt: Union[str, TextPrompt]
        if task_specify_prompt is None:
            task_specify_prompt_template = (
                PromptTemplateGenerator().get_task_specify_prompt(task_type)
            )

            self.task_specify_prompt = task_specify_prompt_template.format(
                word_limit=word_limit
            )
        else:
            self.task_specify_prompt = TextPrompt(task_specify_prompt)

        system_message = BaseMessage(
            role_name="Task Specifier",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You can make a task more specific.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
        )

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
        meta_dict: Optional[Dict[str, Any]] = None,
    ) -> TextPrompt:
        r"""Specify the given task prompt by providing more details.

        Args:
            task_prompt (Union[str, TextPrompt]): The original task
                prompt.
            meta_dict (Dict[str, Any], optional): A dictionary containing
                additional information to include in the prompt.
                (default: :obj:`None`)

        Returns:
            TextPrompt: The specified task prompt.
        """
        self.reset()
        task_specify_prompt = self.task_specify_prompt.format(
            task=task_prompt
        )

        if meta_dict is not None:
            task_specify_prompt = task_specify_prompt.format(**meta_dict)
        task_msg = BaseMessage.make_user_message(
            role_name="Task Specifier", content=task_specify_prompt
        )
        specifier_response = self.step(task_msg)

        if specifier_response.terminated:
            raise RuntimeError("Task specification failed.")
        if len(specifier_response.msgs) == 0:
            raise RuntimeError("Got no specification message.")

        specified_task_msg = specifier_response.msgs[0]

        return TextPrompt(specified_task_msg.content)


@track_agent(name="TaskPlannerAgent")
class TaskPlannerAgent(ChatAgent):
    r"""An agent that helps divide a task into subtasks based on the input
    task prompt.

    Attributes:
        task_planner_prompt (TextPrompt): A prompt for the agent to divide
            the task into subtasks.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
        output_language: Optional[str] = None,
    ) -> None:
        self.task_planner_prompt = TextPrompt(
            "Divide this task into subtasks: {task}. Be concise."
        )
        system_message = BaseMessage(
            role_name="Task Planner",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful task planner.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
        )

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
    ) -> TextPrompt:
        r"""Generate subtasks based on the input task prompt.

        Args:
            task_prompt (Union[str, TextPrompt]): The prompt for the task to
                be divided into subtasks.

        Returns:
            TextPrompt: A prompt for the subtasks generated by the agent.
        """
        # TODO: Maybe include roles information.
        self.reset()
        task_planner_prompt = self.task_planner_prompt.format(
            task=task_prompt
        )

        task_msg = BaseMessage.make_user_message(
            role_name="Task Planner", content=task_planner_prompt
        )

        task_response = self.step(task_msg)

        if task_response.terminated:
            raise RuntimeError("Task planning failed.")
        if len(task_response.msgs) == 0:
            raise RuntimeError("Got no task planning message.")

        sub_tasks_msg = task_response.msgs[0]
        return TextPrompt(sub_tasks_msg.content)


@track_agent(name="TaskCreationAgent")
class TaskCreationAgent(ChatAgent):
    r"""An agent that helps create new tasks based on the objective
    and last completed task. Compared to :obj:`TaskPlannerAgent`,
    it's still a task planner, but it has more context information
    like the last task and the incomplete task list. Modified from
    `BabyAGI <https://github.com/yoheinakajima/babyagi>`_.

    Attributes:
        task_creation_prompt (TextPrompt): A prompt for the agent to
            create new tasks.

    Args:
        role_name (str): The role name of the Agent to create the task.
        objective (Union[str, TextPrompt]): The objective of the Agent to
            perform the task.
        model (BaseModelBackend, optional): The LLM backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        max_task_num (int, optional): The maximum number of planned
            tasks in one round. (default: :obj:`3`)
    """

    def __init__(
        self,
        role_name: str,
        objective: Union[str, TextPrompt],
        model: Optional[BaseModelBackend] = None,
        output_language: Optional[str] = None,
        message_window_size: Optional[int] = None,
        max_task_num: Optional[int] = 3,
    ) -> None:
        task_creation_prompt = TextPrompt(
            """Create a new task with the following objective: {objective}.
Never forget you are a Task Creator of {role_name}.
You must instruct me based on my expertise and your needs to solve the task.
You should consider past solved tasks and in-progress tasks: {task_list}.
The newly created tasks must not overlap with these past tasks.
The result must be a numbered list in the format:

#. First Task
#. Second Task
#. Third Task

You can only give me up to {max_task_num} tasks at a time. \
Each task should be concise, concrete and doable for a {role_name}.
You should make a task plan and not ask me questions.
If you think no new tasks are needed right now, write "No tasks to add."
Now start to give me new tasks one by one. No more than three tasks.
Be concrete.
"""
        )

        self.task_creation_prompt = task_creation_prompt.format(
            objective=objective, role_name=role_name, max_task_num=max_task_num
        )
        self.objective = objective

        system_message = BaseMessage(
            role_name="Task Creator",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful task creator.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
            message_window_size=message_window_size,
        )

    def run(
        self,
        task_list: List[str],
    ) -> List[str]:
        r"""Generate subtasks based on the previous task results and the
        incomplete task list.

        Args:
            task_list (List[str]): The completed or in-progress
                tasks which should not overlap with newly created tasks.

        Returns:
            List[str]: The new task list generated by the Agent.
        """

        if len(task_list) > 0:
            task_creation_prompt = self.task_creation_prompt.format(
                task_list=task_list
            )
        else:
            task_creation_prompt = self.task_creation_prompt.format(
                task_list=""
            )

        task_msg = BaseMessage.make_user_message(
            role_name="Task Creator", content=task_creation_prompt
        )
        task_response = self.step(task_msg)

        if task_response.terminated:
            raise RuntimeError("Task creation failed.")
        if len(task_response.msgs) == 0:
            raise RuntimeError("Got no task creation message.")

        sub_tasks_msg = task_response.msgs[0]
        return get_task_list(sub_tasks_msg.content)


@track_agent(name="TaskPrioritizationAgent")
class TaskPrioritizationAgent(ChatAgent):
    r"""An agent that helps re-prioritize the task list and
    returns a numbered prioritized list. Modified from
    `BabyAGI <https://github.com/yoheinakajima/babyagi>`_.

    Attributes:
        task_prioritization_prompt (TextPrompt): A prompt for the agent to
            prioritize tasks.

    Args:
        objective (Union[str, TextPrompt]): The objective of the Agent to
            perform the task.
        model (BaseModelBackend, optional): The LLM backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
    """

    def __init__(
        self,
        objective: Union[str, TextPrompt],
        model: Optional[BaseModelBackend] = None,
        output_language: Optional[str] = None,
        message_window_size: Optional[int] = None,
    ) -> None:
        task_prioritization_prompt = TextPrompt(
            """Prioritize the following tasks: {task_list}.
Consider your ultimate objective: {objective}.
Tasks should be sorted from highest to lowest priority, where higher-priority \
tasks are those that act as pre-requisites or are more essential for meeting \
the objective. Return one task per line in your response.
Do not remove or modify any tasks.
The result must be a numbered list in the format:

#. First task
#. Second task

The entries must be consecutively numbered, starting with 1.
The number of each entry must be followed by a period.
Do not include any headers before your ranked list or follow your list \
with any other output."""
        )

        self.task_prioritization_prompt = task_prioritization_prompt.format(
            objective=objective
        )
        self.objective = objective

        system_message = BaseMessage(
            role_name="Task Prioritizer",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful task prioritizer.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
            message_window_size=message_window_size,
        )

    def run(
        self,
        task_list: List[str],
    ) -> List[str]:
        r"""Prioritize the task list given the agent objective.

        Args:
            task_list (List[str]): The unprioritized tasks of the agent.

        Returns:
            List[str]: The new prioritized task list generated by the Agent.
        """
        task_prioritization_prompt = self.task_prioritization_prompt.format(
            task_list=task_list
        )

        task_msg = BaseMessage.make_user_message(
            role_name="Task Prioritizer", content=task_prioritization_prompt
        )

        task_response = self.step(task_msg)

        if task_response.terminated:
            raise RuntimeError("Task prioritization failed.")
        if len(task_response.msgs) == 0:
            raise RuntimeError("Got no task prioritization message.")

        sub_tasks_msg = task_response.msgs[0]
        return get_task_list(sub_tasks_msg.content)
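A sketch tying the four task agents above together in a BabyAGI-style round, not part of the commit. `execute_task` is a hypothetical worker function; the objective string is illustrative.

```python
# Illustrative sketch (not in the commit).
from camel.agents.task_agent import (
    TaskCreationAgent,
    TaskPlannerAgent,
    TaskPrioritizationAgent,
    TaskSpecifyAgent,
)

objective = "Write a survey of multi-agent LLM frameworks."

specified = TaskSpecifyAgent().run(task_prompt=objective)
print(TaskPlannerAgent().run(task_prompt=specified))  # one-shot breakdown

creator = TaskCreationAgent(role_name="Researcher", objective=specified)
prioritizer = TaskPrioritizationAgent(objective=specified)

done: list = []
todo = creator.run(task_list=done)
for _ in range(3):  # a few planning rounds
    todo = prioritizer.run(task_list=todo)
    task = todo.pop(0)
    done.append(task)  # execute_task(task) would run here
    todo.extend(creator.run(task_list=done + todo))
```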
deep-swarm/camel/agents/tool_agents/__init__.py
ADDED
@@ -0,0 +1,20 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from .base import BaseToolAgent
from .hugging_face_tool_agent import HuggingFaceToolAgent

__all__ = [
    'BaseToolAgent',
    'HuggingFaceToolAgent',
]
deep-swarm/camel/agents/tool_agents/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (383 Bytes).
deep-swarm/camel/agents/tool_agents/__pycache__/base.cpython-311.pyc
ADDED
Binary file (1.57 kB).
deep-swarm/camel/agents/tool_agents/__pycache__/hugging_face_tool_agent.cpython-311.pyc
ADDED
Binary file (10 kB).
deep-swarm/camel/agents/tool_agents/base.py
ADDED
@@ -0,0 +1,39 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from camel.agents import BaseAgent


class BaseToolAgent(BaseAgent):
    r"""Creates a :obj:`BaseToolAgent` object with the specified name and
    description.

    Args:
        name (str): The name of the tool agent.
        description (str): The description of the tool agent.
    """

    def __init__(self, name: str, description: str) -> None:
        self.name = name
        self.description = description

    def reset(self) -> None:
        r"""Resets the agent to its initial state."""
        pass

    def step(self) -> None:
        r"""Performs a single step of the agent."""
        pass

    def __str__(self) -> str:
        return f"{self.name}: {self.description}"
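A sketch of a custom tool agent built on `BaseToolAgent`, not part of the commit. The name and description are what `EmbodiedAgent._get_tool_agents_prompt` (shown earlier in this commit) injects into the action-space prompt; the `EchoToolAgent` class is hypothetical.

```python
# Illustrative sketch (not in the commit).
from camel.agents.tool_agents import BaseToolAgent


class EchoToolAgent(BaseToolAgent):
    r"""A trivial tool agent that returns its input unchanged."""

    def __init__(self) -> None:
        super().__init__(
            name="echo_agent",
            description="Returns whatever text it is given.",
        )

    def step(self, text: str = "") -> str:  # widens the base signature
        return text


print(EchoToolAgent())  # -> echo_agent: Returns whatever text it is given.
```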
deep-swarm/camel/agents/tool_agents/hugging_face_tool_agent.py
ADDED
@@ -0,0 +1,206 @@
| 1 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
#
|
| 6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 7 |
+
#
|
| 8 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 11 |
+
# See the License for the specific language governing permissions and
|
| 12 |
+
# limitations under the License.
|
| 13 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
| 14 |
+
from typing import Any, Optional
|
| 15 |
+
|
| 16 |
+
from camel.agents.tool_agents.base import BaseToolAgent
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# flake8: noqa :E501
|
| 20 |
+
class HuggingFaceToolAgent(BaseToolAgent):
|
| 21 |
+
r"""Tool agent for calling HuggingFace models. This agent is a wrapper
|
| 22 |
+
around agents from the `transformers` library. For more information
|
| 23 |
+
about the available models, please see the `transformers` documentation
|
| 24 |
+
at https://huggingface.co/docs/transformers/transformers_agents.
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
name (str): The name of the agent.
|
| 28 |
+
*args (Any): Additional positional arguments to pass to the underlying
|
| 29 |
+
Agent class.
|
| 30 |
+
remote (bool, optional): Flag indicating whether to run the agent
|
| 31 |
+
remotely. (default: :obj:`True`)
|
| 32 |
+
**kwargs (Any): Additional keyword arguments to pass to the underlying
|
| 33 |
+
Agent class.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(
|
| 37 |
+
self,
|
| 38 |
+
name: str,
|
| 39 |
+
*args: Any,
|
| 40 |
+
remote: bool = True,
|
| 41 |
+
**kwargs: Any,
|
| 42 |
+
) -> None:
|
| 43 |
+
try:
|
| 44 |
+
# TODO: Support other tool agents
|
| 45 |
+
import transformers
|
| 46 |
+
from packaging import version
|
| 47 |
+
|
| 48 |
+
if version.parse(transformers.__version__) < version.parse(
|
| 49 |
+
"4.31.0"
|
| 50 |
+
):
|
| 51 |
+
raise ValueError(
|
| 52 |
+
"The version of \"transformers\" package should >= 4.31.0"
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
from transformers.tools import OpenAiAgent
|
| 56 |
+
from transformers.tools.agent_types import AgentImage
|
| 57 |
+
except (ImportError, ValueError):
|
| 58 |
+
raise ValueError(
|
| 59 |
+
"Could not import transformers tool agents. "
|
| 60 |
+
"Please setup the environment with "
|
| 61 |
+
"pip install huggingface_hub==0.14.1 transformers==4.31.0 diffusers accelerate==0.20.3 datasets torch soundfile sentencepiece opencv-python"
|
| 62 |
+
)
|
| 63 |
+
self.agent_image_type = AgentImage
|
| 64 |
+
self.agent = OpenAiAgent(*args, **kwargs)
|
| 65 |
+
description = f"""The `{name}` is a tool agent that can perform a variety of tasks including:
|
| 66 |
+
- Document question answering: given a document (such as a PDF) in image format, answer a question on this document
|
| 67 |
+
- Text question answering: given a long text and a question, answer the question in the text
|
| 68 |
+
- Unconditional image captioning: Caption the image!
|
| 69 |
+
- Image question answering: given an image, answer a question on this image
|
| 70 |
+
- Image segmentation: given an image and a prompt, output the segmentation mask of that prompt
|
| 71 |
+
- Speech to text: given an audio recording of a person talking, transcribe the speech into text
|
| 72 |
+
- Text to speech: convert text to speech
|
| 73 |
+
- Zero-shot text classification: given a text and a list of labels, identify to which label the text corresponds the most
|
| 74 |
+
- Text summarization: summarize a long text in one or a few sentences
|
| 75 |
+
- Translation: translate the text into a given language
|
| 76 |
+
- Text downloading: to download a text from a web URL
|
| 77 |
+
- Text to image: generate an image according to a prompt, leveraging stable diffusion
|
| 78 |
+
- Image transformation: modify an image given an initial image and a prompt, leveraging instruct pix2pix stable diffusion
|
| 79 |
+
+        - Text to video: generate a small video according to a prompt
+
+        Here are some python code examples of what you can do with this agent:
+
+        Single execution (step) mode: single execution means using the step() method of the agent:
+        ```
+        # Text to image
+        rivers_and_lakes_image = {name}.step("Draw me a picture of rivers and lakes.")
+        rivers_and_lakes_image.save("./rivers_and_lakes_image.png")
+
+        # Text to image -> Image transformation
+        sea_add_island_image = {name}.step("Draw me a picture of the sea then transform the picture to add an island")
+        sea_add_island_image.save("./sea_add_island_image.png")
+
+        # If you'd like to keep a state across executions or to pass non-text objects to the agent,
+        # you can do so by specifying variables that you would like the agent to use. For example,
+        # you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following:
+        picture = {name}.step("Generate a picture of rivers and lakes.")
+        picture.save("./picture.png")
+        updated_picture = {name}.step("Transform the image in `picture` to add an island to it.", picture=picture)
+        updated_picture.save("./updated_picture.png")
+
+        capybara_sea_image = {name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
+        capybara_sea_image.save("./capybara_sea_image.png")
+
+        # Document question answering
+        answer = {name}.step(
+            "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?",
+            document=document,
+        )
+        print(answer)
+
+
+        # Text to image
+        boat_image = {name}.step("Generate an image of a boat in the water")
+        boat_image.save("./boat_image.png")
+
+        # Unconditional image captioning
+        boat_image_caption = {name}.step("Can you caption the `boat_image`?", boat_image=boat_image)
+        print(boat_image_caption)
+
+        # Text to image -> Unconditional image captioning -> Text to speech
+        boat_audio = {name}.step("Can you generate an image of a boat? Please read out loud the contents of the image afterwards")
+
+        # Text downloading
+        document = {name}.step("Download the text from http://hf.co")
+        print(document)
+
+        # Text summarization
+        summary = {name}.step("Summarize the following text: `document`", document=document)
+        print(summary)
+
+        # Text downloading -> Text summarization -> Text to speech
+        audio = {name}.step("Read out loud the summary of http://hf.co")
+        ```
+
+        Chat-based execution (chat) mode: the agent also has a chat-based approach, using the chat() method:
+        ```
+        # Clean the chat history
+        {name}.reset()
+
+        # Text to image
+        capybara_image = {name}.chat("Show me an image of a capybara")
+        capybara_image.save("./capybara_image.png")
+
+        # Image transformation
+        transformed_capybara_image = {name}.chat("Transform the image so that it snows")
+        transformed_capybara_image.save("./transformed_capybara_image.png")
+
+        # Image segmentation
+        segmented_transformed_capybara_image = {name}.chat("Show me a mask of the snowy capybaras")
+        segmented_transformed_capybara_image.save("./segmented_transformed_capybara_image.png")
+        ```
+        """
+        super(HuggingFaceToolAgent, self).__init__(name, description)
+        self.remote = remote
+
+    def reset(self) -> None:
+        r"""Resets the chat history of the agent."""
+        self.agent.prepare_for_new_chat()
+
+    def step(
+        self,
+        *args: Any,
+        remote: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> Any:
+        r"""Runs the agent in single execution mode.
+
+        Args:
+            *args (Any): Positional arguments to pass to the agent.
+            remote (bool, optional): Flag indicating whether to run the agent
+                remotely. Overrides the default setting. (default: :obj:`None`)
+            **kwargs (Any): Keyword arguments to pass to the agent.
+
+        Returns:
+            str: The response from the agent.
+        """
+        if remote is None:
+            remote = self.remote
+        agent_output = self.agent.run(*args, remote=remote, **kwargs)
+        if isinstance(agent_output, self.agent_image_type):
+            agent_output = agent_output.to_raw()
+        return agent_output
+
+    def chat(
+        self,
+        *args: Any,
+        remote: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> Any:
+        r"""Runs the agent in a chat conversation mode.
+
+        Args:
+            *args (Any): Positional arguments to pass to the agent.
+            remote (bool, optional): Flag indicating whether to run the agent
+                remotely. Overrides the default setting. (default: :obj:`None`)
+            **kwargs (Any): Keyword arguments to pass to the agent.
+
+        Returns:
+            str: The response from the agent.
+        """
+        if remote is None:
+            remote = self.remote
+        agent_output = self.agent.chat(*args, remote=remote, **kwargs)
+        if isinstance(agent_output, self.agent_image_type):
+            agent_output = agent_output.to_raw()
+        return agent_output
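For orientation, a minimal usage sketch of the agent above; the agent name and the explicit `remote` override are assumptions drawn from the visible constructor and `step()` logic, and the `transformers` tool-agent extras are assumed to be installed.

```python
# A minimal sketch, assuming the `transformers` tool-agent extras are
# installed and any required API keys are configured in the environment.
from camel.agents.tool_agents import HuggingFaceToolAgent

agent = HuggingFaceToolAgent("hugging_face_tool_agent", remote=True)

# remote=None (the default) falls back to the value set at construction;
# passing it explicitly overrides that default for this call only.
image = agent.step("Draw me a picture of rivers and lakes.", remote=False)
image.save("./rivers_and_lakes_image.png")
```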
deep-swarm/camel/benchmarks/__init__.py
ADDED
@@ -0,0 +1,17 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from .base import BaseBenchmark
+
+__all__ = ["BaseBenchmark"]
deep-swarm/camel/benchmarks/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (267 Bytes).

deep-swarm/camel/benchmarks/__pycache__/base.cpython-311.pyc
ADDED
Binary file (5.94 kB).
deep-swarm/camel/benchmarks/base.py
ADDED
@@ -0,0 +1,152 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import logging
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Any, Dict, List, Literal, Optional
+
+from camel.agents import ChatAgent
+
+logger = logging.getLogger(__name__)
+
+
+class BaseBenchmark(ABC):
+    r"""Base class for benchmarks.
+
+    Attributes:
+        name (str): Name of the benchmark.
+        data_dir (str): Path to the data directory.
+        save_to (str): Path to save the results.
+        processes (int): Number of processes to use for parallel
+            processing. (default: :obj:`1`)
+    """
+
+    def __init__(
+        self, name: str, data_dir: str, save_to: str, processes: int = 1
+    ):
+        r"""Initialize the benchmark.
+
+        Args:
+            name (str): Name of the benchmark.
+            data_dir (str): Path to the data directory.
+            save_to (str): Path to save the results.
+            processes (int): Number of processes to use for parallel
+                processing. (default: :obj:`1`)
+        """
+        self.name = name
+        self.data_dir = Path(data_dir)
+        self.processes = processes
+        self.save_to = save_to
+        if not self.data_dir.exists():
+            logger.info(
+                f"Data directory {data_dir} does not exist. Creating it."
+            )
+            self.data_dir.mkdir(parents=True, exist_ok=True)
+        if not self.data_dir.is_dir():
+            raise NotADirectoryError(
+                f"Data directory {data_dir} is not a directory"
+            )
+        self._data: Dict[str, List[Dict[str, Any]]] = dict()
+        self._results: List[Dict[str, Any]] = []
+
+    @abstractmethod
+    def download(self) -> "BaseBenchmark":
+        r"""Download the benchmark data.
+
+        Returns:
+            BaseBenchmark: The benchmark instance.
+        """
+        pass
+
+    @abstractmethod
+    def load(self, force_download: bool = False) -> "BaseBenchmark":
+        r"""Load the benchmark data.
+
+        Args:
+            force_download (bool): Whether to force download the data.
+
+        Returns:
+            BaseBenchmark: The benchmark instance.
+        """
+        pass
+
+    @property
+    def train(self) -> List[Dict[str, Any]]:
+        r"""Get the training data.
+
+        Returns:
+            List[Dict[str, Any]]: The training data.
+        """
+        if not self._data:
+            logger.info("Data not loaded. Loading data.")
+            self.load()
+        return self._data["train"]
+
+    @property
+    def valid(self) -> List[Dict[str, Any]]:
+        r"""Get the validation data.
+
+        Returns:
+            List[Dict[str, Any]]: The validation data.
+        """
+        if not self._data:
+            logger.info("Data not loaded. Loading data.")
+            self.load()
+        return self._data["valid"]
+
+    @property
+    def test(self) -> List[Dict[str, Any]]:
+        r"""Get the test data.
+
+        Returns:
+            List[Dict[str, Any]]: The test data.
+        """
+        if not self._data:
+            logger.info("Data not loaded. Loading data.")
+            self.load()
+        return self._data["test"]
+
+    @abstractmethod
+    def run(
+        self,
+        agent: ChatAgent,
+        on: Literal["train", "valid", "test"],
+        randomize: bool = False,
+        subset: Optional[int] = None,
+        *args,
+        **kwargs,
+    ) -> "BaseBenchmark":
+        r"""Run the benchmark.
+
+        Args:
+            agent (ChatAgent): The chat agent.
+            on (str): The data split to run the benchmark on.
+            randomize (bool): Whether to randomize the data.
+            subset (int): The subset of the data to run the benchmark on.
+
+        Returns:
+            BaseBenchmark: The benchmark instance.
+        """
+        pass
+
+    @property
+    def results(self) -> List[Dict[str, Any]]:
+        r"""Get the results.
+
+        Returns:
+            List[Dict[str, Any]]: The results.
+        """
+        return self._results
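To show how the abstract methods fit together, here is a hypothetical sketch of a concrete benchmark built on `BaseBenchmark`; the dataset name, file layout, and `agent.step(str)` call are illustrative assumptions, not part of the library.

```python
# A hypothetical ToyBenchmark, assuming one JSON-lines file per split in
# data_dir and a ChatAgent that accepts a plain-string user message.
import json
from typing import Literal, Optional

from camel.agents import ChatAgent
from camel.benchmarks import BaseBenchmark


class ToyBenchmark(BaseBenchmark):
    def download(self) -> "ToyBenchmark":
        # A real implementation would fetch the dataset into self.data_dir.
        return self

    def load(self, force_download: bool = False) -> "ToyBenchmark":
        if force_download:
            self.download()
        for split in ("train", "valid", "test"):
            path = self.data_dir / f"{split}.jsonl"
            self._data[split] = [
                json.loads(line) for line in path.read_text().splitlines()
            ]
        return self

    def run(
        self,
        agent: ChatAgent,
        on: Literal["train", "valid", "test"],
        randomize: bool = False,
        subset: Optional[int] = None,
        *args,
        **kwargs,
    ) -> "ToyBenchmark":
        data = self._data[on][:subset] if subset else self._data[on]
        for item in data:
            response = agent.step(item["question"])
            self._results.append(
                {"question": item["question"], "answer": response.msg.content}
            )
        return self

# Usage: ToyBenchmark("toy", "./data", "./results.json").load().run(agent, on="test")
```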
deep-swarm/camel/bots/__init__.py
ADDED
@@ -0,0 +1,34 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .discord_app import DiscordApp
+from .slack.models import (
+    SlackAppMentionEventBody,
+    SlackAppMentionEventProfile,
+    SlackAuthProfile,
+    SlackEventBody,
+    SlackEventProfile,
+)
+from .slack.slack_app import SlackApp
+from .telegram_bot import TelegramBot
+
+__all__ = [
+    'DiscordApp',
+    'SlackApp',
+    'SlackAppMentionEventBody',
+    'SlackAppMentionEventProfile',
+    'SlackAuthProfile',
+    'SlackEventBody',
+    'SlackEventProfile',
+    'TelegramBot',
+]
deep-swarm/camel/bots/discord_app.py
ADDED
@@ -0,0 +1,138 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import logging
+import os
+from typing import TYPE_CHECKING, List, Optional
+
+from camel.utils import dependencies_required
+
+if TYPE_CHECKING:
+    from discord import Message
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class DiscordApp:
+    r"""A class representing a Discord app that uses the `discord.py` library
+    to interact with Discord servers.
+
+    This bot can respond to messages in specific channels and only reacts to
+    messages that mention the bot.
+
+    Attributes:
+        channel_ids (Optional[List[int]]): A list of allowed channel IDs. If
+            provided, the bot will only respond to messages in these channels.
+        token (Optional[str]): The Discord bot token used for authentication.
+    """
+
+    @dependencies_required('discord')
+    def __init__(
+        self,
+        channel_ids: Optional[List[int]] = None,
+        token: Optional[str] = None,
+    ) -> None:
+        r"""Initialize the DiscordApp instance by setting up the Discord client
+        and event handlers.
+
+        Args:
+            channel_ids (Optional[List[int]]): A list of allowed channel IDs.
+                The bot will only respond to messages in these channels if
+                provided.
+            token (Optional[str]): The Discord bot token for authentication.
+                If not provided, the token will be retrieved from the
+                environment variable `DISCORD_TOKEN`.
+
+        Raises:
+            ValueError: If the `DISCORD_TOKEN` is not found in environment
+                variables.
+        """
+        self.token = token or os.getenv('DISCORD_TOKEN')
+        self.channel_ids = channel_ids
+
+        if not self.token:
+            raise ValueError(
+                "`DISCORD_TOKEN` not found in environment variables. Get it"
+                " here: `https://discord.com/developers/applications`."
+            )
+
+        import discord
+
+        intents = discord.Intents.default()
+        intents.message_content = True
+        self._client = discord.Client(intents=intents)
+
+        # Register event handlers
+        self._client.event(self.on_ready)
+        self._client.event(self.on_message)
+
+    async def start(self):
+        r"""Asynchronously start the Discord bot using its token.
+
+        This method starts the bot and logs into Discord asynchronously using
+        the provided token. It should be awaited when used in an async
+        environment.
+        """
+        await self._client.start(self.token)
+
+    def run(self) -> None:
+        r"""Start the Discord bot using its token.
+
+        This method starts the bot and logs into Discord synchronously using
+        the provided token. It blocks execution and keeps the bot running.
+        """
+        self._client.run(self.token)  # type: ignore[arg-type]
+
+    async def on_ready(self) -> None:
+        r"""Event handler that is called when the bot has successfully
+        connected to the Discord server.
+
+        When the bot is ready and logged into Discord, it logs a message
+        displaying the bot's username.
+        """
+        logger.info(f'We have logged in as {self._client.user}')
+
+    async def on_message(self, message: 'Message') -> None:
+        r"""Event handler for processing incoming messages.
+
+        This method is called whenever a new message is received by the bot. It
+        will ignore messages sent by the bot itself, only respond to messages
+        in allowed channels (if specified), and only to messages that mention
+        the bot.
+
+        Args:
+            message (discord.Message): The message object received from
+                Discord.
+        """
+        # If the message author is the bot itself,
+        # do not respond to this message
+        if message.author == self._client.user:
+            return
+
+        # If allowed channel IDs are provided,
+        # only respond to messages in those channels
+        if self.channel_ids and message.channel.id not in self.channel_ids:
+            return
+
+        # Only respond to messages that mention the bot
+        if not self._client.user or not self._client.user.mentioned_in(
+            message
+        ):
+            return
+
+        logger.info(f"Received message: {message.content}")
+
+    @property
+    def client(self):
+        return self._client
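A minimal usage sketch for the class above, assuming a bot token is available in `DISCORD_TOKEN` and `discord.py` is installed; the channel ID is a placeholder.

```python
# A minimal sketch, assuming DISCORD_TOKEN is set in the environment.
from camel.bots import DiscordApp

app = DiscordApp(channel_ids=[123456789012345678])  # placeholder channel ID
app.run()  # blocks; use `await app.start()` inside an event loop instead
```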
deep-swarm/camel/bots/slack/__init__.py
ADDED
@@ -0,0 +1,30 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .models import (
+    SlackAppMentionEventBody,
+    SlackAppMentionEventProfile,
+    SlackAuthProfile,
+    SlackEventBody,
+    SlackEventProfile,
+)
+from .slack_app import SlackApp
+
+__all__ = [
+    'SlackApp',
+    'SlackAppMentionEventBody',
+    'SlackAppMentionEventProfile',
+    'SlackAuthProfile',
+    'SlackEventBody',
+    'SlackEventProfile',
+]
deep-swarm/camel/bots/slack/models.py
ADDED
@@ -0,0 +1,158 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class SlackAuthProfile(BaseModel):
+    r"""Represents the authorization profile within a Slack event.
+
+    Events will contain a single, compact authorizations field that shows one
+    installation of your app that the event is visible to.
+    In other words, lists of authorizations will be truncated to one element.
+
+    If there's more than one installing party that your app is keeping track
+    of, it's best not to rely on the single party listed in authorizations to
+    be any particular one.
+
+    To get a full list of who can see events, call the apps.event.
+    authorizations.list method after obtaining an app-level token. Read more on
+    the changes here; they have taken effect for existing apps as of
+    February 24, 2021.
+
+    References:
+
+    - https://api.slack.com/apis/events-api#authorizations
+    - https://api.slack.com/changelog/2020-09-15-events-api-truncate-authed-users#no_context
+    """
+
+    enterprise_id: Optional[str] = None
+    """The ID of the enterprise associated with the authorization."""
+
+    team_id: str
+    """The ID of the team associated with the authorization."""
+
+    user_id: str
+    """The ID of the user associated with the authorization."""
+
+    is_bot: bool
+    """Whether the authorized user is a bot."""
+
+    is_enterprise_install: bool
+    """Whether the authorization is for an enterprise installation."""
+
+
+class SlackEventProfile(BaseModel):
+    r"""Represents the detailed profile of a Slack event, including user,
+    message, and context data.
+    """
+
+    user: str
+    """The ID of the user associated with the event."""
+
+    type: str
+    """The type of the event (e.g., 'message')."""
+
+    ts: str
+    """A timestamp representing when the event was triggered."""
+
+    thread_ts: Optional[str] = None
+    """The timestamp of the parent message in a thread."""
+
+    client_msg_id: str
+    """A unique ID generated by the client for the message (if available)."""
+
+    text: str
+    """The message content text."""
+
+    team: str
+    """The ID of the team that the event is associated with."""
+
+    blocks: list
+    """The list of message blocks, providing structured information."""
+
+    channel: str
+    """The ID of the Slack channel where the event happened."""
+
+    event_ts: str
+    """The event-specific timestamp when it occurred."""
+
+    channel_type: Optional[str]
+    """The type of Slack channel (e.g., 'channel', 'im')."""
+
+
+class SlackEventBody(BaseModel):
+    r"""Represents the entire body of a Slack event, including the event
+    profile, authorization, and context.
+    """
+
+    token: str
+    """The token to verify the source of the event."""
+
+    team_id: str
+    """The ID of the team where the event is happening."""
+
+    context_team_id: Optional[str]
+    """The team ID for the shared channel context, if applicable."""
+
+    context_enterprise_id: Optional[str] = None
+    """The enterprise ID for the shared channel context, if applicable."""
+
+    api_app_id: str
+    """The unique identifier for the Slack app that received the event."""
+
+    event: SlackEventProfile
+    """A detailed profile of the event."""
+
+    type: str
+    """The overall type of event received (e.g., 'event_callback')."""
+
+    event_id: str
+    """A unique identifier assigned to this event by Slack."""
+
+    event_time: int
+    """The timestamp (in seconds) representing when the event was triggered."""
+
+    authorizations: Optional[list[SlackAuthProfile]] = None
+    """An optional list of authorizations that describe which installation can
+    see the event."""
+
+    is_ext_shared_channel: bool
+    """Indicates if the event is part of a shared channel between different
+    organizations."""
+
+    event_context: str
+    """A unique string representing the context of the event."""
+
+
+class SlackAppMentionEventProfile(SlackEventProfile):
+    r"""Represents the detailed profile of a Slack event where the app was
+    mentioned in a message.
+    """
+
+    channel_type: Optional[str] = None
+    """The type of Slack channel. It's None for app mentions."""
+
+
+class SlackAppMentionEventBody(SlackEventBody):
+    r"""Represents the entire body of a Slack event where the app was mentioned
+    in a message.
+    """
+
+    context_team_id: Optional[str] = None
+    """The team ID for the shared channel context. It's None for app
+    mentions."""
+
+    event: SlackAppMentionEventProfile
+    """A detailed profile of the event."""
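A brief sketch of validating a raw Slack event payload with the pydantic models above; every field value here is made up for illustration, not taken from a real workspace.

```python
# A hypothetical event payload, shaped like Slack's event_callback envelope.
from camel.bots.slack.models import SlackEventBody

payload = {
    "token": "verification-token",
    "team_id": "T0001",
    "context_team_id": "T0001",
    "api_app_id": "A0001",
    "event": {
        "user": "U0001",
        "type": "message",
        "ts": "1700000000.000100",
        "client_msg_id": "d41d8cd9",
        "text": "hello",
        "team": "T0001",
        "blocks": [],
        "channel": "C0001",
        "event_ts": "1700000000.000100",
        "channel_type": "channel",
    },
    "type": "event_callback",
    "event_id": "Ev0001",
    "event_time": 1700000000,
    "is_ext_shared_channel": False,
    "event_context": "1-message-T0001-C0001",
}

body = SlackEventBody(**payload)  # raises ValidationError on bad payloads
print(body.event.text)  # -> "hello"
```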
deep-swarm/camel/bots/slack/slack_app.py
ADDED
@@ -0,0 +1,255 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import logging
+import os
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from slack_sdk.oauth.installation_store.async_installation_store import (
+    AsyncInstallationStore,
+)
+from starlette import requests, responses
+
+from camel.bots.slack.models import (
+    SlackAppMentionEventBody,
+    SlackAppMentionEventProfile,
+    SlackEventBody,
+    SlackEventProfile,
+)
+from camel.utils import dependencies_required
+
+if TYPE_CHECKING:
+    from slack_bolt.context.async_context import AsyncBoltContext
+    from slack_bolt.context.say.async_say import AsyncSay
+    from slack_sdk.web.async_client import AsyncWebClient
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class SlackApp:
+    r"""Represents a Slack app that is powered by a Slack Bolt `AsyncApp`.
+
+    This class is responsible for initializing and managing the Slack
+    application by setting up event handlers, running the app server, and
+    handling events such as messages and mentions from Slack.
+
+    Args:
+        token (Optional[str]): Slack API token for authentication.
+        scopes (Optional[str]): Slack app scopes for permissions.
+        signing_secret (Optional[str]): Signing secret for verifying Slack
+            requests.
+        client_id (Optional[str]): Slack app client ID.
+        client_secret (Optional[str]): Slack app client secret.
+        redirect_uri_path (str): The URI path for OAuth redirect, defaults to
+            "/slack/oauth_redirect".
+        installation_store (Optional[AsyncInstallationStore]): The installation
+            store for handling OAuth installations.
+    """
+
+    @dependencies_required('slack_bolt')
+    def __init__(
+        self,
+        token: Optional[str] = None,
+        scopes: Optional[str] = None,
+        signing_secret: Optional[str] = None,
+        client_id: Optional[str] = None,
+        client_secret: Optional[str] = None,
+        redirect_uri_path: str = "/slack/oauth_redirect",
+        installation_store: Optional[AsyncInstallationStore] = None,
+    ) -> None:
+        r"""Initializes the SlackApp instance by setting up the Slack Bolt app
+        and configuring event handlers and OAuth settings.
+
+        Args:
+            token (Optional[str]): The Slack API token.
+            scopes (Optional[str]): The scopes for Slack app permissions.
+            signing_secret (Optional[str]): The signing secret for verifying
+                requests.
+            client_id (Optional[str]): The Slack app client ID.
+            client_secret (Optional[str]): The Slack app client secret.
+            redirect_uri_path (str): The URI path for handling OAuth redirects
+                (default is "/slack/oauth_redirect").
+            installation_store (Optional[AsyncInstallationStore]): An optional
+                installation store for OAuth installations.
+        """
+        from slack_bolt.adapter.starlette.async_handler import (
+            AsyncSlackRequestHandler,
+        )
+        from slack_bolt.app.async_app import AsyncApp
+        from slack_bolt.oauth.async_oauth_settings import AsyncOAuthSettings
+
+        self.token: Optional[str] = token or os.getenv("SLACK_TOKEN")
+        self.scopes: Optional[str] = scopes or os.getenv("SLACK_SCOPES")
+        self.signing_secret: Optional[str] = signing_secret or os.getenv(
+            "SLACK_SIGNING_SECRET"
+        )
+        self.client_id: Optional[str] = client_id or os.getenv(
+            "SLACK_CLIENT_ID"
+        )
+        self.client_secret: Optional[str] = client_secret or os.getenv(
+            "SLACK_CLIENT_SECRET"
+        )
+
+        if not all([self.token, self.scopes, self.signing_secret]):
+            raise ValueError(
+                "`SLACK_TOKEN`, `SLACK_SCOPES`, and `SLACK_SIGNING_SECRET` "
+                "environment variables must be set. Get them here: "
+                "`https://api.slack.com/apps`."
+            )
+
+        # Setup OAuth settings if client ID and secret are provided
+        if self.client_id and self.client_secret:
+            self._app = AsyncApp(
+                oauth_settings=AsyncOAuthSettings(
+                    client_id=self.client_id,
+                    client_secret=self.client_secret,
+                    scopes=self.scopes,
+                    redirect_uri_path=redirect_uri_path,
+                ),
+                logger=logger,
+                signing_secret=self.signing_secret,
+                installation_store=installation_store,
+                token=self.token,
+            )
+        else:
+            # Initialize Slack Bolt AsyncApp with settings
+            self._app = AsyncApp(
+                logger=logger,
+                signing_secret=self.signing_secret,
+                installation_store=installation_store,
+                token=self.token,
+            )
+
+        self._handler = AsyncSlackRequestHandler(self._app)
+        self.setup_handlers()
+
+    def setup_handlers(self) -> None:
+        r"""Sets up the event handlers for Slack events, such as `app_mention`
+        and `message`.
+
+        This method registers the `app_mention` and `on_message` event handlers
+        with the Slack Bolt app to respond to Slack events.
+        """
+        self._app.event("app_mention")(self.app_mention)
+        self._app.event("message")(self.on_message)
+
+    def run(
+        self,
+        port: int = 3000,
+        path: str = "/slack/events",
+        host: Optional[str] = None,
+    ) -> None:
+        r"""Starts the Slack Bolt app server to listen for incoming Slack
+        events.
+
+        Args:
+            port (int): The port on which the server should run (default is
+                3000).
+            path (str): The endpoint path for receiving Slack events (default
+                is "/slack/events").
+            host (Optional[str]): The hostname to bind the server (default is
+                None).
+        """
+        self._app.start(port=port, path=path, host=host)
+
+    async def handle_request(
+        self, request: requests.Request
+    ) -> responses.Response:
+        r"""Handles incoming requests from Slack through the request handler.
+
+        Args:
+            request (Request): A Starlette request object representing the
+                incoming request.
+
+        Returns:
+            The response generated by the Slack Bolt handler.
+        """
+        return await self._handler.handle(request)
+
+    async def app_mention(
+        self,
+        context: "AsyncBoltContext",
+        client: "AsyncWebClient",
+        event: Dict[str, Any],
+        body: Dict[str, Any],
+        say: "AsyncSay",
+    ) -> None:
+        r"""Event handler for `app_mention` events.
+
+        This method is triggered when someone mentions the app in Slack.
+
+        Args:
+            context (AsyncBoltContext): The Slack Bolt context for the event.
+            client (AsyncWebClient): The Slack Web API client.
+            event (Dict[str, Any]): The event data for the app mention.
+            body (Dict[str, Any]): The full request body from Slack.
+            say (AsyncSay): A function to send a response back to the channel.
+        """
+        event_profile = SlackAppMentionEventProfile(**event)
+        event_body = SlackAppMentionEventBody(**body)
+
+        logger.info(f"app_mention, context: {context}")
+        logger.info(f"app_mention, client: {client}")
+        logger.info(f"app_mention, event_profile: {event_profile}")
+        logger.info(f"app_mention, event_body: {event_body}")
+        logger.info(f"app_mention, say: {say}")
+
+    async def on_message(
+        self,
+        context: "AsyncBoltContext",
+        client: "AsyncWebClient",
+        event: Dict[str, Any],
+        body: Dict[str, Any],
+        say: "AsyncSay",
+    ) -> None:
+        r"""Event handler for `message` events.
+
+        This method is triggered when the app receives a message in Slack.
+
+        Args:
+            context (AsyncBoltContext): The Slack Bolt context for the event.
+            client (AsyncWebClient): The Slack Web API client.
+            event (Dict[str, Any]): The event data for the message.
+            body (Dict[str, Any]): The full request body from Slack.
+            say (AsyncSay): A function to send a response back to the channel.
+        """
+        await context.ack()
+
+        event_profile = SlackEventProfile(**event)
+        event_body = SlackEventBody(**body)
+
+        logger.info(f"on_message, context: {context}")
+        logger.info(f"on_message, client: {client}")
+        logger.info(f"on_message, event_profile: {event_profile}")
+        logger.info(f"on_message, event_body: {event_body}")
+        logger.info(f"on_message, say: {say}")
+
+        logger.info(f"Received message: {event_profile.text}")
+
+    def mention_me(
+        self, context: "AsyncBoltContext", body: SlackEventBody
+    ) -> bool:
+        r"""Check if the bot is mentioned in the message.
+
+        Args:
+            context (AsyncBoltContext): The Slack Bolt context for the event.
+            body (SlackEventBody): The body of the Slack event.
+
+        Returns:
+            bool: True if the bot is mentioned in the message, False otherwise.
+        """
+        message = body.event.text
+        bot_user_id = context.bot_user_id
+        mention = f"<@{bot_user_id}>"
+        return mention in message
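Since `handle_request` takes a Starlette request, the app can also be served from an existing ASGI application instead of calling `run()`. A hypothetical sketch; `uvicorn` is an assumed extra dependency, and the route path simply mirrors the default used by `run()`.

```python
# A minimal sketch, assuming SLACK_TOKEN, SLACK_SCOPES, and
# SLACK_SIGNING_SECRET are set in the environment.
import uvicorn
from starlette.applications import Starlette
from starlette.routing import Route

from camel.bots import SlackApp

slack_app = SlackApp()

async def endpoint(request):
    # Delegate verification and dispatch to the Slack Bolt handler.
    return await slack_app.handle_request(request)

app = Starlette(routes=[Route("/slack/events", endpoint, methods=["POST"])])

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=3000)
```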
deep-swarm/camel/bots/telegram_bot.py
ADDED
@@ -0,0 +1,82 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import TYPE_CHECKING, Optional
+
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.utils import dependencies_required
+
+# Conditionally import telebot types only for type checking
+if TYPE_CHECKING:
+    from telebot.types import (  # type: ignore[import-untyped]
+        Message,
+    )
+
+
+class TelegramBot:
+    r"""Represents a Telegram bot that is powered by an agent.
+
+    Attributes:
+        chat_agent (ChatAgent): Chat agent that will power the bot.
+        telegram_token (str, optional): The bot token.
+    """
+
+    @dependencies_required('telebot')
+    def __init__(
+        self,
+        chat_agent: ChatAgent,
+        telegram_token: Optional[str] = None,
+    ) -> None:
+        self.chat_agent = chat_agent
+
+        if not telegram_token:
+            self.token = os.getenv('TELEGRAM_TOKEN')
+            if not self.token:
+                raise ValueError(
+                    "`TELEGRAM_TOKEN` not found in environment variables. "
+                    "Get it from t.me/BotFather."
+                )
+        else:
+            self.token = telegram_token
+
+        import telebot  # type: ignore[import-untyped]
+
+        self.bot = telebot.TeleBot(token=self.token)
+
+        # Register the message handler within the constructor
+        self.bot.message_handler(func=lambda message: True)(self.on_message)
+
+    def run(self) -> None:
+        r"""Start the Telegram bot."""
+        print("Telegram bot is running...")
+        self.bot.infinity_polling()
+
+    def on_message(self, message: 'Message') -> None:
+        r"""Handles incoming messages from the user.
+
+        Args:
+            message (types.Message): The incoming message object.
+        """
+        self.chat_agent.reset()
+
+        if not message.text:
+            return
+
+        user_msg = BaseMessage.make_user_message(
+            role_name="User", content=message.text
+        )
+        assistant_response = self.chat_agent.step(user_msg)
+
+        self.bot.reply_to(message, assistant_response.msg.content)
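A minimal usage sketch, assuming `TELEGRAM_TOKEN` is set, `telebot` is installed, and a model backend is configured; the bare-string system message follows recent CAMEL `ChatAgent` versions and is illustrative.

```python
# A minimal sketch, assuming TELEGRAM_TOKEN is set in the environment.
from camel.agents import ChatAgent
from camel.bots import TelegramBot

agent = ChatAgent("You are a helpful assistant.")  # illustrative system message
bot = TelegramBot(chat_agent=agent)
bot.run()  # blocks, polling Telegram for new messages
```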
deep-swarm/camel/configs/__init__.py
ADDED
@@ -0,0 +1,76 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
+from .base_config import BaseConfig
+from .cohere_config import COHERE_API_PARAMS, CohereConfig
+from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
+from .gemini_config import Gemini_API_PARAMS, GeminiConfig
+from .groq_config import GROQ_API_PARAMS, GroqConfig
+from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
+from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
+from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
+from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig
+from .qwen_config import QWEN_API_PARAMS, QwenConfig
+from .reka_config import REKA_API_PARAMS, RekaConfig
+from .samba_config import (
+    SAMBA_CLOUD_API_PARAMS,
+    SAMBA_VERSE_API_PARAMS,
+    SambaCloudAPIConfig,
+    SambaVerseAPIConfig,
+)
+from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
+from .vllm_config import VLLM_API_PARAMS, VLLMConfig
+from .yi_config import YI_API_PARAMS, YiConfig
+from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
+
+__all__ = [
+    'BaseConfig',
+    'ChatGPTConfig',
+    'OPENAI_API_PARAMS',
+    'AnthropicConfig',
+    'ANTHROPIC_API_PARAMS',
+    'GROQ_API_PARAMS',
+    'GroqConfig',
+    'LiteLLMConfig',
+    'LITELLM_API_PARAMS',
+    'NvidiaConfig',
+    'NVIDIA_API_PARAMS',
+    'OllamaConfig',
+    'OLLAMA_API_PARAMS',
+    'ZhipuAIConfig',
+    'ZHIPUAI_API_PARAMS',
+    'GeminiConfig',
+    'Gemini_API_PARAMS',
+    'VLLMConfig',
+    'VLLM_API_PARAMS',
+    'MistralConfig',
+    'MISTRAL_API_PARAMS',
+    'RekaConfig',
+    'REKA_API_PARAMS',
+    'SambaVerseAPIConfig',
+    'SAMBA_VERSE_API_PARAMS',
+    'SambaCloudAPIConfig',
+    'SAMBA_CLOUD_API_PARAMS',
+    'TogetherAIConfig',
+    'TOGETHERAI_API_PARAMS',
+    'CohereConfig',
+    'COHERE_API_PARAMS',
+    'YiConfig',
+    'YI_API_PARAMS',
+    'QwenConfig',
+    'QWEN_API_PARAMS',
+    'DeepSeekConfig',
+    'DEEPSEEK_API_PARAMS',
+]
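A brief sketch of how these config classes are typically consumed: build a config and pass its dict form when creating a model. `ModelFactory`, the platform/type enums, and `as_dict()` are assumptions from the wider CAMEL API, not defined in this file.

```python
# A minimal sketch, assuming the ModelFactory API from the wider CAMEL
# package; model names and the temperature value are illustrative.
from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

config = ChatGPTConfig(temperature=0.2)
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
    model_config_dict=config.as_dict(),
)
```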
deep-swarm/camel/configs/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (2.29 kB).

deep-swarm/camel/configs/__pycache__/anthropic_config.cpython-311.pyc
ADDED
Binary file (3.52 kB).

deep-swarm/camel/configs/__pycache__/base_config.cpython-311.pyc
ADDED
Binary file (3.2 kB).

deep-swarm/camel/configs/__pycache__/cohere_config.cpython-311.pyc
ADDED
Binary file (4.24 kB).

deep-swarm/camel/configs/__pycache__/deepseek_config.cpython-311.pyc
ADDED
Binary file (7.49 kB).

deep-swarm/camel/configs/__pycache__/gemini_config.cpython-311.pyc
ADDED
Binary file (6.27 kB).