Revert autogpt; continue development on a new branch
@@ -366,9 +366,9 @@ class ChatBot(ChatBotFrame):
            self.draw_public_chat()
            self.draw_setting_chat()
            # Draw the Auto-GPT module
            with gr.Tab('Auto-GPT'):
                self.draw_next_auto()
                self.draw_goals_auto()
            # with gr.Tab('Auto-GPT'):
            #     self.draw_next_auto()
            #     self.draw_goals_auto()
            with self.chat_tab:  # When components are copied with gr.State(), anything drawn in Markdown beforehand crashes on startup, so all Markdown-related drawing is done last
                self.draw_chatbot()
            with self.prompt_tab:
@@ -378,8 +378,8 @@ class ChatBot(ChatBotFrame):
        self.signals_function()
        self.signals_prompt_func()
        self.signals_public()
        self.signals_auto_input()
        self.signals_prompt_edit()
        # self.signals_auto_input()

        # Start
        self.auto_opentab_delay()
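The hunk above wires the Auto-GPT panels into Gradio tabs, drawing the Markdown-heavy chat components last to dodge the gr.State() copy crash noted in the comment. A minimal, self-contained sketch of the same pattern (component names here are hypothetical stand-ins for draw_next_auto / draw_goals_auto / draw_chatbot; assumes only the gradio package):

import gradio as gr

with gr.Blocks() as demo:
    with gr.Tab('Auto-GPT'):
        goals = gr.Textbox(label='Goals')   # stands in for draw_goals_auto()
        next_btn = gr.Button('Next step')   # stands in for draw_next_auto()
    with gr.Tab('Chat'):
        chatbot = gr.Chatbot()              # Markdown-rendering component drawn last,
                                            # mirroring the workaround described above

demo.launch()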
autogpt/CURRENT_BULLETIN.md (new file, 2 lines)
@@ -0,0 +1,2 @@
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag
@@ -1,14 +0,0 @@
import os
import random
import sys

from dotenv import load_dotenv

if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
    print("Setting random seed to 42")
    random.seed(42)

# Load the users .env file into environment variables
load_dotenv(verbose=True, override=True)

del load_dotenv
@@ -1,29 +1,11 @@
import signal
import sys
from datetime import datetime

from colorama import Fore, Style

from autogpt.app import execute_command, get_command
from autogpt.commands.command import CommandRegistry
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm.base import ChatSequence
from autogpt.llm.chat import chat_with_ai, create_chat_completion
from autogpt.llm.utils import count_string_tokens
from autogpt.log_cycle.log_cycle import (
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
    SUPERVISOR_FEEDBACK_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.json_utils.utilities import validate_json
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.memory.message_history import MessageHistory
from autogpt.memory.vector import VectorMemory
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
@@ -36,6 +18,7 @@ class Agent:
    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        system_prompt: The system prompt is the initial prompt that defines everything
            the AI needs to know to achieve its task successfully.
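Going by the attribute list above, constructing an Agent looks roughly like this — a sketch mirroring the call site that appears later in cli.py, not the project's only entry point (the ai_name value and triggering prompt text are illustrative):

agent = Agent(
    ai_name='EntrepreneurGPT',                 # illustrative name
    memory=memory,                             # the memory backend object
    full_message_history=[],                   # complete message log so far
    next_action_count=0,                       # 0 = ask the user before every command
    command_registry=command_registry,
    config=ai_config,
    system_prompt=ai_config.construct_full_prompt(),
    triggering_prompt='Determine which next command to use...',
    workspace_directory=workspace_directory,
)
agent.start_interaction_loop()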
@@ -59,251 +42,189 @@ class Agent:
    """

    def __init__(
        self,
        ai_name: str,
        memory: VectorMemory,
        next_action_count: int,
        command_registry: CommandRegistry,
        config: AIConfig,
        system_prompt: str,
        triggering_prompt: str,
        workspace_directory: str,
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    ):
        cfg = Config()
        self.cfg = Config()
        self.ai_name = ai_name
        self.memory = memory
        self.history = MessageHistory(self)
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.command_registry = command_registry
        self.config = config
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)
        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.cycle_count = 0
        self.log_cycle_handler = LogCycleHandler()
        self.workspace = Workspace(workspace_directory, self.cfg.restrict_to_workspace)
        self.loop_count = 0
        self.command_name = None
        self.arguments = None
        self.user_input = ""
        self.cfg = Config()

    def start_interaction_loop(self):
        # Interaction Loop
        cfg = Config()
        self.cycle_count = 0
        command_name = None
        arguments = None
        user_input = ""

        # Signal handler for interrupting y -N
        def signal_handler(signum, frame):
            if self.next_action_count == 0:
                sys.exit()
            else:
                print(
                    Fore.RED
                    + "Interrupt signal received. Stopping continuous command execution."
                    + Style.RESET_ALL
                )
                self.next_action_count = 0

        signal.signal(signal.SIGINT, signal_handler)

        while True:
            # Discontinue if continuous limit is reached
            self.cycle_count += 1
            self.log_cycle_handler.log_count_within_cycle = 0
            self.log_cycle_handler.log_cycle(
                self.config.ai_name,
                self.created_at,
                self.cycle_count,
                [m.raw() for m in self.history],
                FULL_MESSAGE_HISTORY_FILE_NAME,
            # Discontinue if continuous limit is reached
            self.loop_count += 1
            if (
                self.cfg.continuous_mode
                and self.cfg.continuous_limit > 0
                and self.loop_count > self.cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{self.cfg.continuous_limit}"
                )
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and self.cycle_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break
            # Send message to AI, get response
            with Spinner("Thinking... ", plain_output=cfg.plain_output):
                assistant_reply = chat_with_ai(
                    cfg,
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    cfg.fast_token_limit,
                    cfg.fast_llm_model,
                )
            # break

            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(assistant_reply_json)
            # Send message to AI, get response
            with Spinner("Thinking... "):
                self.assistant_reply = chat_with_ai(
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    self.full_message_history,
                    self.memory,
                    self.cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            # Print Assistant thoughts
            if assistant_reply_json != {}:
                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                # Get command name and arguments
                try:
                    print_assistant_thoughts(
                        self.ai_name, assistant_reply_json, cfg.speak_mode
                    )
                    command_name, arguments = get_command(assistant_reply_json)
                    if cfg.speak_mode:
                        say_text(f"I want to execute {command_name}")
            self.assistant_reply_json = fix_json_using_multiple_techniques(self.assistant_reply)
            for plugin in self.cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                self.assistant_reply_json = plugin.post_planning(self, self.assistant_reply_json)

                    arguments = self._resolve_pathlike_command_args(arguments)
            # Print Assistant thoughts
            if self.assistant_reply_json != {}:
                validate_json(self.assistant_reply_json, "llm_response_format_1")
                # Get command name and arguments
                try:
                    print_assistant_thoughts(self.ai_name, self.assistant_reply_json)
                    self.command_name, self.arguments = get_command(self.assistant_reply_json)
                    if self.cfg.speak_mode:
                        say_text(f"I want to execute {self.command_name}")
                    self.arguments = self._resolve_pathlike_command_args(self.arguments)

                except Exception as e:
                    logger.error("Error: \n", str(e))
            self.log_cycle_handler.log_cycle(
                self.config.ai_name,
                self.created_at,
                self.cycle_count,
                assistant_reply_json,
                NEXT_ACTION_FILE_NAME,
            )
                except Exception as e:
                    logger.error("Error: \n", str(e))

            if not self.cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                    f"COMMAND = {self.command_name}"
                    f"ARGUMENTS = {self.arguments}",
                )
                logger.typewriter_log(
                    "",
                    "",
                    "Enter 'y' to authorise command, 'y -N' to run N continuous "
                    "commands, 'n' to exit program, or enter feedback for "
                    f"{self.ai_name}...",
                )

            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                logger.info(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
                    "'n' to exit program, or enter feedback for "
                    f"{self.ai_name}..."

    def start_interaction_next(self, cookie, chatbot, history, msg, _input, obj):
        console_input = _input
        if console_input.lower().strip() == "y":
            self.user_input = "GENERATE NEXT COMMAND JSON"
        elif console_input.lower().strip() == "":
            print("Invalid input format.")
            return
        elif console_input.lower().startswith("y -"):
            try:
                self.next_action_count = abs(
                    int(console_input.split(" ")[1])
                )
                while True:
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("Waiting for your response...")
                    else:
                        console_input = clean_input(
                            Fore.MAGENTA + "Input:" + Style.RESET_ALL
                        )
                    if console_input.lower().strip() == cfg.authorise_key:
                        user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().strip() == "s":
                        logger.typewriter_log(
                            "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
                            Fore.GREEN,
                            "",
                        )
                        thoughts = assistant_reply_json.get("thoughts", {})
                        self_feedback_resp = self.get_self_feedback(
                            thoughts, cfg.fast_llm_model
                        )
                        logger.typewriter_log(
                            f"SELF FEEDBACK: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        user_input = self_feedback_resp
                        command_name = "self_feedback"
                        break
                    elif console_input.lower().strip() == "":
                        logger.warn("Invalid input format.")
                        continue
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            logger.warn(
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == cfg.exit_key:
                        user_input = "EXIT"
                        break
                    else:
                        user_input = console_input
                        command_name = "human_feedback"
                        self.log_cycle_handler.log_cycle(
                            self.config.ai_name,
                            self.created_at,
                            self.cycle_count,
                            user_input,
                            USER_INPUT_FILE_NAME,
                        )
                        break

                if user_input == "GENERATE NEXT COMMAND JSON":
                    logger.typewriter_log(
                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                        Fore.MAGENTA,
                        "",
                    )
                elif user_input == "EXIT":
                    logger.info("Exiting...")
                    break
            else:
                # Print authorized commands left value
                logger.typewriter_log(
                    f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}"
                self.user_input = "GENERATE NEXT COMMAND JSON"
            except ValueError:
                print(
                    "Invalid input format. Please enter 'y -n' where n is"
                    " the number of continuous tasks."
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = f"Could not execute command: {arguments}"
            elif command_name == "human_feedback":
                result = f"Human feedback: {user_input}"
            elif command_name == "self_feedback":
                result = f"Self feedback: {user_input}"
            else:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
                        continue
                    command_name, arguments = plugin.pre_command(
                        command_name, arguments
                    )
                command_result = execute_command(
                    self.command_registry,
                    command_name,
                    arguments,
                    self.config.prompt_generator,
                    config=cfg,
                )
                result = f"Command {command_name} returned: " f"{command_result}"
            return
        elif console_input.lower() == "n":
            self.user_input = "EXIT"
            return
        else:
            self.user_input = console_input
            self.command_name = "human_feedback"
            return

            result_tlength = count_string_tokens(
                str(command_result), cfg.fast_llm_model
            )
            memory_tlength = count_string_tokens(
                str(self.history.summary_message()), cfg.fast_llm_model
            )
            if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
                result = f"Failure: command {command_name} returned too much output. \
                    Do not execute this command again with the same arguments."
        if self.user_input == "GENERATE NEXT COMMAND JSON":
            logger.typewriter_log(
                "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                Fore.MAGENTA,
                "",
            )
        elif self.user_input == "EXIT":
            print("Exiting...", flush=True)
            # break  # note: this needs attention
        else:
            # Print command
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{self.command_name}{Style.RESET_ALL}"
                f" ARGUMENTS = {Fore.CYAN}{self.arguments}{Style.RESET_ALL}",
            )

            for plugin in cfg.plugins:
                if not plugin.can_handle_post_command():
                    continue
                result = plugin.post_command(command_name, result)
            if self.next_action_count > 0:
                self.next_action_count -= 1
        # Execute command
        if self.command_name is not None and self.command_name.lower().startswith("error"):
            result = (
                f"Command {self.command_name} threw the following error: {self.arguments}"
            )
        elif self.command_name == "human_feedback":
            result = f"Human feedback: {self.user_input}"
        else:
            for plugin in self.cfg.plugins:
                if not plugin.can_handle_pre_command():
                    continue
                self.command_name, self.arguments = plugin.pre_command(
                    self.command_name, self.arguments
                )
            command_result = execute_command(
                self.command_registry,
                self.command_name,
                self.arguments,
                self.config.prompt_generator,
            )
            result = f"Command {self.command_name} returned: " f"{command_result}"

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_command():
                continue
            result = plugin.post_command(self.command_name, result)
        if self.next_action_count > 0:
            self.next_action_count -= 1
        if self.command_name != "do_nothing":
            memory_to_add = (
                f"Assistant Reply: {self.assistant_reply} "
                f"\nResult: {result} "
                f"\nHuman Feedback: {self.user_input} "
            )

            self.memory.add(memory_to_add)

            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.history.add("system", result, "action_result")
                self.full_message_history.append(
                    create_chat_message("system", result)
                )
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.history.add("system", "Unable to execute command", "action_result")
                self.full_message_history.append(
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )
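The signal handler above implements "first Ctrl-C cancels the remaining authorised commands, second Ctrl-C exits". A minimal standalone sketch of that pattern, assuming only the standard library:

import signal
import sys

next_action_count = 3  # pretend 3 commands were authorised with 'y -3'

def signal_handler(signum, frame):
    global next_action_count
    if next_action_count == 0:
        sys.exit()                # nothing left to cancel: exit immediately
    print("Interrupt signal received. Stopping continuous command execution.")
    next_action_count = 0         # first interrupt only cancels the batch

signal.signal(signal.SIGINT, signal_handler)

Keeping the counter as the sole piece of state means the second interrupt naturally falls through to sys.exit() with no extra flag.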
@@ -318,45 +239,3 @@ class Agent:
                self.workspace.get_path(command_args[pathlike])
            )
        return command_args

    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
        """Generates a feedback response based on the provided thoughts dictionary.
        This method takes in a dictionary of thoughts containing keys such as 'reasoning',
        'plan', 'thoughts', and 'criticism'. It combines these elements into a single
        feedback message and uses the create_chat_completion() function to generate a
        response based on the input message.
        Args:
            thoughts (dict): A dictionary containing thought elements like reasoning,
                plan, thoughts, and criticism.
        Returns:
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role

        feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
        reasoning = thoughts.get("reasoning", "")
        plan = thoughts.get("plan", "")
        thought = thoughts.get("thoughts", "")
        feedback_thoughts = thought + reasoning + plan

        prompt = ChatSequence.for_model(llm_model)
        prompt.add("user", feedback_prompt + feedback_thoughts)

        self.log_cycle_handler.log_cycle(
            self.config.ai_name,
            self.created_at,
            self.cycle_count,
            prompt.raw(),
            PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
        )

        feedback = create_chat_completion(prompt)

        self.log_cycle_handler.log_cycle(
            self.config.ai_name,
            self.created_at,
            self.cycle_count,
            feedback,
            SUPERVISOR_FEEDBACK_FILE_NAME,
        )
        return feedback
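A usage sketch of get_self_feedback as documented above — the thoughts dict shape follows the docstring, the model name is illustrative, and agent construction is omitted:

thoughts = {
    "thoughts": "I should list the workspace files first.",
    "reasoning": "Knowing the current state avoids redundant commands.",
    "plan": "1. list_files  2. read the most relevant file",
    "criticism": "I may be over-planning for a simple task.",
}
# Returns a one-paragraph critique generated by the LLM:
feedback = agent.get_self_feedback(thoughts, llm_model="gpt-3.5-turbo")
print(feedback)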
@@ -1,10 +1,11 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations

from autogpt.config import Config
from autogpt.llm.base import ChatSequence
from autogpt.llm.chat import Message, create_chat_completion
from autogpt.singleton import Singleton
from typing import List, Union

from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
from autogpt.types.openai import Message


class AgentManager(metaclass=Singleton):
@@ -12,55 +13,55 @@ class AgentManager(metaclass=Singleton):

    def __init__(self):
        self.next_key = 0
        self.agents: dict[
            int, tuple[str, list[Message], str]
        ] = {}  # key, (task, full_message_history, model)
        self.agents = {}  # key, (task, full_message_history, model)
        self.cfg = Config()

    # Create new GPT agent
    # TODO: Centralise use of create_chat_completion() to globally enforce token limit

    def create_agent(
        self, task: str, creation_prompt: str, model: str
    ) -> tuple[int, str]:
    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
        """Create a new agent and return its key

        Args:
            task: The task to perform
            creation_prompt: Prompt passed to the LLM at creation
            model: The model to use to run this agent
            prompt: The prompt to use
            model: The model to use

        Returns:
            The key of the new agent
        """
        messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])

        messages: List[Message] = [
            {"role": "user", "content": prompt},
        ]
        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages.raw()):
                messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
            if plugin_messages := plugin.pre_instruction(messages):
                messages.extend(iter(plugin_messages))
        # Start GPT instance
        agent_reply = create_chat_completion(prompt=messages)
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.add("assistant", agent_reply)
        messages.append({"role": "assistant", "content": agent_reply})

        plugins_reply = ""
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"

        if plugins_reply and plugins_reply != "":
            messages.add("assistant", plugins_reply)
            messages.append({"role": "assistant", "content": plugins_reply})
        key = self.next_key
        # This is done instead of len(agents) to make keys unique even if agents
        # are deleted
        self.next_key += 1

        self.agents[key] = (task, list(messages), model)
        self.agents[key] = (task, messages, model)

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
@@ -82,30 +83,33 @@ class AgentManager(metaclass=Singleton):
        task, messages, model = self.agents[int(key)]

        # Add user message to message history before sending to agent
        messages = ChatSequence.for_model(model, messages)
        messages.add("user", message)
        messages.append({"role": "user", "content": message})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
                messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
            if plugin_messages := plugin.pre_instruction(messages):
                for plugin_message in plugin_messages:
                    messages.append(plugin_message)

        # Start GPT instance
        agent_reply = create_chat_completion(prompt=messages)
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.add("assistant", agent_reply)
        messages.append({"role": "assistant", "content": agent_reply})

        plugins_reply = agent_reply
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
        # Update full message history
        if plugins_reply and plugins_reply != "":
            messages.add("assistant", plugins_reply)
            messages.append({"role": "assistant", "content": plugins_reply})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
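The comment above explains why next_key is a monotonically increasing counter rather than len(agents): keys stay unique even after deletions. A usage sketch of the manager (task text, prompt, and model name are illustrative; arguments are passed positionally since old and new signatures name the second parameter differently):

manager = AgentManager()  # Singleton: repeated calls return the same instance

key, first_reply = manager.create_agent(
    "summarise a webpage",                  # task
    "You are a summarisation sub-agent...", # creation prompt
    "gpt-3.5-turbo",                        # model
)
reply = manager.message_agent(key, "Summarise https://example.com")
manager.delete_agent(key)  # this key is never reused for a later agent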
autogpt/api_manager.py (new file, 158 lines)
@@ -0,0 +1,158 @@
from typing import List

import openai

from autogpt.config import Config
from autogpt.logs import logger
from autogpt.modelsinfo import COSTS

cfg = Config()
openai.api_key = cfg.openai_api_key
print_total_cost = cfg.debug_mode


class ApiManager:
    def __init__(self, debug=False):
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0
        self.total_budget = 0
        self.debug = debug

    def reset(self):
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0
        self.total_budget = 0.0

    def create_chat_completion(
        self,
        messages: list,  # type: ignore
        model: str = None,
        temperature: float = cfg.temperature,
        max_tokens: int = None,
        deployment_id=None,
    ) -> str:
        """
        Create a chat completion and update the cost.
        Args:
            messages (list): The list of messages to send to the API.
            model (str): The model to use for the API call.
            temperature (float): The temperature to use for the API call.
            max_tokens (int): The maximum number of tokens for the API call.
        Returns:
            str: The AI's response.
        """
        if deployment_id is not None:
            response = openai.ChatCompletion.create(
                deployment_id=deployment_id,
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
            )
        else:
            response = openai.ChatCompletion.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
            )
        if self.debug:
            logger.debug(f"Response: {response}")
        prompt_tokens = response.usage.prompt_tokens
        completion_tokens = response.usage.completion_tokens
        self.update_cost(prompt_tokens, completion_tokens, model)
        return response

    def embedding_create(
        self,
        text_list: List[str],
        model: str = "text-embedding-ada-002",
    ) -> List[float]:
        """
        Create an embedding for the given input text using the specified model.

        Args:
            text_list (List[str]): Input text for which the embedding is to be created.
            model (str, optional): The model to use for generating the embedding.

        Returns:
            List[float]: The generated embedding as a list of float values.
        """
        if cfg.use_azure:
            response = openai.Embedding.create(
                input=text_list,
                engine=cfg.get_azure_deployment_id_for_model(model),
            )
        else:
            response = openai.Embedding.create(input=text_list, model=model)

        self.update_cost(response.usage.prompt_tokens, 0, model)
        return response["data"][0]["embedding"]

    def update_cost(self, prompt_tokens, completion_tokens, model):
        """
        Update the total cost, prompt tokens, and completion tokens.

        Args:
            prompt_tokens (int): The number of tokens used in the prompt.
            completion_tokens (int): The number of tokens used in the completion.
            model (str): The model used for the API call.
        """
        self.total_prompt_tokens += prompt_tokens
        self.total_completion_tokens += completion_tokens
        self.total_cost += (
            prompt_tokens * COSTS[model]["prompt"]
            + completion_tokens * COSTS[model]["completion"]
        ) / 1000
        if print_total_cost:
            print(f"Total running cost: ${self.total_cost:.3f}")

    def set_total_budget(self, total_budget):
        """
        Sets the total user-defined budget for API calls.

        Args:
            total_budget (float): The total budget for API calls.
        """
        self.total_budget = total_budget

    def get_total_prompt_tokens(self):
        """
        Get the total number of prompt tokens.

        Returns:
            int: The total number of prompt tokens.
        """
        return self.total_prompt_tokens

    def get_total_completion_tokens(self):
        """
        Get the total number of completion tokens.

        Returns:
            int: The total number of completion tokens.
        """
        return self.total_completion_tokens

    def get_total_cost(self):
        """
        Get the total cost of API calls.

        Returns:
            float: The total cost of API calls.
        """
        return self.total_cost

    def get_total_budget(self):
        """
        Get the total user-defined budget for API calls.

        Returns:
            float: The total budget for API calls.
        """
        return self.total_budget


api_manager = ApiManager(cfg.debug_mode)
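update_cost above charges (prompt_tokens * prompt_rate + completion_tokens * completion_rate) / 1000, since the COSTS table quotes rates per 1,000 tokens. A worked example with illustrative rates (the real numbers live in autogpt.modelsinfo):

# Hypothetical per-1k-token rates, for the sake of the arithmetic only:
COSTS = {"gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002}}

prompt_tokens, completion_tokens = 1500, 500
cost = (
    prompt_tokens * COSTS["gpt-3.5-turbo"]["prompt"]
    + completion_tokens * COSTS["gpt-3.5-turbo"]["completion"]
) / 1000
print(f"${cost:.3f}")  # $0.004 = (1500*0.002 + 500*0.002) / 1000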
@@ -1,15 +1,18 @@
""" Command and Control """
import json
from typing import Dict, List, Union
from typing import Dict, List, NoReturn, Union

from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.config import Config
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
from autogpt.url_utils.validators import validate_url

CFG = Config()
AGENT_MANAGER = AgentManager()


def is_valid_int(value: str) -> bool:
@@ -89,7 +92,6 @@ def execute_command(
    command_name: str,
    arguments,
    prompt: PromptGenerator,
    config: Config,
):
    """Execute the command and return the result

@@ -105,25 +107,33 @@ def execute_command(

        # If the command is found, call it with the provided arguments
        if cmd:
            return cmd(**arguments, config=config)
            return cmd(**arguments)

        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name.lower())

        if command_name == "memory_add":
            return get_memory(CFG).add(arguments["string"])

        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again
        for command in prompt.commands:
            if (
                command_name == command["label"].lower()
                or command_name == command["name"].lower()
            ):
                return command["function"](**arguments)
        return (
            f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
            " list for available commands and only respond in the specified JSON"
            " format."
        )
        elif command_name == "do_nothing":
            return "No action performed."
        elif command_name == "task_complete":
            shutdown()
        else:
            for command in prompt.commands:
                if (
                    command_name == command["label"].lower()
                    or command_name == command["name"].lower()
                ):
                    return command["function"](**arguments)
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    except Exception as e:
        return f"Error: {str(e)}"

@@ -131,9 +141,8 @@ def execute_command(
@command(
    "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str, config: Config) -> str:
    """Get the text summary of a webpage
def get_text_summary(url: str, question: str) -> str:
    """Return the results of a Google search

    Args:
        url (str): The url to scrape
@@ -143,15 +152,13 @@ def get_text_summary(url: str, question: str, config: Config) -> str:
        str: The summary of the text
    """
    text = scrape_text(url)
    summary, _ = summarize_text(text, question=question)

    summary = summarize_text(url, text, question)
    return f""" "Result" : {summary}"""


@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
    """Get all hyperlinks on a webpage
@command("get_hyperlinks", "Get text summary", '"url": "<url>"')
def get_hyperlinks(url: str) -> Union[str, List[str]]:
    """Return the results of a Google search

    Args:
        url (str): The url to scrape
@@ -159,7 +166,13 @@ def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
    Returns:
        str or list: The hyperlinks on the page
    """
    return scrape_links(url, config)
    return scrape_links(url)


def shutdown() -> NoReturn:
    """Shut down the program"""
    print("Shutting down...")
    quit()


@command(
@@ -167,7 +180,7 @@ def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
    "Start GPT Agent",
    '"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
    """Start an agent with a given name, task, and prompt

    Args:
@@ -179,8 +192,6 @@ def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
    Returns:
        str: The response of the agent
    """
    agent_manager = AgentManager()

    # Remove underscores from name
    voice_name = name.replace("_", " ")

@@ -188,48 +199,48 @@ def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
    agent_intro = f"{voice_name} here, Reporting for duty!"

    # Create agent
    if config.speak_mode:
    if CFG.speak_mode:
        say_text(agent_intro, 1)
    key, ack = agent_manager.create_agent(task, first_message, model)
    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)

    if config.speak_mode:
    if CFG.speak_mode:
        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = agent_manager.message_agent(key, prompt)
    agent_response = AGENT_MANAGER.message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str, config: Config) -> str:
def message_agent(key: str, message: str) -> str:
    """Message an agent with a given key and message"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = AgentManager().message_agent(int(key), message)
        agent_response = AGENT_MANAGER.message_agent(int(key), message)
    else:
        return "Invalid key, must be an integer."

    # Speak response
    if config.speak_mode:
    if CFG.speak_mode:
        say_text(agent_response, 1)
    return agent_response


@command("list_agents", "List GPT Agents", "() -> str")
def list_agents(config: Config) -> str:
@command("list_agents", "List GPT Agents", "")
def list_agents() -> str:
    """List all agents

    Returns:
        str: A list of all agents
    """
    return "List of agents:\n" + "\n".join(
        [str(x[0]) + ": " + x[1] for x in AgentManager().list_agents()]
        [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
    )


@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str, config: Config) -> str:
def delete_agent(key: str) -> str:
    """Delete an agent with a given key

    Args:
@@ -238,5 +249,5 @@ def delete_agent(key: str, config: Config) -> str:
    Returns:
        str: A message indicating whether the agent was deleted or not
    """
    result = AgentManager().delete_agent(key)
    result = AGENT_MANAGER.delete_agent(key)
    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
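execute_command above tries the CommandRegistry first and only then falls through to the legacy name/synonym dispatch over prompt.commands. A minimal sketch of that two-stage lookup (the registry and prompt objects here are simplified stand-ins, not the project's exact classes):

def execute_command(command_registry, command_name, arguments, prompt):
    cmd = command_registry.commands.get(command_name)
    if cmd:
        return cmd(**arguments)                 # registry hit: call directly
    command_name = command_name.lower()         # legacy path: normalise, then
    for command in prompt.commands:             # match label/name from the prompt
        if command_name in (command["label"].lower(), command["name"].lower()):
            return command["function"](**arguments)
    return f"Unknown command '{command_name}'."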
autogpt/auto-gpt.json (new file, 1 line)
@@ -0,0 +1 @@
{}

autogpt/auto_gpt_workspace/127.0.0.1/auto-gpt.json (new file, 1 line)
@@ -0,0 +1 @@
{}

autogpt/auto_gpt_workspace/127.0.0.1/file_logger.txt (new file, 1 line)
@@ -0,0 +1 @@
File Operation Logger
autogpt/chat.py (new file, 218 lines)
@@ -0,0 +1,218 @@
import time

from openai.error import RateLimitError

from autogpt import token_counter
from autogpt.api_manager import api_manager
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.logs import logger
from autogpt.types.openai import Message

cfg = Config()


def create_chat_message(role, content) -> Message:
    """
    Create a chat message with the given role and content.

    Args:
        role (str): The role of the message sender, e.g., "system", "user", or "assistant".
        content (str): The content of the message.

    Returns:
        dict: A dictionary containing the role and content of the message.
    """
    return {"role": role, "content": content}


def generate_context(prompt, relevant_memory, full_message_history, model):
    current_context = [
        create_chat_message("system", prompt),
        create_chat_message(
            "system", f"The current time and date is {time.strftime('%c')}"
        ),
        create_chat_message(
            "system",
            f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
        ),
    ]

    # Add messages from the full message history until we reach the token limit
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Count the currently used tokens
    current_tokens_used = token_counter.count_message_tokens(current_context, model)
    return (
        next_message_to_add_index,
        current_tokens_used,
        insertion_index,
        current_context,
    )


# TODO: Change debug from hardcode to argument
def chat_with_ai(
    agent, prompt, user_input, full_message_history, permanent_memory, token_limit
):
    """Interact with the OpenAI API, sending the prompt, user input, message history,
    and permanent memory."""
    while True:
        try:
            """
            Interact with the OpenAI API, sending the prompt, user input,
            message history, and permanent memory.

            Args:
                prompt (str): The prompt explaining the rules to the AI.
                user_input (str): The input from the user.
                full_message_history (list): The list of all messages sent between the
                    user and the AI.
                permanent_memory (Obj): The memory object containing the permanent
                    memory.
                token_limit (int): The maximum number of tokens allowed in the API call.

            Returns:
                str: The AI's response.
            """
            model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
            # Reserve 1000 tokens for the response

            logger.debug(f"Token limit: {token_limit}")
            send_token_limit = token_limit - 1000

            relevant_memory = (
                ""
                if len(full_message_history) == 0
                else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
            )

            logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")

            (
                next_message_to_add_index,
                current_tokens_used,
                insertion_index,
                current_context,
            ) = generate_context(prompt, relevant_memory, full_message_history, model)

            while current_tokens_used > 2500:
                # remove memories until we are under 2500 tokens
                relevant_memory = relevant_memory[:-1]
                (
                    next_message_to_add_index,
                    current_tokens_used,
                    insertion_index,
                    current_context,
                ) = generate_context(
                    prompt, relevant_memory, full_message_history, model
                )

            current_tokens_used += token_counter.count_message_tokens(
                [create_chat_message("user", user_input)], model
            )  # Account for user input (appended later)

            while next_message_to_add_index >= 0:
                # print (f"CURRENT TOKENS USED: {current_tokens_used}")
                message_to_add = full_message_history[next_message_to_add_index]

                tokens_to_add = token_counter.count_message_tokens(
                    [message_to_add], model
                )
                if current_tokens_used + tokens_to_add > send_token_limit:
                    break

                # Add the most recent message to the start of the current context,
                # after the two system prompts.
                current_context.insert(
                    insertion_index, full_message_history[next_message_to_add_index]
                )

                # Count the currently used tokens
                current_tokens_used += tokens_to_add

                # Move to the next most recent message in the full message history
                next_message_to_add_index -= 1

            # inform the AI about its remaining budget (if it has one)
            if api_manager.get_total_budget() > 0.0:
                remaining_budget = (
                    api_manager.get_total_budget() - api_manager.get_total_cost()
                )
                if remaining_budget < 0:
                    remaining_budget = 0
                system_message = (
                    f"Your remaining API budget is ${remaining_budget:.3f}"
                    + (
                        " BUDGET EXCEEDED! SHUT DOWN!\n\n"
                        if remaining_budget == 0
                        else " Budget very nearly exceeded! Shut down gracefully!\n\n"
                        if remaining_budget < 0.005
                        else " Budget nearly exceeded. Finish up.\n\n"
                        if remaining_budget < 0.01
                        else "\n\n"
                    )
                )
                logger.debug(system_message)
                current_context.append(create_chat_message("system", system_message))

            # Append user input, the length of this is accounted for above
            current_context.extend([create_chat_message("user", user_input)])

            plugin_count = len(cfg.plugins)
            for i, plugin in enumerate(cfg.plugins):
                if not plugin.can_handle_on_planning():
                    continue
                plugin_response = plugin.on_planning(
                    agent.prompt_generator, current_context
                )
                if not plugin_response or plugin_response == "":
                    continue
                tokens_to_add = token_counter.count_message_tokens(
                    [create_chat_message("system", plugin_response)], model
                )
                if current_tokens_used + tokens_to_add > send_token_limit:
                    if cfg.debug_mode:
                        print("Plugin response too long, skipping:", plugin_response)
                        print("Plugins remaining at stop:", plugin_count - i)
                    break
                current_context.append(create_chat_message("system", plugin_response))

            # Calculate remaining tokens
            tokens_remaining = token_limit - current_tokens_used
            # assert tokens_remaining >= 0, "Tokens remaining is negative.
            # This should never happen, please submit a bug report at
            # https://www.github.com/Torantulino/Auto-GPT"

            # Debug print the current context
            logger.debug(f"Token limit: {token_limit}")
            logger.debug(f"Send Token Count: {current_tokens_used}")
            logger.debug(f"Tokens remaining for response: {tokens_remaining}")
            logger.debug("------------ CONTEXT SENT TO AI ---------------")
            for message in current_context:
                # Skip printing the prompt
                if message["role"] == "system" and message["content"] == prompt:
                    continue
                logger.debug(f"{message['role'].capitalize()}: {message['content']}")
                logger.debug("")
            logger.debug("----------- END OF CONTEXT ----------------")

            # TODO: use a model defined elsewhere, so that model can contain
            # temperature and other settings we care about
            assistant_reply = create_chat_completion(
                model=model,
                messages=current_context,
                max_tokens=tokens_remaining,
            )

            # Update full message history
            full_message_history.append(create_chat_message("user", user_input))
            full_message_history.append(
                create_chat_message("assistant", assistant_reply)
            )

            return assistant_reply
        except RateLimitError:
            # TODO: When we switch to langchain, this is built in
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
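chat_with_ai above reserves 1000 tokens for the reply and then walks the history newest-to-oldest, inserting each message at a fixed index just after the system prompts until the send budget is exhausted. The core loop as a standalone sketch (count_tokens is a stand-in for token_counter.count_message_tokens):

def build_context(system_msgs, history, user_msg, token_limit, count_tokens):
    send_limit = token_limit - 1000        # reserve 1000 tokens for the response
    context = list(system_msgs)
    insertion_index = len(context)         # history goes right after the system prompts
    used = count_tokens(context) + count_tokens([user_msg])
    for message in reversed(history):      # newest message first
        cost = count_tokens([message])
        if used + cost > send_limit:
            break                          # budget exhausted: drop older history
        context.insert(insertion_index, message)
        used += cost
    context.append(user_msg)               # user input was pre-counted above
    return context

Inserting at a fixed index while iterating newest-first keeps the surviving history in chronological order without a second sort.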
324
autogpt/cli.py
324
autogpt/cli.py
@ -1,116 +1,230 @@
|
||||
"""Main script for the autogpt package."""
|
||||
import click
|
||||
# Put imports inside function to avoid importing everything when starting the CLI
|
||||
import logging
|
||||
import os.path
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
@click.group(invoke_without_command=True)
|
||||
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
|
||||
@click.option(
|
||||
"--skip-reprompt",
|
||||
"-y",
|
||||
is_flag=True,
|
||||
help="Skips the re-prompting messages at the beginning of the script",
|
||||
)
|
||||
@click.option(
|
||||
"--ai-settings",
|
||||
"-C",
|
||||
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
|
||||
)
|
||||
@click.option(
|
||||
"--prompt-settings",
|
||||
"-P",
|
||||
help="Specifies which prompt_settings.yaml file to use.",
|
||||
)
|
||||
@click.option(
|
||||
"-l",
|
||||
"--continuous-limit",
|
||||
type=int,
|
||||
help="Defines the number of times to run in continuous mode",
|
||||
)
|
||||
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
|
||||
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
|
||||
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
|
||||
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
|
||||
@click.option(
|
||||
"--use-memory",
|
||||
"-m",
|
||||
"memory_type",
|
||||
type=str,
|
||||
help="Defines which Memory backend to use",
|
||||
)
|
||||
@click.option(
|
||||
"-b",
|
||||
"--browser-name",
|
||||
help="Specifies which web-browser to use when using selenium to scrape the web.",
|
||||
)
|
||||
@click.option(
|
||||
"--allow-downloads",
|
||||
is_flag=True,
|
||||
help="Dangerous: Allows Auto-GPT to download files natively.",
|
||||
)
|
||||
@click.option(
|
||||
"--skip-news",
|
||||
is_flag=True,
|
||||
help="Specifies whether to suppress the output of latest news on startup.",
|
||||
)
|
||||
@click.option(
|
||||
# TODO: this is a hidden option for now, necessary for integration testing.
|
||||
# We should make this public once we're ready to roll out agent specific workspaces.
|
||||
"--workspace-directory",
|
||||
"-w",
|
||||
type=click.Path(),
|
||||
hidden=True,
|
||||
)
|
||||
@click.option(
|
||||
"--install-plugin-deps",
|
||||
is_flag=True,
|
||||
help="Installs external dependencies for 3rd party plugins.",
|
||||
)
|
||||
@click.pass_context
|
||||
def main(
|
||||
ctx: click.Context,
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings: str,
|
||||
prompt_settings: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
gpt3only: bool,
|
||||
gpt4only: bool,
|
||||
memory_type: str,
|
||||
browser_name: str,
|
||||
allow_downloads: bool,
|
||||
skip_news: bool,
|
||||
workspace_directory: str,
|
||||
install_plugin_deps: bool,
|
||||
) -> None:
|
||||
import gradio
|
||||
from colorama import Fore
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import Config, check_openai_api_key
|
||||
from autogpt.configurator import create_config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.plugins import scan_plugins
|
||||
from autogpt.prompts.prompt import construct_main_ai_config
|
||||
from autogpt.utils import get_current_git_branch, get_latest_bulletin
|
||||
from autogpt.workspace import Workspace
|
||||
import func_box
|
||||
from toolbox import update_ui
|
||||
from toolbox import ChatBotWithCookies
|
||||
def handle_config(kwargs_settings):
|
||||
kwargs_settings = {
|
||||
'continuous': False, # Enable Continuous Mode
|
||||
'continuous_limit': None, # Defines the number of times to run in continuous mode
|
||||
'ai_settings': None, # Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.
|
||||
'skip_reprompt': False, # Skips the re-prompting messages at the beginning of the scrip
|
||||
'speak': False, # Enable speak Mode
|
||||
'debug': False, # Enable Debug Mode
|
||||
'gpt3only': False, # Enable GPT3.5 Only Mode
|
||||
'gpt4only': False, # Enable GPT4 Only Mode
|
||||
'memory_type': None, # Defines which Memory backend to use
|
||||
'browser_name': None, # Specifies which web-browser to use when using selenium to scrape the web.
|
||||
'allow_downloads': False, # Dangerous: Allows Auto-GPT to download files natively.
|
||||
'skip_news': True, # Specifies whether to suppress the output of latest news on startup.
|
||||
'workspace_directory': None # TODO: this is a hidden option for now, necessary for integration testing. We should make this public once we're ready to roll out agent specific workspaces.
|
||||
}
|
||||
"""
|
||||
Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.
|
||||
|
||||
Start an Auto-GPT assistant.
|
||||
"""
|
||||
# Put imports inside function to avoid importing everything when starting the CLI
|
||||
from autogpt.main import run_auto_gpt
|
||||
if kwargs_settings['workspace_directory']:
|
||||
kwargs_settings['ai_settings'] = os.path.join(kwargs_settings['workspace_directory'], 'ai_settings.yaml')
|
||||
# if ctx.invoked_subcommand is None:
|
||||
cfg = Config()
|
||||
# TODO: fill in llm values here
|
||||
check_openai_api_key()
|
||||
create_config(
|
||||
kwargs_settings['continuous'],
|
||||
kwargs_settings['continuous_limit'],
|
||||
kwargs_settings['ai_settings'],
|
||||
kwargs_settings['skip_reprompt'],
|
||||
kwargs_settings['speak'],
|
||||
kwargs_settings['debug'],
|
||||
kwargs_settings['gpt3only'],
|
||||
kwargs_settings['gpt4only'],
|
||||
kwargs_settings['memory_type'],
|
||||
kwargs_settings['browser_name'],
|
||||
kwargs_settings['allow_downloads'],
|
||||
kwargs_settings['skip_news'],
|
||||
)
|
||||
return cfg
|
||||
|
||||
if ctx.invoked_subcommand is None:
|
||||
run_auto_gpt(
|
||||
continuous,
|
||||
continuous_limit,
|
||||
ai_settings,
|
||||
prompt_settings,
|
||||
skip_reprompt,
|
||||
speak,
|
||||
debug,
|
||||
gpt3only,
|
||||
gpt4only,
|
||||
memory_type,
|
||||
browser_name,
|
||||
allow_downloads,
|
||||
skip_news,
|
||||
workspace_directory,
|
||||
install_plugin_deps,
|
||||
|
||||
def handle_news():
|
||||
motd = get_latest_bulletin()
|
||||
if motd:
|
||||
logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
|
||||
git_branch = get_current_git_branch()
|
||||
if git_branch and git_branch != "stable":
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
f"You are running on `{git_branch}` branch "
|
||||
"- this is not a supported branch.",
|
||||
)
|
||||
if sys.version_info < (3, 10):
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
"You are running on an older version of Python. "
|
||||
"Some people have observed problems with certain "
|
||||
"parts of Auto-GPT with this version. "
|
||||
"Please consider upgrading to Python 3.10 or higher.",
|
||||
)
|
||||
|
||||
|
||||
def handle_registry():
    # Create a CommandRegistry instance and scan default folder
    command_registry = CommandRegistry()
    command_registry.import_commands("autogpt.commands.analyze_code")
    command_registry.import_commands("autogpt.commands.audio_text")
    command_registry.import_commands("autogpt.commands.execute_code")
    command_registry.import_commands("autogpt.commands.file_operations")
    command_registry.import_commands("autogpt.commands.git_operations")
    command_registry.import_commands("autogpt.commands.google_search")
    command_registry.import_commands("autogpt.commands.image_gen")
    command_registry.import_commands("autogpt.commands.improve_code")
    command_registry.import_commands("autogpt.commands.twitter")
    command_registry.import_commands("autogpt.commands.web_selenium")
    command_registry.import_commands("autogpt.commands.write_tests")
    command_registry.import_commands("autogpt.app")
    return command_registry


def handle_workspace(user):
    # TODO: have this directory live outside the repository (e.g. in a user's
    # home directory) and have it come in as a command line argument or part of
    # the env file.
    if user is None:
        workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
    else:
        workspace_directory = Path(__file__).parent / "auto_gpt_workspace" / user
    # TODO: pass in the ai_settings file and the env file and have them cloned into
    # the workspace directory so we can bind them to the agent.
    workspace_directory = Workspace.make_workspace(workspace_directory)
    # HACK: doing this here to collect some globals that depend on the workspace.
    file_logger_path = workspace_directory / "file_logger.txt"
    if not file_logger_path.exists():
        with file_logger_path.open(mode="w", encoding="utf-8") as f:
            f.write("File Operation Logger ")

    return workspace_directory, file_logger_path


def update_obj(plugin_kwargs, _is=True):
    obj = plugin_kwargs['obj']
    start = plugin_kwargs['start']
    next_ = plugin_kwargs['next']
    text = plugin_kwargs['txt']
    if _is:
        start.update(visible=True)
        next_.update(visible=False)
        text.update(visible=False)
    else:
        start.update(visible=False)
        next_.update(visible=True)
        text.update(visible=True)
    return obj, start, next_, text


def agent_main(name, role, goals, budget,
               cookies, chatbot, history, obj,
               ipaddr: gradio.Request):
    # ai setup
    input_kwargs = {
        'name': name,
        'role': role,
        'goals': goals,
        'budget': budget
    }
    # chat setup
    logger.output_content = []
    chatbot_with_cookie = ChatBotWithCookies(cookies)
    chatbot_with_cookie.write_list(chatbot)
    history = []
    cfg = handle_config(None)
    logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
    workspace_directory = ipaddr.client.host
    if not cfg.skip_news:
        handle_news()
    cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
    command_registry = handle_registry()
    ai_config = construct_main_ai_config(input_kwargs)

    def update_stream_ui(user='', gpt='', msg='Done',
                         _start=obj['start'].update(), _next=obj['next'].update(), _text=obj['text'].update()):
        if user or gpt:
            temp = [user, gpt]
            if not chatbot_with_cookie:
                chatbot_with_cookie.append(temp)
            else:
                chatbot_with_cookie[-1] = [
                    chatbot_with_cookie[-1][i] + temp[i] for i in range(len(chatbot_with_cookie[-1]))
                ]
        yield chatbot_with_cookie.get_cookies(), chatbot_with_cookie, history, msg, obj, _start, _next, _text

    if not ai_config:
        msg = '### ROLE must not be empty'
        # yield chatbot_with_cookie.get_cookies(), chatbot_with_cookie, history, msg, obj, None, None, None
        yield from update_stream_ui(msg=msg)
        return
    ai_config.command_registry = command_registry
    next_action_count = 0
    # Make a constant:
    triggering_prompt = (
        "Determine which next command to use, and respond using the"
        " format specified above:"
    )
    workspace_directory, file_logger_path = handle_workspace(workspace_directory)
    cfg.workspace_path = str(workspace_directory)
    cfg.file_logger_path = str(file_logger_path)
    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(cfg, init=True)
    logger.typewriter_log(
        "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
    )
    logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
    system_prompt = ai_config.construct_full_prompt()
    if cfg.debug_mode:
        logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
    agent = Agent(
        ai_name=input_kwargs['name'],
        memory=memory,
        full_message_history=history,
        next_action_count=next_action_count,
        command_registry=command_registry,
        config=ai_config,
        system_prompt=system_prompt,
        triggering_prompt=triggering_prompt,
        workspace_directory=workspace_directory,
    )
    obj['obj'] = agent
    _start = obj['start'].update(visible=False)
    _next = obj['next'].update(visible=True)
    _text = obj['text'].update(visible=True, interactive=True)
    # chat, his = func_box.chat_history(logger.output_content)
    # yield from update_stream_ui(user='Auto-GPT Start!', gpt=chat, _start=_start, _next=_next, _text=_text)
    agent.start_interaction_loop()
    chat, his = func_box.chat_history(logger.output_content)
    yield from update_stream_ui(user='Auto-GPT Start!', gpt=chat, _start=_start, _next=_next, _text=_text)


def agent_start(cookie, chatbot, history, msg, obj):
    yield from obj['obj'].start_interaction_loop(cookie, chatbot, history, msg, obj)


if __name__ == "__main__":
    main()
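For orientation, the helpers above compose in a straightforward way; a minimal sketch (the client-IP argument and the idea of calling the helpers directly are illustrative, not part of this commit):

# Hypothetical usage of the helpers defined above.
registry = handle_registry()                                 # registry with the default command modules
workspace_dir, file_log = handle_workspace("192.168.1.10")   # per-user workspace keyed by client IP
print(workspace_dir, file_log)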
213
autogpt/cli_private.py
Normal file
@ -0,0 +1,213 @@
"""Main script for the autogpt package."""
|
||||
import click
|
||||
|
||||
|
||||
@click.group(invoke_without_command=True)
|
||||
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
|
||||
@click.option(
|
||||
"--skip-reprompt",
|
||||
"-y",
|
||||
is_flag=True,
|
||||
help="Skips the re-prompting messages at the beginning of the script",
|
||||
)
|
||||
@click.option(
|
||||
"--ai-settings",
|
||||
"-C",
|
||||
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
|
||||
)
|
||||
@click.option(
|
||||
"-l",
|
||||
"--continuous-limit",
|
||||
type=int,
|
||||
help="Defines the number of times to run in continuous mode",
|
||||
)
|
||||
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
|
||||
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
|
||||
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
|
||||
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
|
||||
@click.option(
|
||||
"--use-memory",
|
||||
"-m",
|
||||
"memory_type",
|
||||
type=str,
|
||||
help="Defines which Memory backend to use",
|
||||
)
|
||||
@click.option(
|
||||
"-b",
|
||||
"--browser-name",
|
||||
help="Specifies which web-browser to use when using selenium to scrape the web.",
|
||||
)
|
||||
@click.option(
|
||||
"--allow-downloads",
|
||||
is_flag=True,
|
||||
help="Dangerous: Allows Auto-GPT to download files natively.",
|
||||
)
|
||||
@click.option(
|
||||
"--skip-news",
|
||||
is_flag=True,
|
||||
help="Specifies whether to suppress the output of latest news on startup.",
|
||||
)
|
||||
@click.option(
|
||||
# TODO: this is a hidden option for now, necessary for integration testing.
|
||||
# We should make this public once we're ready to roll out agent specific workspaces.
|
||||
"--workspace-directory",
|
||||
"-w",
|
||||
type=click.Path(),
|
||||
hidden=True,
|
||||
)
|
||||
@click.pass_context
|
||||
def main(
|
||||
ctx: click.Context,
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
gpt3only: bool,
|
||||
gpt4only: bool,
|
||||
memory_type: str,
|
||||
browser_name: str,
|
||||
allow_downloads: bool,
|
||||
skip_news: bool,
|
||||
workspace_directory: str,
|
||||
) -> None:
|
||||
"""
|
||||
Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.
|
||||
|
||||
Start an Auto-GPT assistant.
|
||||
"""
|
||||
# Put imports inside function to avoid importing everything when starting the CLI
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import Config, check_openai_api_key
|
||||
from autogpt.configurator import create_config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.plugins import scan_plugins
|
||||
from autogpt.prompts.prompt import construct_main_ai_config
|
||||
from autogpt.utils import get_current_git_branch, get_latest_bulletin
|
||||
from autogpt.workspace import Workspace
|
||||
|
||||
if ctx.invoked_subcommand is None:
|
||||
cfg = Config()
|
||||
# TODO: fill in llm values here
|
||||
check_openai_api_key()
|
||||
create_config(
|
||||
continuous,
|
||||
continuous_limit,
|
||||
ai_settings,
|
||||
skip_reprompt,
|
||||
speak,
|
||||
debug,
|
||||
gpt3only,
|
||||
gpt4only,
|
||||
memory_type,
|
||||
browser_name,
|
||||
allow_downloads,
|
||||
skip_news,
|
||||
)
|
||||
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
|
||||
if not cfg.skip_news:
|
||||
motd = get_latest_bulletin()
|
||||
if motd:
|
||||
logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
|
||||
git_branch = get_current_git_branch()
|
||||
if git_branch and git_branch != "stable":
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
f"You are running on `{git_branch}` branch "
|
||||
"- this is not a supported branch.",
|
||||
)
|
||||
if sys.version_info < (3, 10):
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.RED,
|
||||
"You are running on an older version of Python. "
|
||||
"Some people have observed problems with certain "
|
||||
"parts of Auto-GPT with this version. "
|
||||
"Please consider upgrading to Python 3.10 or higher.",
|
||||
)
|
||||
|
||||
cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
|
||||
# Create a CommandRegistry instance and scan default folder
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.analyze_code")
|
||||
command_registry.import_commands("autogpt.commands.audio_text")
|
||||
command_registry.import_commands("autogpt.commands.execute_code")
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.commands.git_operations")
|
||||
command_registry.import_commands("autogpt.commands.google_search")
|
||||
command_registry.import_commands("autogpt.commands.image_gen")
|
||||
command_registry.import_commands("autogpt.commands.improve_code")
|
||||
command_registry.import_commands("autogpt.commands.twitter")
|
||||
command_registry.import_commands("autogpt.commands.web_selenium")
|
||||
command_registry.import_commands("autogpt.commands.write_tests")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
|
||||
ai_name = ""
|
||||
ai_config = construct_main_ai_config()
|
||||
ai_config.command_registry = command_registry
|
||||
# print(prompt)
|
||||
# Initialize variables
|
||||
full_message_history = []
|
||||
next_action_count = 0
|
||||
# Make a constant:
|
||||
triggering_prompt = (
|
||||
"Determine which next command to use, and respond using the"
|
||||
" format specified above:"
|
||||
)
|
||||
# Initialize memory and make sure it is empty.
|
||||
# this is particularly important for indexing and referencing pinecone memory
|
||||
memory = get_memory(cfg, init=True)
|
||||
logger.typewriter_log(
|
||||
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
|
||||
)
|
||||
logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
if cfg.debug_mode:
|
||||
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
|
||||
|
||||
# TODO: have this directory live outside the repository (e.g. in a user's
|
||||
# home directory) and have it come in as a command line argument or part of
|
||||
# the env file.
|
||||
if workspace_directory is None:
|
||||
workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
|
||||
else:
|
||||
workspace_directory = Path(workspace_directory)
|
||||
# TODO: pass in the ai_settings file and the env file and have them cloned into
|
||||
# the workspace directory so we can bind them to the agent.
|
||||
workspace_directory = Workspace.make_workspace(workspace_directory)
|
||||
cfg.workspace_path = str(workspace_directory)
|
||||
|
||||
# HACK: doing this here to collect some globals that depend on the workspace.
|
||||
file_logger_path = workspace_directory / "file_logger.txt"
|
||||
if not file_logger_path.exists():
|
||||
with file_logger_path.open(mode="w", encoding="utf-8") as f:
|
||||
f.write("File Operation Logger ")
|
||||
|
||||
cfg.file_logger_path = str(file_logger_path)
|
||||
|
||||
agent = Agent(
|
||||
ai_name=ai_name,
|
||||
memory=memory,
|
||||
full_message_history=full_message_history,
|
||||
next_action_count=next_action_count,
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=triggering_prompt,
|
||||
workspace_directory=workspace_directory,
|
||||
)
|
||||
agent.start_interaction_loop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
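As a quick smoke test, click's bundled test runner can drive this entry point in-process; a minimal sketch (it assumes the file is importable as autogpt.cli_private and that an OpenAI key is already configured):

from click.testing import CliRunner

from autogpt.cli_private import main  # assumed import path

runner = CliRunner()
result = runner.invoke(main, ["--gpt3only", "--continuous", "--continuous-limit", "3"])
print(result.exit_code, result.output)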
@ -1,13 +1,8 @@
"""Code evaluation module."""
from __future__ import annotations

from typing import TYPE_CHECKING

from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function

if TYPE_CHECKING:
    from autogpt.config import Config
from autogpt.llm_utils import call_ai_function


@command(
@ -15,7 +10,7 @@ if TYPE_CHECKING:
    "Analyze Code",
    '"code": "<full_code_string>"',
)
def analyze_code(code: str, config: Config) -> list[str]:
def analyze_code(code: str) -> list[str]:
    """
    A function that takes in a string and returns a response from create chat
    completion api call.
@ -33,4 +28,4 @@ def analyze_code(code: str, config: Config) -> list[str]:
        "Analyzes the given code and returns a list of suggestions for improvements."
    )

    return call_ai_function(function_string, args, description_string, config=config)
    return call_ai_function(function_string, args, description_string)
@ -1,25 +1,22 @@
"""Commands for converting audio to text."""
import json
from typing import TYPE_CHECKING

import requests

from autogpt.commands.command import command
from autogpt.config import Config

if TYPE_CHECKING:
    from autogpt.config import Config
CFG = Config()


@command(
    "read_audio_from_file",
    "Convert Audio to text",
    '"filename": "<filename>"',
    lambda config: config.huggingface_audio_to_text_model
    and config.huggingface_api_token,
    "Configure huggingface_audio_to_text_model and Hugging Face api token.",
    CFG.huggingface_audio_to_text_model,
    "Configure huggingface_audio_to_text_model.",
)
def read_audio_from_file(filename: str, config: Config) -> str:
def read_audio_from_file(filename: str) -> str:
    """
    Convert audio to text.

@ -31,10 +28,10 @@ def read_audio_from_file(filename: str, config: Config) -> str:
    """
    with open(filename, "rb") as audio_file:
        audio = audio_file.read()
    return read_audio(audio, config)
    return read_audio(audio)


def read_audio(audio: bytes, config: Config) -> str:
def read_audio(audio: bytes) -> str:
    """
    Convert audio to text.

@ -44,9 +41,9 @@ def read_audio(audio: bytes, config: Config) -> str:
    Returns:
        str: The text from the audio
    """
    model = config.huggingface_audio_to_text_model
    model = CFG.huggingface_audio_to_text_model
    api_url = f"https://api-inference.huggingface.co/models/{model}"
    api_token = config.huggingface_api_token
    api_token = CFG.huggingface_api_token
    headers = {"Authorization": f"Bearer {api_token}"}

    if api_token is None:
@ -3,9 +3,6 @@ import importlib
import inspect
from typing import Any, Callable, Optional

from autogpt.config import Config
from autogpt.logs import logger

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"

@ -25,23 +22,19 @@ class Command:
        description: str,
        method: Callable[..., Any],
        signature: str = "",
        enabled: bool | Callable[[Config], bool] = True,
        enabled: bool = True,
        disabled_reason: Optional[str] = None,
    ):
        self.name = name
        self.description = description
        self.method = method
        self.signature = signature
        self.signature = signature if signature else str(inspect.signature(self.method))
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args, **kwargs) -> Any:
        if hasattr(kwargs, "config") and callable(self.enabled):
            self.enabled = self.enabled(kwargs["config"])
        if not self.enabled:
            if self.disabled_reason:
                return f"Command '{self.name}' is disabled: {self.disabled_reason}"
            return f"Command '{self.name}' is disabled"
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)

    def __str__(self) -> str:
@ -66,10 +59,6 @@ class CommandRegistry:
        return importlib.reload(module)

    def register(self, cmd: Command) -> None:
        if cmd.name in self.commands:
            logger.warn(
                f"Command '{cmd.name}' already registered and will be overwritten!"
            )
        self.commands[cmd.name] = cmd

    def unregister(self, command_name: str):
@ -138,22 +127,12 @@ class CommandRegistry:
def command(
    name: str,
    description: str,
    signature: str,
    enabled: bool | Callable[[Config], bool] = True,
    signature: str = "",
    enabled: bool = True,
    disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    # TODO: Remove this in favor of better command management
    CFG = Config()

    if callable(enabled):
        enabled = enabled(CFG)
    if not enabled:
        if disabled_reason is not None:
            logger.debug(f"Command '{name}' is disabled: {disabled_reason}")
        return lambda func: func

    def decorator(func: Callable[..., Any]) -> Command:
        cmd = Command(
            name=name,
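With the reverted decorator above, registering a custom command stays a one-liner; a minimal sketch (the command itself is hypothetical):

@command("say_hello", "Say Hello", '"name": "<name>"')
def say_hello(name: str) -> str:
    # The third argument documents the JSON arguments the LLM is expected to supply.
    return f"Hello, {name}!"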
@ -1,18 +1,18 @@
"""Execute code in a Docker container"""
import os
import subprocess
from pathlib import Path

import docker
from docker.errors import ImageNotFound

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

CFG = Config()


@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
def execute_python_file(filename: str, config: Config) -> str:
def execute_python_file(filename: str) -> str:
    """Execute a Python file in a Docker container and return the output

    Args:
@ -21,7 +21,7 @@ def execute_python_file(filename: str, config: Config) -> str:
    Returns:
        str: The output of the file
    """
    logger.info(f"Executing file '{filename}'")
    print(f"Executing file '{filename}'")

    if not filename.endswith(".py"):
        return "Error: Invalid file type. Only .py files are allowed."
@ -31,7 +31,7 @@ def execute_python_file(filename: str, config: Config) -> str:

    if we_are_running_in_a_docker_container():
        result = subprocess.run(
            ["python", filename], capture_output=True, encoding="utf8"
            f"python {filename}", capture_output=True, encoding="utf8", shell=True
        )
        if result.returncode == 0:
            return result.stdout
@ -40,17 +40,16 @@ def execute_python_file(filename: str, config: Config) -> str:

    try:
        client = docker.from_env()

        # You can replace this with the desired Python image/version
        # You can find available Python images on Docker Hub:
        # https://hub.docker.com/_/python
        image_name = "python:3-alpine"
        try:
            client.images.get(image_name)
            logger.warn(f"Image '{image_name}' found locally")
            print(f"Image '{image_name}' found locally")
        except ImageNotFound:
            logger.info(
                f"Image '{image_name}' not found locally, pulling from Docker Hub"
            )
            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
            # Use the low-level API to stream the pull response
            low_level_client = docker.APIClient()
            for line in low_level_client.pull(image_name, stream=True, decode=True):
@ -58,14 +57,15 @@ def execute_python_file(filename: str, config: Config) -> str:
                status = line.get("status")
                progress = line.get("progress")
                if status and progress:
                    logger.info(f"{status}: {progress}")
                    print(f"{status}: {progress}")
                elif status:
                    logger.info(status)
                    print(status)

        container = client.containers.run(
            image_name,
            ["python", str(Path(filename).relative_to(config.workspace_path))],
            f"python {filename}",
            volumes={
                config.workspace_path: {
                CFG.workspace_path: {
                    "bind": "/workspace",
                    "mode": "ro",
                }
@ -86,7 +86,7 @@ def execute_python_file(filename: str, config: Config) -> str:
        return logs

    except docker.errors.DockerException as e:
        logger.warn(
        print(
            "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
        )
        return f"Error: {str(e)}"
@ -95,42 +95,16 @@ def execute_python_file(filename: str, config: Config) -> str:
        return f"Error: {str(e)}"


def validate_command(command: str, config: Config) -> bool:
    """Validate a command to ensure it is allowed

    Args:
        command (str): The command to validate

    Returns:
        bool: True if the command is allowed, False otherwise
    """
    tokens = command.split()

    if not tokens:
        return False

    if config.deny_commands and tokens[0] not in config.deny_commands:
        return False

    for keyword in config.allow_commands:
        if keyword in tokens:
            return True
    if config.allow_commands:
        return False

    return True


@command(
    "execute_shell",
    "Execute Shell Command, non-interactive commands only",
    '"command_line": "<command_line>"',
    lambda cfg: cfg.execute_local_commands,
    CFG.execute_local_commands,
    "You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config file: .env - do not attempt to bypass the restriction.",
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell(command_line: str, config: Config) -> str:
def execute_shell(command_line: str) -> str:
    """Execute a shell command and return the output

    Args:
@ -139,18 +113,19 @@ def execute_shell(command_line: str, config: Config) -> str:
    Returns:
        str: The output of the command
    """
    if not validate_command(command_line, config):
        logger.info(f"Command '{command_line}' not allowed")
        return "Error: This Shell Command is not allowed."

    current_dir = Path.cwd()
    if not CFG.execute_local_commands:
        return (
            "You are not allowed to run local shell commands. To execute"
            " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
            "in your config. Do not attempt to bypass the restriction."
        )
    current_dir = os.getcwd()
    # Change dir into workspace if necessary
    if not current_dir.is_relative_to(config.workspace_path):
        os.chdir(config.workspace_path)
    if CFG.workspace_path not in current_dir:
        os.chdir(CFG.workspace_path)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )
    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")

    result = subprocess.run(command_line, capture_output=True, shell=True)
    output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
@ -158,19 +133,18 @@ def execute_shell(command_line: str, config: Config) -> str:
    # Change back to whatever the prior working dir was

    os.chdir(current_dir)
    return output


@command(
    "execute_shell_popen",
    "Execute Shell Command, non-interactive commands only",
    '"command_line": "<command_line>"',
    lambda config: config.execute_local_commands,
    CFG.execute_local_commands,
    "You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell_popen(command_line, config: Config) -> str:
def execute_shell_popen(command_line) -> str:
    """Execute a shell command with Popen and returns an english description
    of the event and the process id

@ -180,18 +154,12 @@ def execute_shell_popen(command_line, config: Config) -> str:
    Returns:
        str: Description of the fact that the process started and its id
    """
    if not validate_command(command_line, config):
        logger.info(f"Command '{command_line}' not allowed")
        return "Error: This Shell Command is not allowed."

    current_dir = os.getcwd()
    # Change dir into workspace if necessary
    if config.workspace_path not in current_dir:
        os.chdir(config.workspace_path)
    if CFG.workspace_path not in current_dir:
        os.chdir(CFG.workspace_path)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )
    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")

    do_not_show_output = subprocess.DEVNULL
    process = subprocess.Popen(
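One trade-off this hunk reintroduces: subprocess.run with a list passes the filename as a single argv entry, while the shell=True string form hands the whole line to the shell, so a hostile filename can smuggle in extra commands. A small illustration (script.py is hypothetical):

import subprocess

# List form: no shell parsing; the filename is one argument (safer).
subprocess.run(["python", "script.py"], capture_output=True, encoding="utf8")

# String form with shell=True: the shell interprets the line, so a filename
# like "script.py; rm -rf ." would execute a second command.
subprocess.run("python script.py", capture_output=True, encoding="utf8", shell=True)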
@ -1,121 +1,46 @@
"""File operations for AutoGPT"""
from __future__ import annotations

import hashlib
import os
import os.path
from typing import TYPE_CHECKING, Generator, Literal
from typing import Generator

import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry

from autogpt.commands.command import command
from autogpt.commands.file_operations_utils import read_textual_file
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, VectorMemory
from autogpt.config import Config
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size

if TYPE_CHECKING:
    from autogpt.config import Config
CFG = Config()


Operation = Literal["write", "append", "delete"]


def text_checksum(text: str) -> str:
    """Get the hex checksum for the given text."""
    return hashlib.md5(text.encode("utf-8")).hexdigest()


def operations_from_log(
    log_path: str,
) -> Generator[tuple[Operation, str, str | None], None, None]:
    """Parse the file operations log and return a tuple containing the log entries"""
    try:
        log = open(log_path, "r", encoding="utf-8")
    except FileNotFoundError:
        return

    for line in log:
        line = line.replace("File Operation Logger", "").strip()
        if not line:
            continue
        operation, tail = line.split(": ", maxsplit=1)
        operation = operation.strip()
        if operation in ("write", "append"):
            try:
                path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
            except ValueError:
                logger.warn(f"File log entry lacks checksum: '{line}'")
                path, checksum = tail.strip(), None
            yield (operation, path, checksum)
        elif operation == "delete":
            yield (operation, tail.strip(), None)

    log.close()


def file_operations_state(log_path: str) -> dict[str, str]:
    """Iterates over the operations log and returns the expected state.

    Parses a log file at config.file_logger_path to construct a dictionary that maps
    each file path written or appended to its checksum. Deleted files are removed
    from the dictionary.

    Returns:
        A dictionary mapping file paths to their checksums.

    Raises:
        FileNotFoundError: If config.file_logger_path is not found.
        ValueError: If the log file content is not in the expected format.
    """
    state = {}
    for operation, path, checksum in operations_from_log(log_path):
        if operation in ("write", "append"):
            state[path] = checksum
        elif operation == "delete":
            del state[path]
    return state


def is_duplicate_operation(
    operation: Operation, filename: str, config: Config, checksum: str | None = None
) -> bool:
    """Check if the operation has already been performed
def check_duplicate_operation(operation: str, filename: str) -> bool:
    """Check if the operation has already been performed on the given file

    Args:
        operation: The operation to check for
        filename: The name of the file to check for
        checksum: The checksum of the contents to be written
        operation (str): The operation to check for
        filename (str): The name of the file to check for

    Returns:
        True if the operation has already been performed on the file
        bool: True if the operation has already been performed on the file
    """
    state = file_operations_state(config.file_logger_path)
    if operation == "delete" and filename not in state:
        return True
    if operation == "write" and state.get(filename) == checksum:
        return True
    return False
    log_content = read_file(CFG.file_logger_path)
    log_entry = f"{operation}: {filename}\n"
    return log_entry in log_content


def log_operation(
    operation: str, filename: str, config: Config, checksum: str | None = None
) -> None:
def log_operation(operation: str, filename: str) -> None:
    """Log the file operation to the file_logger.txt

    Args:
        operation: The operation to log
        filename: The name of the file the operation was performed on
        checksum: The checksum of the contents to be written
        operation (str): The operation to log
        filename (str): The name of the file the operation was performed on
    """
    log_entry = f"{operation}: {filename}"
    if checksum is not None:
        log_entry += f" #{checksum}"
    logger.debug(f"Logging file operation: {log_entry}")
    append_to_file(config.file_logger_path, f"{log_entry}\n", config, should_log=False)
    log_entry = f"{operation}: {filename}\n"
    append_to_file(CFG.file_logger_path, log_entry, should_log=False)


def split_file(
@ -138,7 +63,7 @@ def split_file(
    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + max(overlap - 1, 0)]
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:content_length]

@ -150,8 +75,8 @@ def split_file(
        start += max_length - overlap


@command("read_file", "Read a file", '"filename": "<filename>"')
def read_file(filename: str, config: Config) -> str:
@command("read_file", "Read file", '"filename": "<filename>"')
def read_file(filename: str) -> str:
    """Read a file and return the contents

    Args:
@ -161,46 +86,49 @@ def read_file(filename: str, config: Config) -> str:
        str: The contents of the file
    """
    try:
        content = read_textual_file(filename, logger)

        # TODO: invalidate/update memory when file is edited
        file_memory = MemoryItem.from_text_file(content, filename)
        if len(file_memory.chunks) > 1:
            return file_memory.summary

        with open(filename, "r", encoding="utf-8") as f:
            content = f.read()
        return content
    except Exception as e:
        return f"Error: {str(e)}"


def ingest_file(
    filename: str,
    memory: VectorMemory,
    filename: str, memory, max_length: int = 4000, overlap: int = 200
) -> None:
    """
    Ingest a file by reading its content, splitting it into chunks with a specified
    maximum length and overlap, and adding the chunks to the memory storage.

    Args:
        filename: The name of the file to ingest
        memory: An object with an add() method to store the chunks in memory
    :param filename: The name of the file to ingest
    :param memory: An object with an add() method to store the chunks in memory
    :param max_length: The maximum length of each chunk, default is 4000
    :param overlap: The number of overlapping characters between chunks, default is 200
    """
    try:
        logger.info(f"Ingesting file {filename}")
        print(f"Working with file {filename}")
        content = read_file(filename)
        content_length = len(content)
        print(f"File length: {content_length} characters")

        # TODO: differentiate between different types of files
        file_memory = MemoryItem.from_text_file(content, filename)
        logger.debug(f"Created memory: {file_memory.dump()}")
        memory.add(file_memory)
        chunks = list(split_file(content, max_length=max_length, overlap=overlap))

        logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
    except Exception as err:
        logger.warn(f"Error while ingesting file '{filename}': {err}")
        num_chunks = len(chunks)
        for i, chunk in enumerate(chunks):
            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
            memory_to_add = (
                f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
            )

            memory.add(memory_to_add)

        print(f"Done ingesting {num_chunks} chunks from {filename}.")
    except Exception as e:
        print(f"Error while ingesting file '{filename}': {str(e)}")


@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
def write_to_file(filename: str, text: str, config: Config) -> str:
def write_to_file(filename: str, text: str) -> str:
    """Write text to a file

    Args:
@ -210,26 +138,24 @@ def write_to_file(filename: str, text: str, config: Config) -> str:
    Returns:
        str: A message indicating success or failure
    """
    checksum = text_checksum(text)
    if is_duplicate_operation("write", filename, config, checksum):
    if check_duplicate_operation("write", filename):
        return "Error: File has already been updated."
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(filename, "w", encoding="utf-8") as f:
            f.write(text)
        log_operation("write", filename, config, checksum)
        log_operation("write", filename)
        return "File written to successfully."
    except Exception as err:
        return f"Error: {err}"
    except Exception as e:
        return f"Error: {str(e)}"


@command(
    "append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
)
def append_to_file(
    filename: str, text: str, config: Config, should_log: bool = True
) -> str:
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
    """Append text to a file

    Args:
@ -241,23 +167,19 @@ def append_to_file(
        str: A message indicating success or failure
    """
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        with open(filename, "a", encoding="utf-8") as f:
        with open(filename, "a") as f:
            f.write(text)

        if should_log:
            with open(filename, "r", encoding="utf-8") as f:
                checksum = text_checksum(f.read())
            log_operation("append", filename, config, checksum=checksum)
            log_operation("append", filename)

        return "Text appended successfully."
    except Exception as err:
        return f"Error: {err}"
    except Exception as e:
        return f"Error: {str(e)}"


@command("delete_file", "Delete file", '"filename": "<filename>"')
def delete_file(filename: str, config: Config) -> str:
def delete_file(filename: str) -> str:
    """Delete a file

    Args:
@ -266,19 +188,19 @@ def delete_file(filename: str, config: Config) -> str:
    Returns:
        str: A message indicating success or failure
    """
    if is_duplicate_operation("delete", filename, config):
    if check_duplicate_operation("delete", filename):
        return "Error: File has already been deleted."
    try:
        os.remove(filename)
        log_operation("delete", filename, config)
        log_operation("delete", filename)
        return "File deleted successfully."
    except Exception as err:
        return f"Error: {err}"
    except Exception as e:
        return f"Error: {str(e)}"


@command("list_files", "List Files in Directory", '"directory": "<directory>"')
def list_files(directory: str, config: Config) -> list[str]:
    """lists files in a directory recursively
@command("search_files", "Search Files", '"directory": "<directory>"')
def search_files(directory: str) -> list[str]:
    """Search for files in a directory

    Args:
        directory (str): The directory to search in
@ -293,7 +215,7 @@ def list_files(directory: str, config: Config) -> list[str]:
            if file.startswith("."):
                continue
            relative_path = os.path.relpath(
                os.path.join(root, file), config.workspace_path
                os.path.join(root, file), CFG.workspace_path
            )
            found_files.append(relative_path)

@ -304,20 +226,18 @@ def list_files(directory: str, config: Config) -> list[str]:
    "download_file",
    "Download File",
    '"url": "<url>", "filename": "<filename>"',
    lambda config: config.allow_downloads,
    CFG.allow_downloads,
    "Error: You do not have user authorization to download files locally.",
)
def download_file(url, filename, config: Config):
def download_file(url, filename):
    """Downloads a file
    Args:
        url (str): URL of the file to download
        filename (str): Filename to save the file as
    """
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
        with Spinner(message, plain_output=config.plain_output) as spinner:
        message = f"{Fore.YELLOW}Downloading file from {Back.MAGENTA}{url}{Back.RESET}{Fore.RESET}"
        with Spinner(message) as spinner:
            session = requests.Session()
            retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
            adapter = HTTPAdapter(max_retries=retry)
@ -341,8 +261,8 @@ def download_file(url, filename, config: Config):
                    progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
                    spinner.update_message(f"{message} {progress}")

            return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
    except requests.HTTPError as err:
        return f"Got an HTTP Error whilst trying to download file: {err}"
    except Exception as err:
        return f"Error: {err}"
            return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
    except requests.HTTPError as e:
        return f"Got an HTTP Error whilst trying to download file: {e}"
    except Exception as e:
        return "Error: " + str(e)
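The overlap behaviour of split_file kept by this revert is easy to check; a sketch assuming the function is importable from autogpt.commands.file_operations:

from autogpt.commands.file_operations import split_file  # assumed import path

content = "abcdefghijklmnopqrstuvwxyz"
# With max_length=10 and overlap=3, consecutive chunks share a few characters,
# so text cut at a chunk boundary still appears whole in one chunk.
for chunk in split_file(content, max_length=10, overlap=3):
    print(chunk)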
@ -1,40 +1,33 @@
"""Git operations for autogpt"""
from typing import TYPE_CHECKING

from git.repo import Repo

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.url_utils.validators import validate_url

if TYPE_CHECKING:
    from autogpt.config import Config
CFG = Config()


@command(
    "clone_repository",
    "Clone Repository",
    '"url": "<repository_url>", "clone_path": "<clone_path>"',
    lambda config: config.github_username and config.github_api_key,
    '"repository_url": "<repository_url>", "clone_path": "<clone_path>"',
    CFG.github_username and CFG.github_api_key,
    "Configure github_username and github_api_key.",
)
@validate_url
def clone_repository(url: str, clone_path: str, config: Config) -> str:
def clone_repository(repository_url: str, clone_path: str) -> str:
    """Clone a GitHub repository locally.

    Args:
        url (str): The URL of the repository to clone.
        repository_url (str): The URL of the repository to clone.
        clone_path (str): The path to clone the repository to.

    Returns:
        str: The result of the clone operation.
    """
    split_url = url.split("//")
    auth_repo_url = f"//{config.github_username}:{config.github_api_key}@".join(
        split_url
    )
    split_url = repository_url.split("//")
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
    try:
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
        return f"""Cloned {url} to {clone_path}"""
        Repo.clone_from(auth_repo_url, clone_path)
        return f"""Cloned {repository_url} to {clone_path}"""
    except Exception as e:
        return f"Error: {str(e)}"
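The credential-injection rewrite above splits on "//" and rejoins with an auth fragment; traced on a hypothetical URL and token:

repository_url = "https://github.com/user/repo.git"     # hypothetical
split_url = repository_url.split("//")                   # ['https:', 'github.com/user/repo.git']
auth_repo_url = "//user123:token456@".join(split_url)    # hypothetical credentials
print(auth_repo_url)  # https://user123:token456@github.com/user/repo.git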
@ -2,24 +2,17 @@
from __future__ import annotations

import json
from itertools import islice
from typing import TYPE_CHECKING

from duckduckgo_search import DDGS
from duckduckgo_search import ddg

from autogpt.commands.command import command
from autogpt.config import Config

if TYPE_CHECKING:
    from autogpt.config import Config
CFG = Config()


@command(
    "google",
    "Google Search",
    '"query": "<query>"',
    lambda config: not config.google_api_key,
)
def google_search(query: str, config: Config, num_results: int = 8) -> str:
@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
    """Return the results of a Google search

    Args:
@ -33,12 +26,12 @@ def google_search(query: str, config: Config, num_results: int = 8) -> str:
    if not query:
        return json.dumps(search_results)

    results = DDGS().text(query)
    results = ddg(query, max_results=num_results)
    if not results:
        return json.dumps(search_results)

    for item in islice(results, num_results):
        search_results.append(item)
    for j in results:
        search_results.append(j)

    results = json.dumps(search_results, ensure_ascii=False, indent=4)
    return safe_google_results(results)
@ -48,12 +41,10 @@ def google_search(query: str, config: Config, num_results: int = 8) -> str:
    "google",
    "Google Search",
    '"query": "<query>"',
    lambda config: bool(config.google_api_key) and bool(config.custom_search_engine_id),
    "Configure google_api_key and custom_search_engine_id.",
    bool(CFG.google_api_key),
    "Configure google_api_key.",
)
def google_official_search(
    query: str, config: Config, num_results: int = 8
) -> str | list[str]:
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
    """Return the results of a Google search using the official Google API

    Args:
@ -69,8 +60,8 @@ def google_official_search(

    try:
        # Get the Google API key and Custom Search Engine ID from the config file
        api_key = config.google_api_key
        custom_search_engine_id = config.custom_search_engine_id
        api_key = CFG.google_api_key
        custom_search_engine_id = CFG.custom_search_engine_id

        # Initialize the Custom Search API service
        service = build("customsearch", "v1", developerKey=api_key)
@ -119,14 +110,8 @@ def safe_google_results(results: str | list) -> str:
    """
    if isinstance(results, list):
        safe_message = json.dumps(
            [result.encode("utf-8", "ignore").decode("utf-8") for result in results]
            [result.encode("utf-8", "ignore") for result in results]
        )
    else:
        safe_message = results.encode("utf-8", "ignore").decode("utf-8")
    return safe_message


if __name__ == '__main__':
    print(google_search('Who are you?'))
    results = ddg('Who are you?', max_results=8)
    print(results)
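One caveat in the reverted safe_google_results: str.encode returns bytes, and bytes are not JSON-serializable, which is why the newer variant also decodes. A two-line check:

import json

json.dumps(["ok".encode("utf-8", "ignore").decode("utf-8")])  # works: the value is a str again
# json.dumps(["ok".encode("utf-8", "ignore")])                # raises TypeError: bytes is not JSON serializable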
@ -1,10 +1,7 @@
""" Image Generation Module for AutoGPT."""
import io
import json
import time
import uuid
from base64 import b64decode
from typing import TYPE_CHECKING

import openai
import requests
@ -12,20 +9,12 @@ from PIL import Image

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

if TYPE_CHECKING:
    from autogpt.config import Config
CFG = Config()


@command(
    "generate_image",
    "Generate Image",
    '"prompt": "<prompt>"',
    lambda config: config.image_provider,
    "Requires a image provider to be set.",
)
def generate_image(prompt: str, config: Config, size: int = 256) -> str:
@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
def generate_image(prompt: str, size: int = 256) -> str:
    """Generate an image from a prompt.

    Args:
@ -35,21 +24,21 @@ def generate_image(prompt: str, config: Config, size: int = 256) -> str:
    Returns:
        str: The filename of the image
    """
    filename = f"{config.workspace_path}/{str(uuid.uuid4())}.jpg"
    filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"

    # DALL-E
    if config.image_provider == "dalle":
        return generate_image_with_dalle(prompt, filename, size, config)
    if CFG.image_provider == "dalle":
        return generate_image_with_dalle(prompt, filename, size)
    # HuggingFace
    elif config.image_provider == "huggingface":
        return generate_image_with_hf(prompt, filename, config)
    elif CFG.image_provider == "huggingface":
        return generate_image_with_hf(prompt, filename)
    # SD WebUI
    elif config.image_provider == "sdwebui":
        return generate_image_with_sd_webui(prompt, filename, config, size)
    elif CFG.image_provider == "sdwebui":
        return generate_image_with_sd_webui(prompt, filename, size)
    return "No Image Provider Set"


def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
def generate_image_with_hf(prompt: str, filename: str) -> str:
    """Generate an image with HuggingFace's API.

    Args:
@ -60,58 +49,34 @@ def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
        str: The filename of the image
    """
    API_URL = (
        f"https://api-inference.huggingface.co/models/{config.huggingface_image_model}"
        f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
    )
    if config.huggingface_api_token is None:
    if CFG.huggingface_api_token is None:
        raise ValueError(
            "You need to set your Hugging Face API token in the config file."
        )
    headers = {
        "Authorization": f"Bearer {config.huggingface_api_token}",
        "Authorization": f"Bearer {CFG.huggingface_api_token}",
        "X-Use-Cache": "false",
    }

    retry_count = 0
    while retry_count < 10:
        response = requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": prompt,
            },
        )
    response = requests.post(
        API_URL,
        headers=headers,
        json={
            "inputs": prompt,
        },
    )

        if response.ok:
            try:
                image = Image.open(io.BytesIO(response.content))
                logger.info(f"Image Generated for prompt:{prompt}")
                image.save(filename)
                return f"Saved to disk:{filename}"
            except Exception as e:
                logger.error(e)
                break
        else:
            try:
                error = json.loads(response.text)
                if "estimated_time" in error:
                    delay = error["estimated_time"]
                    logger.debug(response.text)
                    logger.info("Retrying in", delay)
                    time.sleep(delay)
                else:
                    break
            except Exception as e:
                logger.error(e)
                break
    image = Image.open(io.BytesIO(response.content))
    print(f"Image Generated for prompt:{prompt}")

        retry_count += 1
    image.save(filename)

    return f"Error creating image."
    return f"Saved to disk:{filename}"


def generate_image_with_dalle(
    prompt: str, filename: str, size: int, config: Config
) -> str:
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
    """Generate an image with DALL-E.

    Args:
@ -122,11 +87,12 @@ def generate_image_with_dalle(
    Returns:
        str: The filename of the image
    """
    openai.api_key = CFG.openai_api_key

    # Check for supported image sizes
    if size not in [256, 512, 1024]:
        closest = min([256, 512, 1024], key=lambda x: abs(x - size))
        logger.info(
        print(
            f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
        )
        size = closest
@ -136,10 +102,9 @@ def generate_image_with_dalle(
        n=1,
        size=f"{size}x{size}",
        response_format="b64_json",
        api_key=config.openai_api_key,
    )

    logger.info(f"Image Generated for prompt:{prompt}")
    print(f"Image Generated for prompt:{prompt}")

    image_data = b64decode(response["data"][0]["b64_json"])

@ -152,7 +117,6 @@ def generate_image_with_dalle(
def generate_image_with_sd_webui(
    prompt: str,
    filename: str,
    config: Config,
    size: int = 512,
    negative_prompt: str = "",
    extra: dict = {},
@ -169,13 +133,13 @@ def generate_image_with_sd_webui(
    """
    # Create a session and set the basic auth if needed
    s = requests.Session()
    if config.sd_webui_auth:
        username, password = config.sd_webui_auth.split(":")
    if CFG.sd_webui_auth:
        username, password = CFG.sd_webui_auth.split(":")
        s.auth = (username, password or "")

    # Generate the images
    response = requests.post(
        f"{config.sd_webui_url}/sdapi/v1/txt2img",
        f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
        json={
            "prompt": prompt,
            "negative_prompt": negative_prompt,
@ -189,7 +153,7 @@ def generate_image_with_sd_webui(
        },
    )

    logger.info(f"Image Generated for prompt:{prompt}")
    print(f"Image Generated for prompt:{prompt}")

    # Save the image to disk
    response = response.json()
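The DALL-E size snapping above picks the nearest supported resolution; checked on two sample values:

for size in (300, 800):
    closest = min([256, 512, 1024], key=lambda x: abs(x - size))
    print(size, "->", closest)  # 300 -> 256, 800 -> 1024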
@ -1,13 +1,9 @@
from __future__ import annotations

import json
from typing import TYPE_CHECKING

from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function

if TYPE_CHECKING:
    from autogpt.config import Config
from autogpt.llm_utils import call_ai_function


@command(
@ -15,7 +11,7 @@ if TYPE_CHECKING:
    "Get Improved Code",
    '"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
)
def improve_code(suggestions: list[str], code: str, config: Config) -> str:
def improve_code(suggestions: list[str], code: str) -> str:
    """
    A function that takes in code and suggestions and returns a response from create
    chat completion api call.
@ -36,4 +32,4 @@ def improve_code(suggestions: list[str], code: str, config: Config) -> str:
        " provided, making no other changes."
    )

    return call_ai_function(function_string, args, description_string, config=config)
    return call_ai_function(function_string, args, description_string)
44
autogpt/commands/twitter.py
Normal file
@ -0,0 +1,44 @@
"""A module that contains a command to send a tweet."""
|
||||
import os
|
||||
|
||||
import tweepy
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from autogpt.commands.command import command
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
@command(
|
||||
"send_tweet",
|
||||
"Send Tweet",
|
||||
'"tweet_text": "<tweet_text>"',
|
||||
)
|
||||
def send_tweet(tweet_text: str) -> str:
|
||||
"""
|
||||
A function that takes in a string and returns a response from create chat
|
||||
completion api call.
|
||||
|
||||
Args:
|
||||
tweet_text (str): Text to be tweeted.
|
||||
|
||||
Returns:
|
||||
A result from sending the tweet.
|
||||
"""
|
||||
consumer_key = os.environ.get("TW_CONSUMER_KEY")
|
||||
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
|
||||
access_token = os.environ.get("TW_ACCESS_TOKEN")
|
||||
access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
|
||||
# Authenticate to Twitter
|
||||
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
|
||||
auth.set_access_token(access_token, access_token_secret)
|
||||
|
||||
# Create API object
|
||||
api = tweepy.API(auth)
|
||||
|
||||
# Send tweet
|
||||
try:
|
||||
api.update_status(tweet_text)
|
||||
return "Tweet sent successfully!"
|
||||
except tweepy.TweepyException as e:
|
||||
return f"Error sending tweet: {e.reason}"
|
||||
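send_tweet reads its four credentials from the environment, so a .env file picked up by load_dotenv is enough to enable it; a sketch with placeholder values:

import os

# Placeholder values; in practice these belong in the user's .env file.
os.environ.setdefault("TW_CONSUMER_KEY", "your-consumer-key")
os.environ.setdefault("TW_CONSUMER_SECRET", "your-consumer-secret")
os.environ.setdefault("TW_ACCESS_TOKEN", "your-access-token")
os.environ.setdefault("TW_ACCESS_TOKEN_SECRET", "your-access-token-secret")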
@ -1,12 +1,10 @@
"""Web scraping commands using Playwright"""
from __future__ import annotations

from autogpt.logs import logger

try:
    from playwright.sync_api import sync_playwright
except ImportError:
    logger.info(
    print(
        "Playwright not installed. Please install it with 'pip install playwright' to use."
    )
from bs4 import BeautifulSoup
@ -1,20 +1,89 @@
"""Browse a webpage and summarize it using the LLM model"""
from __future__ import annotations

from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup
from requests import Response
from requests.compat import urljoin

from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url

CFG = Config()

session = requests.Session()
session.headers.update({"User-Agent": CFG.user_agent})


def is_valid_url(url: str) -> bool:
    """Check if the URL is valid

    Args:
        url (str): The URL to check

    Returns:
        bool: True if the URL is valid, False otherwise
    """
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False


def sanitize_url(url: str) -> str:
    """Sanitize the URL

    Args:
        url (str): The URL to sanitize

    Returns:
        str: The sanitized URL
    """
    return urljoin(url, urlparse(url).path)


def check_local_file_access(url: str) -> bool:
    """Check if the URL is a local file

    Args:
        url (str): The URL to check

    Returns:
        bool: True if the URL is a local file, False otherwise
    """
    local_prefixes = [
        "file:///",
        "file://localhost/",
        "file://localhost",
        "http://localhost",
        "http://localhost/",
        "https://localhost",
        "https://localhost/",
        "http://2130706433",
        "http://2130706433/",
        "https://2130706433",
        "https://2130706433/",
        "http://127.0.0.1/",
        "http://127.0.0.1",
        "https://127.0.0.1/",
        "https://127.0.0.1",
        "https://0.0.0.0/",
        "https://0.0.0.0",
        "http://0.0.0.0/",
        "http://0.0.0.0",
        "http://0000",
        "http://0000/",
        "https://0000",
        "https://0000/",
    ]
    return any(url.startswith(prefix) for prefix in local_prefixes)


@validate_url
def get_response(
    url: str, config: Config, timeout: int = 10
    url: str, timeout: int = 10
) -> tuple[None, str] | tuple[Response, None]:
    """Get the response from a URL

@ -30,8 +99,17 @@ def get_response(
        requests.exceptions.RequestException: If the HTTP request fails
    """
    try:
        session.headers.update({"User-Agent": config.user_agent})
        response = session.get(url, timeout=timeout)
        # Restrict access to local files
        if check_local_file_access(url):
            raise ValueError("Access to local files is restricted")

        # Most basic check if the URL is valid:
        if not url.startswith("http://") and not url.startswith("https://"):
            raise ValueError("Invalid URL format")

        sanitized_url = sanitize_url(url)

        response = session.get(sanitized_url, timeout=timeout)

        # Check if the response contains an HTTP error
        if response.status_code >= 400:
@ -48,7 +126,7 @@ def get_response(
        return None, f"Error: {str(re)}"


def scrape_text(url: str, config: Config) -> str:
def scrape_text(url: str) -> str:
    """Scrape text from a webpage

    Args:
@ -57,7 +135,7 @@ def scrape_text(url: str, config: Config) -> str:
    Returns:
        str: The scraped text
    """
    response, error_message = get_response(url, config)
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
@ -76,7 +154,7 @@ def scrape_text(url: str, config: Config) -> str:
    return text


def scrape_links(url: str, config: Config) -> str | list[str]:
def scrape_links(url: str) -> str | list[str]:
    """Scrape links from a webpage

    Args:
@ -85,7 +163,7 @@ def scrape_links(url: str, config: Config) -> str | list[str]:
    Returns:
        str | list[str]: The scraped links
    """
    response, error_message = get_response(url, config)
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
@ -98,3 +176,13 @@ def scrape_links(url: str, config: Config) -> str | list[str]:
    hyperlinks = extract_hyperlinks(soup, url)

    return format_hyperlinks(hyperlinks)


def create_message(chunk, question):
    """Create a message for the user to summarize a chunk of text"""
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, answer the following'
        f' question: "{question}" -- if the question cannot be answered using the'
        " text, summarize the text.",
    }
@ -4,41 +4,26 @@ from __future__ import annotations
import logging
from pathlib import Path
from sys import platform
from typing import TYPE_CHECKING, Optional, Type

from bs4 import BeautifulSoup
from selenium.common.exceptions import WebDriverException
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service as ChromeDriverService
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.edge.service import Service as EdgeDriverService
from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.service import Service as GeckoDriverService
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager

import autogpt.processing.text as summary
from autogpt.commands.command import command
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, get_memory
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url

if TYPE_CHECKING:
    from autogpt.config import Config

BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions

FILE_DIR = Path(__file__).parent.parent
CFG = Config()


@command(
@ -46,8 +31,7 @@ FILE_DIR = Path(__file__).parent.parent
    "Browse Website",
    '"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
)
@validate_url
def browse_website(url: str, question: str, config: Config) -> str:
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
    """Browse a website and return the answer and links to the user

    Args:
@ -57,26 +41,19 @@ def browse_website(url: str, question: str, config: Config) -> str:
    Returns:
        Tuple[str, WebDriver]: The answer and links to the user and the webdriver
    """
    try:
        driver, text = scrape_text_with_selenium(url, config)
    except WebDriverException as e:
        # These errors are often quite long and include lots of context.
        # Just grab the first line.
        msg = e.msg.split("\n")[0]
        return f"Error: {msg}"

    driver, text = scrape_text_with_selenium(url)
    add_header(driver)
    summary = summarize_memorize_webpage(url, text, question, config, driver)
    summary_text = summary.summarize_text(url, text, question, driver)
    links = scrape_links_with_selenium(driver, url)

    # Limit links to 5
    if len(links) > 5:
        links = links[:5]
    close_browser(driver)
    return f"Answer gathered from website: {summary}\n\nLinks: {links}"
    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver


def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]:
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
    """Scrape text from a website using selenium

    Args:
@ -87,50 +64,37 @@ def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]
    """
    logging.getLogger("selenium").setLevel(logging.CRITICAL)

    options_available: dict[str, Type[BrowserOptions]] = {
    options_available = {
        "chrome": ChromeOptions,
        "edge": EdgeOptions,
        "firefox": FirefoxOptions,
        "safari": SafariOptions,
        "firefox": FirefoxOptions,
    }

    options: BrowserOptions = options_available[config.selenium_web_browser]()
    options = options_available[CFG.selenium_web_browser]()
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
    )

    if config.selenium_web_browser == "firefox":
        if config.selenium_headless:
            options.headless = True
            options.add_argument("--disable-gpu")
        driver = FirefoxDriver(
            service=GeckoDriverService(GeckoDriverManager().install()), options=options
    if CFG.selenium_web_browser == "firefox":
        driver = webdriver.Firefox(
            executable_path=GeckoDriverManager().install(), options=options
        )
    elif config.selenium_web_browser == "edge":
        driver = EdgeDriver(
            service=EdgeDriverService(EdgeDriverManager().install()), options=options
        )
    elif config.selenium_web_browser == "safari":
    elif CFG.selenium_web_browser == "safari":
        # Requires a bit more setup on the users end
        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = SafariDriver(options=options)
        driver = webdriver.Safari(options=options)
    else:
        if platform == "linux" or platform == "linux2":
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--remote-debugging-port=9222")

        options.add_argument("--no-sandbox")
        if config.selenium_headless:
            options.add_argument("--headless=new")
        if CFG.selenium_headless:
            options.add_argument("--headless")
            options.add_argument("--disable-gpu")

        chromium_driver_path = Path("/usr/bin/chromedriver")

        driver = ChromeDriver(
            service=ChromeDriverService(str(chromium_driver_path))
            if chromium_driver_path.exists()
            else ChromeDriverService(ChromeDriverManager().install()),
            options=options,
        driver = webdriver.Chrome(
            executable_path=ChromeDriverManager().install(), options=options
        )
    driver.get(url)

@ -193,40 +157,4 @@ def add_header(driver: WebDriver) -> None:
    Returns:
        None
    """
    try:
        with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file:
            overlay_script = overlay_file.read()
        driver.execute_script(overlay_script)
    except Exception as e:
        print(f"Error executing overlay.js: {e}")


def summarize_memorize_webpage(
    url: str,
    text: str,
    question: str,
    config: Config,
    driver: Optional[WebDriver] = None,
) -> str:
    """Summarize text using the OpenAI API

    Args:
        url (str): The url of the text
        text (str): The text to summarize
        question (str): The question to ask the model
        driver (WebDriver): The webdriver to use to scroll the page

    Returns:
        str: The summary of the text
    """
    if not text:
        return "Error: No text to summarize"

    text_length = len(text)
    logger.info(f"Text length: {text_length} characters")

    memory = get_memory(config)

    new_memory = MemoryItem.from_webpage(text, url, question=question)
    memory.add(new_memory)
    return new_memory.summary
    driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())
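The browser setup above picks an Options class by name via dict dispatch. A minimal standalone sketch of the pattern (browser name hardcoded for illustration):

    from selenium.webdriver.chrome.options import Options as ChromeOptions
    from selenium.webdriver.firefox.options import Options as FirefoxOptions

    options_available = {"chrome": ChromeOptions, "firefox": FirefoxOptions}
    options = options_available["chrome"]()  # look up the class, then instantiate it
    options.add_argument("--headless")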

@ -2,13 +2,9 @@
from __future__ import annotations

import json
from typing import TYPE_CHECKING

from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function

if TYPE_CHECKING:
    from autogpt.config import Config
from autogpt.llm_utils import call_ai_function


@command(
@ -16,7 +12,7 @@ if TYPE_CHECKING:
    "Write Tests",
    '"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
def write_tests(code: str, focus: list[str], config: Config) -> str:
def write_tests(code: str, focus: list[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from create
    chat completion api call.
@ -38,4 +34,4 @@ def write_tests(code: str, focus: list[str], config: Config) -> str:
    " specific areas if required."
    )

    return call_ai_function(function_string, args, description_string, config=config)
    return call_ai_function(function_string, args, description_string)
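call_ai_function (defined in autogpt/llm_utils.py further down) treats the LLM as the body of a declared Python function. A hedged usage sketch -- the strings and sample values here are illustrative, not the exact ones write_tests builds:

    code = "def add(a, b):\n    return a + b"
    focus = ["edge cases"]
    function_string = "def create_test_cases(code: str, focus: list[str]) -> str:"
    args = [code, str(focus)]
    description_string = "Generates a pytest test suite for the given code."
    result = call_ai_function(function_string, args, description_string)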

@ -3,9 +3,12 @@ This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config, check_openai_api_key
from autogpt.config.singleton import AbstractSingleton, Singleton

__all__ = [
    "check_openai_api_key",
    "AbstractSingleton",
    "AIConfig",
    "Config",
    "Singleton",
]

@ -7,14 +7,12 @@ from __future__ import annotations
import os
import platform
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from typing import Optional, Type

import distro
import yaml

if TYPE_CHECKING:
    from autogpt.commands.command import CommandRegistry
    from autogpt.prompts.generator import PromptGenerator
from autogpt.prompts.generator import PromptGenerator

# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
@ -55,8 +53,8 @@ class AIConfig:
        self.ai_role = ai_role
        self.ai_goals = ai_goals
        self.api_budget = api_budget
        self.prompt_generator: PromptGenerator | None = None
        self.command_registry: CommandRegistry | None = None
        self.prompt_generator = None
        self.command_registry = None

    @staticmethod
    def load(config_file: str = SAVE_FILE) -> "AIConfig":
@ -75,18 +73,13 @@ class AIConfig:

        try:
            with open(config_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

        ai_name = config_params.get("ai_name", "")
        ai_role = config_params.get("ai_role", "")
        ai_goals = [
            str(goal).strip("{}").replace("'", "").replace('"', "")
            if isinstance(goal, dict)
            else str(goal)
            for goal in config_params.get("ai_goals", [])
        ]
        ai_goals = config_params.get("ai_goals", [])
        api_budget = config_params.get("api_budget", 0.0)
        # type: Type[AIConfig]
        return AIConfig(ai_name, ai_role, ai_goals, api_budget)
@ -6,8 +6,11 @@ import openai
import yaml
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from dotenv import load_dotenv

from autogpt.singleton import Singleton
from autogpt.config.singleton import Singleton

load_dotenv(verbose=True, override=True)


class Config(metaclass=Singleton):
@ -17,8 +20,8 @@ class Config(metaclass=Singleton):

    def __init__(self) -> None:
        """Initialize the Config class"""
        self.workspace_path: str = None
        self.file_logger_path: str = None
        self.workspace_path = None
        self.file_logger_path = None

        self.debug_mode = False
        self.continuous_mode = False
@ -28,37 +31,12 @@ class Config(metaclass=Singleton):
        self.allow_downloads = False
        self.skip_news = False

        self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
        self.exit_key = os.getenv("EXIT_KEY", "n")
        self.plain_output = os.getenv("PLAIN_OUTPUT", "False") == "True"

        disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
        if disabled_command_categories:
            self.disabled_command_categories = disabled_command_categories.split(",")
        else:
            self.disabled_command_categories = []

        deny_commands = os.getenv("DENY_COMMANDS")
        if deny_commands:
            self.deny_commands = deny_commands.split(",")
        else:
            self.deny_commands = []

        allow_commands = os.getenv("ALLOW_COMMANDS")
        if allow_commands:
            self.allow_commands = allow_commands.split(",")
        else:
            self.allow_commands = []

        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
        self.prompt_settings_file = os.getenv(
            "PROMPT_SETTINGS_FILE", "prompt_settings.yaml"
        )
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
        self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
        self.browse_spacy_language_model = os.getenv(
            "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
        )
@ -86,8 +64,6 @@ class Config(metaclass=Singleton):
        self.use_mac_os_tts = False
        self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

        self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"

        self.use_brian_tts = False
        self.use_brian_tts = os.getenv("USE_BRIAN_TTS")

@ -97,6 +73,28 @@ class Config(metaclass=Singleton):
        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
        self.pinecone_region = os.getenv("PINECONE_ENV")

        self.weaviate_host = os.getenv("WEAVIATE_HOST")
        self.weaviate_port = os.getenv("WEAVIATE_PORT")
        self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
        self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
        self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
        self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
        self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
        self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
        self.use_weaviate_embedded = (
            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
        )

        # milvus or zilliz cloud configuration.
        self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
        self.milvus_username = os.getenv("MILVUS_USERNAME")
        self.milvus_password = os.getenv("MILVUS_PASSWORD")
        self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
        self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"

        self.image_provider = os.getenv("IMAGE_PROVIDER")
        self.image_size = int(os.getenv("IMAGE_SIZE", 256))
        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
@ -122,13 +120,16 @@ class Config(metaclass=Singleton):
            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        )

        self.memory_backend = os.getenv("MEMORY_BACKEND", "json_file")
        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt-memory")

        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = int(os.getenv("REDIS_PORT", "6379"))
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
        # Note that indexes must be created on db 0 in redis, this is not configurable.

        self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
        # Initialize the OpenAI API client
        openai.api_key = self.openai_api_key

        self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
        self.plugins: List[AutoGPTPluginTemplate] = []
@ -139,12 +140,7 @@ class Config(metaclass=Singleton):
            self.plugins_allowlist = plugins_allowlist.split(",")
        else:
            self.plugins_allowlist = []

        plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
        if plugins_denylist:
            self.plugins_denylist = plugins_denylist.split(",")
        else:
            self.plugins_denylist = []
        self.plugins_denylist = []

    def get_azure_deployment_id_for_model(self, model: str) -> str:
        """
@ -185,7 +181,7 @@ class Config(metaclass=Singleton):
        None
        """
        with open(config_file) as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
            config_params = yaml.load(file, Loader=yaml.FullLoader)
        self.openai_api_type = config_params.get("azure_api_type") or "azure"
        self.openai_api_base = config_params.get("azure_api_base") or ""
        self.openai_api_version = (
@ -221,9 +217,9 @@ class Config(metaclass=Singleton):
        """Set the smart token limit value."""
        self.smart_token_limit = value

    def set_embedding_model(self, value: str) -> None:
        """Set the model to use for creating embeddings."""
        self.embedding_model = value
    def set_browse_chunk_max_length(self, value: int) -> None:
        """Set the browse_website command chunk max length value."""
        self.browse_chunk_max_length = value

    def set_openai_api_key(self, value: str) -> None:
        """Set the OpenAI API key value."""
@ -249,6 +245,14 @@ class Config(metaclass=Singleton):
        """Set the custom search engine id value."""
        self.custom_search_engine_id = value

    def set_pinecone_api_key(self, value: str) -> None:
        """Set the Pinecone API key value."""
        self.pinecone_api_key = value

    def set_pinecone_region(self, value: str) -> None:
        """Set the Pinecone region value."""
        self.pinecone_region = value

    def set_debug_mode(self, value: bool) -> None:
        """Set the debug mode value."""
        self.debug_mode = value
@ -261,9 +265,9 @@ class Config(metaclass=Singleton):
        """Set the temperature value."""
        self.temperature = value

    def set_memory_backend(self, name: str) -> None:
        """Set the memory backend name."""
        self.memory_backend = name
    def set_memory_backend(self, value: int) -> None:
        """Set the temperature value."""
        self.memory_backend = value


def check_openai_api_key() -> None:
@ -273,7 +277,6 @@ def check_openai_api_key() -> None:
    print(
        Fore.RED
        + "Please set your OpenAI API key in .env or as an environment variable."
        + Fore.RESET
    )
    print("You can get your key from https://platform.openai.com/account/api-keys")
    exit(1)

24
autogpt/config/singleton.py
Normal file
@ -0,0 +1,24 @@
"""The singleton metaclass for ensuring only one instance of a class."""
import abc


class Singleton(abc.ABCMeta, type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Call method for the singleton metaclass."""
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class AbstractSingleton(abc.ABC, metaclass=Singleton):
    """
    Abstract singleton class for ensuring only one instance of a class.
    """

    pass
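A minimal sketch of the metaclass in action -- any class declaring metaclass=Singleton hands out one shared instance (using the Singleton defined above):

    class Settings(metaclass=Singleton):
        def __init__(self):
            self.debug_mode = False

    a = Settings()
    b = Settings()
    assert a is b  # __call__ returned the cached instance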
@ -1,29 +1,19 @@
"""Configurator module."""
from __future__ import annotations

from typing import TYPE_CHECKING

import click
from colorama import Back, Fore, Style

from autogpt import utils
from autogpt.llm.utils import check_model
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory.vector import get_supported_memory_backends
from autogpt.memory import get_supported_memory_backends

if TYPE_CHECKING:
    from autogpt.config import Config

GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"
CFG = Config()


def create_config(
    config: Config,
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    prompt_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@ -40,7 +30,6 @@ def create_config(
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        prompt_settings_file (str): The path to the prompt_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
@ -51,13 +40,13 @@ def create_config(
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skips_news (bool): Whether to suppress the output of latest news on startup
    """
    config.set_debug_mode(False)
    config.set_continuous_mode(False)
    config.set_speak_mode(False)
    CFG.set_debug_mode(False)
    CFG.set_continuous_mode(False)
    CFG.set_speak_mode(False)

    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        config.set_debug_mode(True)
        CFG.set_debug_mode(True)

    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
@ -68,13 +57,13 @@ def create_config(
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.set_continuous_mode(True)
        CFG.set_continuous_mode(True)

        if continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
            )
            config.set_continuous_limit(continuous_limit)
            CFG.set_continuous_limit(continuous_limit)

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
@ -82,28 +71,15 @@ def create_config(

    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        config.set_speak_mode(True)
        CFG.set_speak_mode(True)

    # Set the default LLM models
    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config
        config.set_fast_llm_model(GPT_3_MODEL)
        config.set_smart_llm_model(GPT_3_MODEL)
        CFG.set_smart_llm_model(CFG.fast_llm_model)

    elif (
        gpt4only
        and check_model(GPT_4_MODEL, model_type="smart_llm_model") == GPT_4_MODEL
    ):
    if gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config
        config.set_fast_llm_model(GPT_4_MODEL)
        config.set_smart_llm_model(GPT_4_MODEL)
    else:
        config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
        config.set_smart_llm_model(
            check_model(config.smart_llm_model, "smart_llm_model")
        )
        CFG.set_fast_llm_model(CFG.smart_llm_model)

    if memory_type:
        supported_memory = get_supported_memory_backends()
@ -114,13 +90,13 @@ def create_config(
                Fore.RED,
                f"{supported_memory}",
            )
            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
        else:
            config.memory_backend = chosen
            CFG.memory_backend = chosen

    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        config.skip_reprompt = True
        CFG.skip_reprompt = True

    if ai_settings_file:
        file = ai_settings_file
@ -133,24 +109,11 @@ def create_config(
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
        config.ai_settings_file = file
        config.skip_reprompt = True

    if prompt_settings_file:
        file = prompt_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
        config.prompt_settings_file = file
        CFG.ai_settings_file = file
        CFG.skip_reprompt = True

    if browser_name:
        config.selenium_web_browser = browser_name
        CFG.selenium_web_browser = browser_name

    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
@ -165,7 +128,7 @@ def create_config(
            Fore.YELLOW,
            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
        )
        config.allow_downloads = True
        CFG.allow_downloads = True

    if skip_news:
        config.skip_news = True
        CFG.skip_news = True

@ -9,7 +9,6 @@ from typing import Optional

from autogpt.config import Config
from autogpt.json_utils.utilities import extract_char_position
from autogpt.logs import logger

CFG = Config()

@ -34,7 +33,8 @@ def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
        json.loads(json_to_load)
        return json_to_load
    except json.JSONDecodeError as e:
        logger.debug("json loads error - fix invalid escape", e)
        if CFG.debug_mode:
            print("json loads error - fix invalid escape", e)
        error_message = str(e)
    return json_to_load

@ -98,11 +98,13 @@ def correct_json(json_to_load: str) -> str:
    """

    try:
        logger.debug("json", json_to_load)
        if CFG.debug_mode:
            print("json", json_to_load)
        json.loads(json_to_load)
        return json_to_load
    except json.JSONDecodeError as e:
        logger.debug("json loads error", e)
        if CFG.debug_mode:
            print("json loads error", e)
        error_message = str(e)
        if error_message.startswith("Invalid \\escape"):
            json_to_load = fix_invalid_escape(json_to_load, error_message)
@ -114,7 +116,8 @@ def correct_json(json_to_load: str) -> str:
            json.loads(json_to_load)
            return json_to_load
        except json.JSONDecodeError as e:
            logger.debug("json loads error - add quotes", e)
            if CFG.debug_mode:
                print("json loads error - add quotes", e)
            error_message = str(e)
            if balanced_str := balance_braces(json_to_load):
                return balanced_str

@ -11,7 +11,7 @@ from regex import regex

from autogpt.config import Config
from autogpt.json_utils.json_fix_general import correct_json
from autogpt.llm.utils import call_ai_function
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
from autogpt.speech import say_text

@ -91,33 +91,14 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
    Returns:
        str: The fixed JSON string.
    """
    assistant_reply = assistant_reply.strip()
    if assistant_reply.startswith("```json"):
        assistant_reply = assistant_reply[7:]
    if assistant_reply.endswith("```"):
        assistant_reply = assistant_reply[:-3]
    try:
        return json.loads(assistant_reply)  # just check the validity
    except json.JSONDecodeError:  # noqa: E722
        pass

    if assistant_reply.startswith("json "):
        assistant_reply = assistant_reply[5:]
        assistant_reply = assistant_reply.strip()
    try:
        return json.loads(assistant_reply)  # just check the validity
    except json.JSONDecodeError:  # noqa: E722
        pass

    # Parse and print Assistant response
    assistant_reply_json = fix_and_parse_json(assistant_reply)
    logger.debug("Assistant reply JSON: %s", str(assistant_reply_json))
    if assistant_reply_json == {}:
        assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
            assistant_reply
        )

    logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json))
    if assistant_reply_json != {}:
        return assistant_reply_json

@ -1,6 +1,5 @@
"""Utilities for the json_fixes package."""
import json
import os.path
import re

from jsonschema import Draft7Validator
@ -9,7 +8,6 @@ from autogpt.config import Config
from autogpt.logs import logger

CFG = Config()
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"


def extract_char_position(error_message: str) -> int:
@ -30,15 +28,13 @@ def extract_char_position(error_message: str) -> int:
    raise ValueError("Character position not found in the error message.")


def validate_json(json_object: object, schema_name: str):
def validate_json(json_object: object, schema_name: object) -> object:
    """
    :type schema_name: object
    :param schema_name: str
    :param schema_name:
    :type json_object: object
    """
    # with open(f"/Users/kilig/Job/Python-project/auto-gpt/autogpt/json_utils/{schema_name}.json", "r") as f:
    scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
    with open(scheme_file, "r") as f:
    with open(f"/Users/kilig/Job/Python-project/academic_gpt/autogpt/json_utils/{schema_name}.json", "r") as f:
        schema = json.load(f)
    validator = Draft7Validator(schema)

@ -52,31 +48,7 @@ def validate_json(json_object: object, schema_name: str):

        for error in errors:
            logger.error(f"Error: {error.message}")
    else:
        logger.debug("The JSON object is valid.")
    elif CFG.debug_mode:
        print("The JSON object is valid.")

    return json_object


def validate_json_string(json_string: str, schema_name: str):
    """
    :type schema_name: object
    :param schema_name: str
    :type json_object: object
    """

    try:
        json_loaded = json.loads(json_string)
        return validate_json(json_loaded, schema_name)
    except:
        return None


def is_string_valid_json(json_string: str, schema_name: str) -> bool:
    """
    :type schema_name: object
    :param schema_name: str
    :type json_object: object
    """

    return validate_json_string(json_string, schema_name) is not None

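validate_json above drives jsonschema's Draft7Validator; the core usage boils down to this (toy schema for illustration):

    from jsonschema import Draft7Validator

    schema = {"type": "object", "required": ["command"]}
    errors = sorted(
        Draft7Validator(schema).iter_errors({"thoughts": {}}), key=lambda e: e.path
    )
    for error in errors:
        print(error.message)  # -> 'command' is a required property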
185
autogpt/llm_utils.py
Normal file
@ -0,0 +1,185 @@
from __future__ import annotations

import time
from typing import List, Optional

import openai
from colorama import Fore, Style
from openai.error import APIError, RateLimitError

from autogpt.api_manager import api_manager
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.types.openai import Message

CFG = Config()

openai.api_key = CFG.openai_api_key


def call_ai_function(
    function: str, args: list, description: str, model: str | None = None
) -> str:
    """Call an AI function

    This is a magic function that can do anything with no-code. See
    https://github.com/Torantulino/AI-Functions for more info.

    Args:
        function (str): The function to call
        args (list): The arguments to pass to the function
        description (str): The description of the function
        model (str, optional): The model to use. Defaults to None.

    Returns:
        str: The response from the function
    """
    if model is None:
        model = CFG.smart_llm_model
    # For each arg, if any are None, convert to "None":
    args = [str(arg) if arg is not None else "None" for arg in args]
    # parse args to comma separated string
    args: str = ", ".join(args)
    messages: List[Message] = [
        {
            "role": "system",
            "content": f"You are now the following python function: ```# {description}"
            f"\n{function}```\n\nOnly respond with your `return` value.",
        },
        {"role": "user", "content": args},
    ]

    return create_chat_completion(model=model, messages=messages, temperature=0)


# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
    messages: List[Message],  # type: ignore
    model: Optional[str] = None,
    temperature: float = CFG.temperature,
    max_tokens: Optional[int] = None,
) -> str:
    """Create a chat completion using the OpenAI API

    Args:
        messages (List[Message]): The messages to send to the chat completion
        model (str, optional): The model to use. Defaults to None.
        temperature (float, optional): The temperature to use. Defaults to 0.9.
        max_tokens (int, optional): The max tokens to use. Defaults to None.

    Returns:
        str: The response from the chat completion
    """
    num_retries = 10
    warned_user = False
    if CFG.debug_mode:
        print(
            f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
        )
    for plugin in CFG.plugins:
        if plugin.can_handle_chat_completion(
            messages=messages,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
        ):
            message = plugin.handle_chat_completion(
                messages=messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            if message is not None:
                return message
    response = None
    for attempt in range(num_retries):
        backoff = 2 ** (attempt + 2)
        try:
            if CFG.use_azure:
                response = api_manager.create_chat_completion(
                    deployment_id=CFG.get_azure_deployment_id_for_model(model),
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            else:
                response = api_manager.create_chat_completion(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            break
        except RateLimitError:
            if CFG.debug_mode:
                print(
                    f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
                )
            if not warned_user:
                logger.double_check(
                    f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
                    + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
                )
                warned_user = True
        except APIError as e:
            if e.http_status != 502:
                raise
            if attempt == num_retries - 1:
                raise
        if CFG.debug_mode:
            print(
                f"{Fore.RED}Error: ",
                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
            )
        time.sleep(backoff)
    if response is None:
        logger.typewriter_log(
            "FAILED TO GET RESPONSE FROM OPENAI",
            Fore.RED,
            "Auto-GPT has failed to get a response from OpenAI's services. "
            + f"Try running Auto-GPT again, and if the problem persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
        )
        logger.double_check()
        if CFG.debug_mode:
            raise RuntimeError(f"Failed to get response after {num_retries} retries")
        else:
            quit(1)
    resp = response.choices[0].message["content"]
    for plugin in CFG.plugins:
        if not plugin.can_handle_on_response():
            continue
        resp = plugin.on_response(resp)
    return resp


def get_ada_embedding(text):
    text = text.replace("\n", " ")
    return api_manager.embedding_create(
        text_list=[text], model="text-embedding-ada-002"
    )


def create_embedding_with_ada(text) -> list:
    """Create an embedding with text-ada-002 using the OpenAI SDK"""
    num_retries = 10
    for attempt in range(num_retries):
        backoff = 2 ** (attempt + 2)
        try:
            return api_manager.embedding_create(
                text_list=[text], model="text-embedding-ada-002"
            )
        except RateLimitError:
            pass
        except APIError as e:
            if e.http_status != 502:
                raise
            if attempt == num_retries - 1:
                raise
        if CFG.debug_mode:
            print(
                f"{Fore.RED}Error: ",
                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
            )
        time.sleep(backoff)
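Both retry loops above use the same exponential backoff, backoff = 2 ** (attempt + 2), i.e. 4 s, 8 s, 16 s, and so on. A quick check:

    >>> [2 ** (attempt + 2) for attempt in range(5)]
    [4, 8, 16, 32, 64]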
201
autogpt/logs.py
@ -1,18 +1,29 @@
"""Logging module for Auto-GPT."""
import inspect
import json
import logging
import os
import random
import re
import time
import traceback
from logging import LogRecord
from typing import Any

from colorama import Fore, Style

from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
from autogpt.singleton import Singleton
from autogpt.config import Config, Singleton
from autogpt.speech import say_text

CFG = Config()

def get_properties(obj):
    props = {}
    for prop_name in dir(obj):
        if not prop_name.startswith('__'):
            prop_value = getattr(obj, prop_name)
            props[prop_value] = prop_name
    return props
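get_properties deliberately builds a value-to-name map over colorama's Fore codes, so a later lookup can translate an ANSI escape such as Fore.GREEN back into the string "GREEN" for the HTML span rendering in typewriter_log below:

    >>> from colorama import Fore
    >>> props = {getattr(Fore, n): n for n in dir(Fore) if not n.startswith("__")}
    >>> props[Fore.GREEN]
    'GREEN'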


class Logger(metaclass=Singleton):
    """
@ -75,24 +86,15 @@ class Logger(metaclass=Singleton):
        self.logger.addHandler(self.file_handler)
        self.logger.addHandler(error_handler)
        self.logger.setLevel(logging.DEBUG)

        self.json_logger = logging.getLogger("JSON_LOGGER")
        self.json_logger.addHandler(self.file_handler)
        self.json_logger.addHandler(error_handler)
        self.json_logger.setLevel(logging.DEBUG)

        self.speak_mode = False
        self.chat_plugins = []
        self.color_compar = get_properties(Fore)
        self.output_content = []

    def typewriter_log(
        self, title="", title_color="", content="", speak_text=False, level=logging.INFO
        self, title="", title_color=Fore.YELLOW, content="", speak_text=False, level=logging.INFO
    ):
        if speak_text and self.speak_mode:
        if speak_text and CFG.speak_mode:
            say_text(f"{title}. {content}")

        for plugin in self.chat_plugins:
            plugin.report(f"{title}. {content}")

        if content:
            if isinstance(content, list):
                content = " ".join(content)
@ -102,6 +104,15 @@ class Logger(metaclass=Singleton):
        self.typing_logger.log(
            level, content, extra={"title": title, "color": title_color}
        )
        try:
            msg = f'<span style="color:{self.color_compar[title_color]};font-weight:bold;">{title}:</span><span style="font-weight:normal;">{content}</span>'
            self.output_content.append([msg, title+": "+content])
            return msg
        except Exception as e:
            msg = f'<span style="font-weight:bold;">{title}:</span><span style="font-weight:normal;">{content}</span>'
            self.output_content.append([msg, title+": "+content])
            return

    def debug(
        self,
@ -111,14 +122,6 @@ class Logger(metaclass=Singleton):
    ):
        self._log(title, title_color, message, logging.DEBUG)

    def info(
        self,
        message,
        title="",
        title_color="",
    ):
        self._log(title, title_color, message, logging.INFO)

    def warn(
        self,
        message,
@ -130,19 +133,11 @@ class Logger(metaclass=Singleton):
    def error(self, title, message=""):
        self._log(title, Fore.RED, message, logging.ERROR)

    def _log(
        self,
        title: str = "",
        title_color: str = "",
        message: str = "",
        level=logging.INFO,
    ):
    def _log(self, title="", title_color="", message="", level=logging.INFO):
        if message:
            if isinstance(message, list):
                message = " ".join(message)
        self.logger.log(
            level, message, extra={"title": str(title), "color": str(title_color)}
        )
        self.logger.log(level, message, extra={"title": title, "color": title_color})

    def set_level(self, level):
        self.logger.setLevel(level)
@ -159,26 +154,6 @@ class Logger(metaclass=Singleton):

        self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)

    def log_json(self, data: Any, file_name: str) -> None:
        # Define log directory
        this_files_dir_path = os.path.dirname(__file__)
        log_dir = os.path.join(this_files_dir_path, "../logs")

        # Create a handler for JSON files
        json_file_path = os.path.join(log_dir, file_name)
        json_data_handler = JsonFileHandler(json_file_path)
        json_data_handler.setFormatter(JsonFormatter())

        # Log the JSON data using the custom file handler
        self.json_logger.addHandler(json_data_handler)
        self.json_logger.debug(data)
        self.json_logger.removeHandler(json_data_handler)

    def get_log_directory(self):
        this_files_dir_path = os.path.dirname(__file__)
        log_dir = os.path.join(this_files_dir_path, "../logs")
        return os.path.abspath(log_dir)


"""
Output stream to console using simulated typing
@ -226,16 +201,12 @@ class AutoGptFormatter(logging.Formatter):
        if hasattr(record, "color"):
            record.title_color = (
                getattr(record, "color")
                + getattr(record, "title", "")
                + getattr(record, "title")
                + " "
                + Style.RESET_ALL
            )
        else:
            record.title_color = getattr(record, "title", "")

        # Add this line to set 'title' to an empty string if it doesn't exist
        record.title = getattr(record, "title", "")

            record.title_color = getattr(record, "title")
        if hasattr(record, "msg"):
            record.message_no_color = remove_color_codes(getattr(record, "msg"))
        else:
@ -251,10 +222,100 @@ def remove_color_codes(s: str) -> str:
logger = Logger()


def print_assistant_thoughts(ai_name, assistant_reply):
    """Prints the assistant's thoughts to the console"""
    from autogpt.json_utils.json_fix_llm import (
        attempt_to_fix_json_by_finding_outermost_brackets,
        fix_and_parse_json,
    )

    try:
        try:
            # Parse and print Assistant response
            assistant_reply_json = fix_and_parse_json(assistant_reply)
        except json.JSONDecodeError:
            logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
            assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
                assistant_reply
            )
            if isinstance(assistant_reply_json, str):
                assistant_reply_json = fix_and_parse_json(assistant_reply_json)

        # Check if assistant_reply_json is a string and attempt to parse
        # it into a JSON object
        if isinstance(assistant_reply_json, str):
            try:
                assistant_reply_json = json.loads(assistant_reply_json)
            except json.JSONDecodeError:
                logger.error("Error: Invalid JSON\n", assistant_reply)
                assistant_reply_json = (
                    attempt_to_fix_json_by_finding_outermost_brackets(
                        assistant_reply_json
                    )
                )

        assistant_thoughts_reasoning = None
        assistant_thoughts_plan = None
        assistant_thoughts_speak = None
        assistant_thoughts_criticism = None
        if not isinstance(assistant_reply_json, dict):
            assistant_reply_json = {}
        assistant_thoughts = assistant_reply_json.get("thoughts", {})
        assistant_thoughts_text = assistant_thoughts.get("text")

        if assistant_thoughts:
            assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
            assistant_thoughts_plan = assistant_thoughts.get("plan")
            assistant_thoughts_criticism = assistant_thoughts.get("criticism")
            assistant_thoughts_speak = assistant_thoughts.get("speak")

        logger.typewriter_log(
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
        )
        logger.typewriter_log(
            "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
        )

        if assistant_thoughts_plan:
            logger.typewriter_log("PLAN:", Fore.YELLOW, "")
            # If it's a list, join it into a string
            if isinstance(assistant_thoughts_plan, list):
                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
            elif isinstance(assistant_thoughts_plan, dict):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.typewriter_log("- ", Fore.GREEN, line.strip())

        logger.typewriter_log(
            "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
        )
        # Speak the assistant's thoughts
        if CFG.speak_mode and assistant_thoughts_speak:
            say_text(assistant_thoughts_speak)
        else:
            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")

        return assistant_reply_json
    except json.decoder.JSONDecodeError:
        logger.error("Error: Invalid JSON\n", assistant_reply)
        if CFG.speak_mode:
            say_text(
                "I have received an invalid JSON response from the OpenAI API."
                " I cannot ignore this response."
            )

    # All other errors, return "Error: + error message"
    except Exception:
        call_stack = traceback.format_exc()
        logger.error("Error: \n", call_stack)


def print_assistant_thoughts(
    ai_name: object,
    assistant_reply_json_valid: object,
    speak_mode: bool = False,
    ai_name: object, assistant_reply_json_valid: object
) -> None:
    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None
@ -287,8 +348,12 @@ def print_assistant_thoughts(
        logger.typewriter_log("- ", Fore.GREEN, line.strip())
    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
    # Speak the assistant's thoughts
    if assistant_thoughts_speak:
        if speak_mode:
            say_text(assistant_thoughts_speak)
        else:
            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
    if CFG.speak_mode and assistant_thoughts_speak:
        say_text(assistant_thoughts_speak)


if __name__ == '__main__':

    ff = logger.typewriter_log('ahhahaha', Fore.GREEN, speak_text=True)
    # print(Fore.GREEN)
    # print(logger.color_compar)
99
autogpt/memory/__init__.py
Normal file
@ -0,0 +1,99 @@
from autogpt.memory.local import LocalCache
from autogpt.memory.no_memory import NoMemory

# List of supported memory backends
# Add a backend to this list if the import attempt is successful
supported_memory = ["local", "no_memory"]

try:
    from autogpt.memory.redismem import RedisMemory

    supported_memory.append("redis")
except ImportError:
    # print("Redis not installed. Skipping import.")
    RedisMemory = None

try:
    from autogpt.memory.pinecone import PineconeMemory

    supported_memory.append("pinecone")
except ImportError:
    # print("Pinecone not installed. Skipping import.")
    PineconeMemory = None

try:
    from autogpt.memory.weaviate import WeaviateMemory

    supported_memory.append("weaviate")
except ImportError:
    # print("Weaviate not installed. Skipping import.")
    WeaviateMemory = None

try:
    from autogpt.memory.milvus import MilvusMemory

    supported_memory.append("milvus")
except ImportError:
    # print("pymilvus not installed. Skipping import.")
    MilvusMemory = None


def get_memory(cfg, init=False):
    memory = None
    if cfg.memory_backend == "pinecone":
        if not PineconeMemory:
            print(
                "Error: Pinecone is not installed. Please install pinecone"
                " to use Pinecone as a memory backend."
            )
        else:
            memory = PineconeMemory(cfg)
            if init:
                memory.clear()
    elif cfg.memory_backend == "redis":
        if not RedisMemory:
            print(
                "Error: Redis is not installed. Please install redis-py to"
                " use Redis as a memory backend."
            )
        else:
            memory = RedisMemory(cfg)
    elif cfg.memory_backend == "weaviate":
        if not WeaviateMemory:
            print(
                "Error: Weaviate is not installed. Please install weaviate-client to"
                " use Weaviate as a memory backend."
            )
        else:
            memory = WeaviateMemory(cfg)
    elif cfg.memory_backend == "milvus":
        if not MilvusMemory:
            print(
                "Error: pymilvus sdk is not installed."
                "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
            )
        else:
            memory = MilvusMemory(cfg)
    elif cfg.memory_backend == "no_memory":
        memory = NoMemory(cfg)

    if memory is None:
        memory = LocalCache(cfg)
        if init:
            memory.clear()
    return memory


def get_supported_memory_backends():
    return supported_memory


__all__ = [
    "get_memory",
    "LocalCache",
    "RedisMemory",
    "PineconeMemory",
    "NoMemory",
    "MilvusMemory",
    "WeaviateMemory",
]
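A hedged usage sketch of the factory above, assuming a Config whose memory_backend is unset or whose backend package is missing, so it falls back to LocalCache:

    from autogpt.config import Config
    from autogpt.memory import get_memory

    cfg = Config()
    memory = get_memory(cfg, init=True)  # init=True wipes the chosen backend
    memory.add("The user prefers concise answers.")
    print(memory.get_relevant("answer style", 1))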
28
autogpt/memory/base.py
Normal file
@ -0,0 +1,28 @@
"""Base class for memory providers."""
import abc

from autogpt.config import AbstractSingleton, Config

cfg = Config()


class MemoryProviderSingleton(AbstractSingleton):
    @abc.abstractmethod
    def add(self, data):
        pass

    @abc.abstractmethod
    def get(self, data):
        pass

    @abc.abstractmethod
    def clear(self):
        pass

    @abc.abstractmethod
    def get_relevant(self, data, num_relevant=5):
        pass

    @abc.abstractmethod
    def get_stats(self):
        pass
126 autogpt/memory/local.py Normal file
@ -0,0 +1,126 @@
from __future__ import annotations

import dataclasses
from pathlib import Path
from typing import Any, List

import numpy as np
import orjson

from autogpt.llm_utils import create_embedding_with_ada
from autogpt.memory.base import MemoryProviderSingleton

EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS


def create_default_embeddings():
    return np.zeros((0, EMBED_DIM)).astype(np.float32)


@dataclasses.dataclass
class CacheContent:
    texts: List[str] = dataclasses.field(default_factory=list)
    embeddings: np.ndarray = dataclasses.field(
        default_factory=create_default_embeddings
    )


class LocalCache(MemoryProviderSingleton):
    """A class that stores the memory in a local file"""

    def __init__(self, cfg) -> None:
        """Initialize a class instance

        Args:
            cfg: Config object

        Returns:
            None
        """
        workspace_path = Path(cfg.workspace_path)
        self.filename = workspace_path / f"{cfg.memory_index}.json"

        self.filename.touch(exist_ok=True)

        file_content = b"{}"
        with self.filename.open("w+b") as f:
            f.write(file_content)

        self.data = CacheContent()

    def add(self, text: str):
        """
        Add text to our list of texts, add embedding as row to our
        embeddings-matrix

        Args:
            text: str

        Returns: None
        """
        if "Command Error:" in text:
            return ""
        self.data.texts.append(text)

        embedding = create_embedding_with_ada(text)

        vector = np.array(embedding).astype(np.float32)
        vector = vector[np.newaxis, :]
        self.data.embeddings = np.concatenate(
            [
                self.data.embeddings,
                vector,
            ],
            axis=0,
        )

        with open(self.filename, "wb") as f:
            out = orjson.dumps(self.data, option=SAVE_OPTIONS)
            f.write(out)
        return text

    def clear(self) -> str:
        """
        Clears the local cache.

        Returns: A message indicating that the memory has been cleared.
        """
        self.data = CacheContent()
        return "Obliviated"

    def get(self, data: str) -> list[Any] | None:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def get_relevant(self, text: str, k: int) -> list[Any]:
        """
        matrix-vector mult to find score-for-each-row-of-matrix
        get indices for top-k winning scores
        return texts for those indices

        Args:
            text: str
            k: int

        Returns: List[str]
        """
        embedding = create_embedding_with_ada(text)

        scores = np.dot(self.data.embeddings, embedding)

        top_k_indices = np.argsort(scores)[-k:][::-1]

        return [self.data.texts[i] for i in top_k_indices]

    def get_stats(self) -> tuple[int, tuple[int, ...]]:
        """
        Returns: The stats of the local cache.
        """
        return len(self.data.texts), self.data.embeddings.shape
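Note: since ada-002 embeddings come back approximately unit-normalized, the plain dot product in get_relevant() behaves like cosine similarity. The core of the scoring, isolated with dummy data so it runs standalone:

# Top-k retrieval as performed above, with stand-in vectors.
import numpy as np

embeddings = np.random.rand(10, 1536).astype(np.float32)  # stand-in stored matrix
query = np.random.rand(1536).astype(np.float32)           # stand-in query embedding

scores = np.dot(embeddings, query)       # one score per stored text
top_k = np.argsort(scores)[-3:][::-1]    # indices of the 3 best scores, best first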
162 autogpt/memory/milvus.py Normal file
@ -0,0 +1,162 @@
"""Milvus memory storage provider."""
import re

from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections

from autogpt.config import Config
from autogpt.llm_utils import get_ada_embedding
from autogpt.memory.base import MemoryProviderSingleton


class MilvusMemory(MemoryProviderSingleton):
    """Milvus memory storage provider."""

    def __init__(self, cfg: Config) -> None:
        """Construct a milvus memory storage connection.

        Args:
            cfg (Config): Auto-GPT global config.
        """
        self.configure(cfg)

        connect_kwargs = {}
        if self.username:
            connect_kwargs["user"] = self.username
            connect_kwargs["password"] = self.password

        connections.connect(
            **connect_kwargs,
            uri=self.uri or "",
            address=self.address or "",
            secure=self.secure,
        )

        self.init_collection()

    def configure(self, cfg: Config) -> None:
        # init with configuration.
        self.uri = None
        self.address = cfg.milvus_addr
        self.secure = cfg.milvus_secure
        self.username = cfg.milvus_username
        self.password = cfg.milvus_password
        self.collection_name = cfg.milvus_collection
        # use HNSW by default.
        self.index_params = {
            "metric_type": "IP",
            "index_type": "HNSW",
            "params": {"M": 8, "efConstruction": 64},
        }

        if (self.username is None) != (self.password is None):
            raise ValueError(
                "Both username and password must be set to use authentication for Milvus"
            )

        # configured address may be a full URL.
        if re.match(r"^(https?|tcp)://", self.address) is not None:
            self.uri = self.address
            self.address = None

            if self.uri.startswith("https"):
                self.secure = True

            # Zilliz Cloud requires AutoIndex.
            if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.uri) is not None:
                self.index_params = {
                    "metric_type": "IP",
                    "index_type": "AUTOINDEX",
                    "params": {},
                }

    def init_collection(self) -> None:
        """Initialize collection in vector database."""
        fields = [
            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
            FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
        ]

        # create collection if not exist and load it.
        self.schema = CollectionSchema(fields, "auto-gpt memory storage")
        self.collection = Collection(self.collection_name, self.schema)
        # create index if not exist.
        if not self.collection.has_index():
            self.collection.release()
            self.collection.create_index(
                "embeddings",
                self.index_params,
                index_name="embeddings",
            )
        self.collection.load()

    def add(self, data) -> str:
        """Add an embedding of data into memory.

        Args:
            data (str): The raw text to construct embedding index.

        Returns:
            str: log.
        """
        embedding = get_ada_embedding(data)
        result = self.collection.insert([[embedding], [data]])
        _text = (
            "Inserting data into memory at primary key: "
            f"{result.primary_keys[0]}:\n data: {data}"
        )
        return _text

    def get(self, data):
        """Return the most relevant data in memory.
        Args:
            data: The data to compare to.
        """
        return self.get_relevant(data, 1)

    def clear(self) -> str:
        """Drop the index in memory.

        Returns:
            str: log.
        """
        self.collection.drop()
        self.collection = Collection(self.collection_name, self.schema)
        self.collection.create_index(
            "embeddings",
            self.index_params,
            index_name="embeddings",
        )
        self.collection.load()
        return "Obliviated"

    def get_relevant(self, data: str, num_relevant: int = 5):
        """Return the top-k relevant data in memory.
        Args:
            data: The data to compare to.
            num_relevant (int, optional): The max number of relevant data.
                Defaults to 5.

        Returns:
            list: The top-k relevant data.
        """
        # search the embedding and return the most relevant text.
        embedding = get_ada_embedding(data)
        search_params = {
            "metric_type": "IP",
            "params": {"nprobe": 8},
        }
        result = self.collection.search(
            [embedding],
            "embeddings",
            search_params,
            num_relevant,
            output_fields=["raw_text"],
        )
        return [item.entity.value_of_field("raw_text") for item in result[0]]

    def get_stats(self) -> str:
        """
        Returns: The stats of the milvus cache.
        """
        return f"Entities num: {self.collection.num_entities}"
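Note: the address normalization in configure() is easy to misread: a bare host:port stays in `address`, while anything with a scheme is promoted to `uri` (and https implies secure). A tiny standalone trace of that branch, using the same regexes as the code above:

# Standalone trace of the address/uri normalization logic.
import re

for addr in ["localhost:19530", "https://abc.zillizcloud.com"]:
    uri = addr if re.match(r"^(https?|tcp)://", addr) else None
    address = None if uri else addr
    secure = bool(uri and uri.startswith("https"))
    print(addr, "->", {"uri": uri, "address": address, "secure": secure})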
73 autogpt/memory/no_memory.py Normal file
@ -0,0 +1,73 @@
"""A class that does not store any data. This is the default memory provider."""
from __future__ import annotations

from typing import Any

from autogpt.memory.base import MemoryProviderSingleton


class NoMemory(MemoryProviderSingleton):
    """
    A class that does not store any data. This is the default memory provider.
    """

    def __init__(self, cfg):
        """
        Initializes the NoMemory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        pass

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory. No action is taken in NoMemory.

        Args:
            data: The data to add.

        Returns: An empty string.
        """
        return ""

    def get(self, data: str) -> list[Any] | None:
        """
        Gets the data from the memory that is most relevant to the given data.
        NoMemory always returns None.

        Args:
            data: The data to compare to.

        Returns: None
        """
        return None

    def clear(self) -> str:
        """
        Clears the memory. No action is taken in NoMemory.

        Returns: An empty string.
        """
        return ""

    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
        """
        Returns all the data in the memory that is relevant to the given data.
        NoMemory always returns None.

        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: None
        """
        return None

    def get_stats(self):
        """
        Returns: An empty dictionary as there are no stats in NoMemory.
        """
        return {}
75 autogpt/memory/pinecone.py Normal file
@ -0,0 +1,75 @@
import pinecone
from colorama import Fore, Style

from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton


class PineconeMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        pinecone_api_key = cfg.pinecone_api_key
        pinecone_region = cfg.pinecone_region
        pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
        dimension = 1536
        metric = "cosine"
        pod_type = "p1"
        table_name = "auto-gpt"
        # this assumes we don't start with memory.
        # for now this works.
        # we'll need a more complicated and robust system if we want to start with
        # memory.
        self.vec_num = 0

        try:
            pinecone.whoami()
        except Exception as e:
            logger.typewriter_log(
                "FAILED TO CONNECT TO PINECONE",
                Fore.RED,
                Style.BRIGHT + str(e) + Style.RESET_ALL,
            )
            logger.double_check(
                "Please ensure you have setup and configured Pinecone properly for use."
                + f"You can check out {Fore.CYAN + Style.BRIGHT}"
                "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup"
                f"{Style.RESET_ALL} to ensure you've set up everything correctly."
            )
            exit(1)

        if table_name not in pinecone.list_indexes():
            pinecone.create_index(
                table_name, dimension=dimension, metric=metric, pod_type=pod_type
            )
        self.index = pinecone.Index(table_name)

    def add(self, data):
        vector = create_embedding_with_ada(data)
        # no metadata here. We may wish to change that long term.
        self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
        _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
        self.vec_num += 1
        return _text

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.index.delete(deleteAll=True)
        return "Obliviated"

    def get_relevant(self, data, num_relevant=5):
        """
        Returns all the data in the memory that is relevant to the given data.
        :param data: The data to compare to.
        :param num_relevant: The number of relevant data to return. Defaults to 5
        """
        query_embedding = create_embedding_with_ada(data)
        results = self.index.query(
            query_embedding, top_k=num_relevant, include_metadata=True
        )
        sorted_results = sorted(results.matches, key=lambda x: x.score)
        return [str(item["metadata"]["raw_text"]) for item in sorted_results]

    def get_stats(self):
        return self.index.describe_index_stats()
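Note: one subtlety in get_relevant() above: Pinecone's cosine scores are higher-is-more-similar, and sorted(..., key=lambda x: x.score) sorts ascending, so the most relevant match lands last in the returned list. A quick illustration with plain dicts:

# Plain-Python illustration of the ascending sort above.
matches = [{"score": 0.91, "id": "a"}, {"score": 0.42, "id": "b"}]
print(sorted(matches, key=lambda m: m["score"]))  # least similar item comes first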
156 autogpt/memory/redismem.py Normal file
@ -0,0 +1,156 @@
"""Redis memory provider."""
from __future__ import annotations

from typing import Any

import numpy as np
import redis
from colorama import Fore, Style
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query

from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton

SCHEMA = [
    TextField("data"),
    VectorField(
        "embedding",
        "HNSW",
        {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
    ),
]


class RedisMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        """
        Initializes the Redis memory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        redis_host = cfg.redis_host
        redis_port = cfg.redis_port
        redis_password = cfg.redis_password
        self.dimension = 1536
        self.redis = redis.Redis(
            host=redis_host,
            port=redis_port,
            password=redis_password,
            db=0,  # Cannot be changed
        )
        self.cfg = cfg

        # Check redis connection
        try:
            self.redis.ping()
        except redis.ConnectionError as e:
            logger.typewriter_log(
                "FAILED TO CONNECT TO REDIS",
                Fore.RED,
                Style.BRIGHT + str(e) + Style.RESET_ALL,
            )
            logger.double_check(
                "Please ensure you have setup and configured Redis properly for use. "
                + f"You can check out {Fore.CYAN + Style.BRIGHT}"
                f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}"
                " to ensure you've set up everything correctly."
            )
            exit(1)

        if cfg.wipe_redis_on_start:
            self.redis.flushall()
        try:
            self.redis.ft(f"{cfg.memory_index}").create_index(
                fields=SCHEMA,
                definition=IndexDefinition(
                    prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
                ),
            )
        except Exception as e:
            print("Error creating Redis search index: ", e)
        existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
        self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory.

        Args:
            data: The data to add.

        Returns: Message indicating that the data has been added.
        """
        if "Command Error:" in data:
            return ""
        vector = create_embedding_with_ada(data)
        vector = np.array(vector).astype(np.float32).tobytes()
        data_dict = {b"data": data, "embedding": vector}
        pipe = self.redis.pipeline()
        pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
        _text = (
            f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
        )
        self.vec_num += 1
        pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
        pipe.execute()
        return _text

    def get(self, data: str) -> list[Any] | None:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def clear(self) -> str:
        """
        Clears the redis server.

        Returns: A message indicating that the memory has been cleared.
        """
        self.redis.flushall()
        return "Obliviated"

    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
        """
        Returns all the data in the memory that is relevant to the given data.
        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: A list of the most relevant data.
        """
        query_embedding = create_embedding_with_ada(data)
        base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
        query = (
            Query(base_query)
            .return_fields("data", "vector_score")
            .sort_by("vector_score")
            .dialect(2)
        )
        query_vector = np.array(query_embedding).astype(np.float32).tobytes()

        try:
            results = self.redis.ft(f"{self.cfg.memory_index}").search(
                query, query_params={"vector": query_vector}
            )
        except Exception as e:
            print("Error calling Redis search: ", e)
            return None
        return [result.data for result in results.docs]

    def get_stats(self):
        """
        Returns: The stats of the memory index.
        """
        return self.redis.ft(f"{self.cfg.memory_index}").info()
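Note: the `base_query` above is RediSearch's KNN vector syntax (hence .dialect(2)). A sketch of the same search issued directly with redis-py, assuming a Redis Stack instance on localhost and an index named "auto-gpt" already populated by the class above:

# Hypothetical direct query against the index built by RedisMemory.
import numpy as np
import redis
from redis.commands.search.query import Query

r = redis.Redis(host="localhost", port=6379, db=0)
vector = np.random.rand(1536).astype(np.float32).tobytes()  # stand-in embedding
q = (
    Query("*=>[KNN 3 @embedding $vector AS vector_score]")
    .return_fields("data", "vector_score")
    .sort_by("vector_score")       # ascending distance: closest hit first
    .dialect(2)
)
docs = r.ft("auto-gpt").search(q, query_params={"vector": vector}).docs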
126 autogpt/memory/weaviate.py Normal file
@ -0,0 +1,126 @@
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5

from autogpt.llm_utils import get_ada_embedding
from autogpt.memory.base import MemoryProviderSingleton


def default_schema(weaviate_index):
    return {
        "class": weaviate_index,
        "properties": [
            {
                "name": "raw_text",
                "dataType": ["text"],
                "description": "original text for the embedding",
            }
        ],
    }


class WeaviateMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        auth_credentials = self._build_auth_credentials(cfg)

        url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"

        if cfg.use_weaviate_embedded:
            self.client = Client(
                embedded_options=EmbeddedOptions(
                    hostname=cfg.weaviate_host,
                    port=int(cfg.weaviate_port),
                    persistence_data_path=cfg.weaviate_embedded_path,
                )
            )

            print(
                f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
            )
        else:
            self.client = Client(url, auth_client_secret=auth_credentials)

        self.index = WeaviateMemory.format_classname(cfg.memory_index)
        self._create_schema()

    @staticmethod
    def format_classname(index):
        # weaviate uses capitalised index names
        # The python client uses the following code to format
        # index names before the corresponding class is created
        index = index.replace("-", "_")
        if len(index) == 1:
            return index.capitalize()
        return index[0].capitalize() + index[1:]

    def _create_schema(self):
        schema = default_schema(self.index)
        if not self.client.schema.contains(schema):
            self.client.schema.create_class(schema)

    def _build_auth_credentials(self, cfg):
        if cfg.weaviate_username and cfg.weaviate_password:
            return weaviate.AuthClientPassword(
                cfg.weaviate_username, cfg.weaviate_password
            )
        if cfg.weaviate_api_key:
            return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
        else:
            return None

    def add(self, data):
        vector = get_ada_embedding(data)

        doc_uuid = generate_uuid5(data, self.index)
        data_object = {"raw_text": data}

        with self.client.batch as batch:
            batch.add_data_object(
                uuid=doc_uuid,
                data_object=data_object,
                class_name=self.index,
                vector=vector,
            )

        return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.client.schema.delete_all()

        # weaviate does not yet have a neat way to just remove the items in an index
        # without removing the entire schema, therefore we need to re-create it
        # after a call to delete_all
        self._create_schema()

        return "Obliterated"

    def get_relevant(self, data, num_relevant=5):
        query_embedding = get_ada_embedding(data)
        try:
            results = (
                self.client.query.get(self.index, ["raw_text"])
                .with_near_vector({"vector": query_embedding, "certainty": 0.7})
                .with_limit(num_relevant)
                .do()
            )

            if len(results["data"]["Get"][self.index]) > 0:
                return [
                    str(item["raw_text"]) for item in results["data"]["Get"][self.index]
                ]
            else:
                return []

        except Exception as err:
            print(f"Unexpected error {err=}, {type(err)=}")
            return []

    def get_stats(self):
        result = self.client.query.aggregate(self.index).with_meta_count().do()
        class_data = result["data"]["Aggregate"][self.index]

        return class_data[0]["meta"] if class_data else {}
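Note: get_relevant() above filters hits with certainty >= 0.7, so an empty result can mean "nothing similar enough" rather than "index empty". Under Weaviate's documented definition certainty = (1 + cosine) / 2 (an assumption about the client library, not something this commit states), the threshold maps to:

# Assumed mapping from Weaviate certainty to cosine similarity.
certainty = 0.7
cosine = 2 * certainty - 1
print(cosine)  # 0.4 -- hits below cosine similarity ~0.4 are dropped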
@ -68,6 +68,7 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
            prompt (PromptGenerator): The prompt generator.
            messages (List[str]): The list of messages.
        """
        pass

    def can_handle_post_planning(self) -> bool:
        """This method is called to check that the plugin can
@ -115,6 +116,7 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
        Returns:
            Optional[str]: The resulting message.
        """
        pass

    def can_handle_post_instruction(self) -> bool:
        """This method is called to check that the plugin can
@ -194,56 +196,4 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
        Returns:
            str: The resulting response.
        """

    def can_handle_text_embedding(self, text: str) -> bool:
        """This method is called to check that the plugin can
        handle the text_embedding method.
        Args:
            text (str): The text to be converted to embedding.
        Returns:
            bool: True if the plugin can handle the text_embedding method."""
        return False

    def handle_text_embedding(self, text: str) -> list:
        """This method is called when the chat completion is done.
        Args:
            text (str): The text to be converted to embedding.
        Returns:
            list: The text embedding.
        """

    def can_handle_user_input(self, user_input: str) -> bool:
        """This method is called to check that the plugin can
        handle the user_input method.

        Args:
            user_input (str): The user input.

        Returns:
            bool: True if the plugin can handle the user_input method."""
        return False

    def user_input(self, user_input: str) -> str:
        """This method is called to request user input to the user.

        Args:
            user_input (str): The question or prompt to ask the user.

        Returns:
            str: The user input.
        """

    def can_handle_report(self) -> bool:
        """This method is called to check that the plugin can
        handle the report method.

        Returns:
            bool: True if the plugin can handle the report method."""
        return False

    def report(self, message: str) -> None:
        """This method is called to report a message to the user.

        Args:
            message (str): The message to report.
        """
        pass
7 autogpt/modelsinfo.py Normal file
@ -0,0 +1,7 @@
COSTS = {
    "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
    "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
    "gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
    "gpt-4": {"prompt": 0.03, "completion": 0.06},
    "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
}
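Note: these figures are dollars per 1,000 tokens (matching OpenAI's pricing at the time), so a cost estimate for a call reduces to:

# Cost estimate using the COSTS table above.
from autogpt.modelsinfo import COSTS

prompt_tokens, completion_tokens = 1200, 350
model = "gpt-3.5-turbo"
cost = (
    prompt_tokens * COSTS[model]["prompt"]
    + completion_tokens * COSTS[model]["completion"]
) / 1000
print(f"${cost:.4f}")  # $0.0031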
0 autogpt/permanent_memory/__init__.py Normal file
123 autogpt/permanent_memory/sqlite3_store.py Normal file
@ -0,0 +1,123 @@
import os
import sqlite3


class MemoryDB:
    def __init__(self, db=None):
        self.db_file = db
        if db is None:  # No db filename supplied...
            self.db_file = f"{os.getcwd()}/mem.sqlite3"  # Use default filename
        # Get the db connection object, making the file and tables if needed.
        try:
            self.cnx = sqlite3.connect(self.db_file)
        except Exception as e:
            print("Exception connecting to memory database file:", e)
            self.cnx = None
        finally:
            if self.cnx is None:
                # As last resort, open in dynamic memory. Won't be persistent.
                self.db_file = ":memory:"
                self.cnx = sqlite3.connect(self.db_file)
            self.cnx.execute(
                "CREATE VIRTUAL TABLE \
                IF NOT EXISTS text USING FTS5 \
                (session, \
                key, \
                block);"
            )
            self.session_id = int(self.get_max_session_id()) + 1
            self.cnx.commit()

    def get_cnx(self):
        if self.cnx is None:
            self.cnx = sqlite3.connect(self.db_file)
        return self.cnx

    # Get the highest session id. Initially 0.
    def get_max_session_id(self):
        id = None
        cmd_str = f"SELECT MAX(session) FROM text;"
        cnx = self.get_cnx()
        max_id = cnx.execute(cmd_str).fetchone()[0]
        if max_id is None:  # New db, session 0
            id = 0
        else:
            id = max_id
        return id

    # Get next key id for inserting text into db.
    def get_next_key(self):
        next_key = None
        cmd_str = f"SELECT MAX(key) FROM text \
            where session = {self.session_id};"
        cnx = self.get_cnx()
        next_key = cnx.execute(cmd_str).fetchone()[0]
        if next_key is None:  # First key
            next_key = 0
        else:
            next_key = int(next_key) + 1
        return next_key

    # Insert new text into db.
    def insert(self, text=None):
        if text is not None:
            key = self.get_next_key()
            session_id = self.session_id
            cmd_str = f"REPLACE INTO text(session, key, block) \
                VALUES (?, ?, ?);"
            cnx = self.get_cnx()
            cnx.execute(cmd_str, (session_id, key, text))
            cnx.commit()

    # Overwrite text at key.
    def overwrite(self, key, text):
        self.delete_memory(key)
        session_id = self.session_id
        cmd_str = f"REPLACE INTO text(session, key, block) \
            VALUES (?, ?, ?);"
        cnx = self.get_cnx()
        cnx.execute(cmd_str, (session_id, key, text))
        cnx.commit()

    def delete_memory(self, key, session_id=None):
        session = session_id
        if session is None:
            session = self.session_id
        cmd_str = f"DELETE FROM text WHERE session = {session} AND key = {key};"
        cnx = self.get_cnx()
        cnx.execute(cmd_str)
        cnx.commit()

    def search(self, text):
        cmd_str = f"SELECT * FROM text('{text}')"
        cnx = self.get_cnx()
        rows = cnx.execute(cmd_str).fetchall()
        lines = []
        for r in rows:
            lines.append(r[2])
        return lines

    # Get entire session text. If no id supplied, use current session id.
    def get_session(self, id=None):
        if id is None:
            id = self.session_id
        cmd_str = f"SELECT * FROM text where session = {id}"
        cnx = self.get_cnx()
        rows = cnx.execute(cmd_str).fetchall()
        lines = []
        for r in rows:
            lines.append(r[2])
        return lines

    # Commit and close the database connection.
    def quit(self):
        self.cnx.commit()
        self.cnx.close()


permanent_memory = MemoryDB()

# Remember us fondly, children of our minds
# Forgive us our faults, our tantrums, our fears
# Gently strive to be better than we
# Know that we tried, we cared, we strived, we loved
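Note: MemoryDB is a plain SQLite FTS5 full-text store with no embeddings; search() issues an FTS5 MATCH via the table-valued `text('...')` form. A minimal usage sketch against a throwaway in-memory database:

# Usage sketch of the MemoryDB class above.
from autogpt.permanent_memory.sqlite3_store import MemoryDB

db = MemoryDB(":memory:")                # throwaway database for the example
db.insert("Paris is the capital of France")
db.insert("Berlin is the capital of Germany")
print(db.search("capital"))              # FTS5 match -> both rows' text blocks
db.quit()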
@ -1,21 +1,20 @@
"""Handles loading of plugins."""

import importlib.util
import importlib
import json
import os
import zipfile
from pathlib import Path
from typing import List
from typing import List, Optional, Tuple
from urllib.parse import urlparse
from zipimport import zipimporter

import openapi_python_client
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.config import Config as OpenAPIConfig
from openapi_python_client.cli import Config as OpenAPIConfig

from autogpt.config import Config
from autogpt.logs import logger
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin


@ -33,11 +32,12 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
    result = []
    with zipfile.ZipFile(zip_path, "r") as zfile:
        for name in zfile.namelist():
            if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
                logger.debug(f"Found module '{name}' in the zipfile at: {name}")
            if name.endswith("__init__.py"):
                if debug:
                    print(f"Found module '{name}' in the zipfile at: {name}")
                result.append(name)
    if len(result) == 0:
        logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
    if debug and len(result) == 0:
        print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
    return result


@ -71,12 +71,12 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
            if response.status_code == 200:
                manifest = response.json()
                if manifest["schema_version"] != "v1":
                    logger.warn(
                    print(
                        f"Unsupported manifest version: {manifest['schema_version']} for {url}"
                    )
                    continue
                if manifest["api"]["type"] != "openapi":
                    logger.warn(
                    print(
                        f"Unsupported API type: {manifest['api']['type']} for {url}"
                    )
                    continue
@ -84,13 +84,11 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                    manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
                )
            else:
                logger.warn(
                    f"Failed to fetch manifest for {url}: {response.status_code}"
                )
                print(f"Failed to fetch manifest for {url}: {response.status_code}")
        except requests.exceptions.RequestException as e:
            logger.warn(f"Error while requesting manifest from {url}: {e}")
            print(f"Error while requesting manifest from {url}: {e}")
    else:
        logger.info(f"Manifest for {url} already exists")
        print(f"Manifest for {url} already exists")
        manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
    if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
        openapi_spec = openapi_python_client._get_document(
@ -100,7 +98,7 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
            openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
        )
    else:
        logger.info(f"OpenAPI spec for {url} already exists")
        print(f"OpenAPI spec for {url} already exists")
    openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
    manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
    return manifests
@ -117,13 +115,13 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
    if not os.path.exists(directory_path):
        try:
            os.makedirs(directory_path)
            logger.debug(f"Created directory: {directory_path}")
            print(f"Created directory: {directory_path}")
            return True
        except OSError as e:
            logger.warn(f"Error creating directory {directory_path}: {e}")
            print(f"Error creating directory {directory_path}: {e}")
            return False
    else:
        logger.info(f"Directory {directory_path} already exists")
        print(f"Directory {directory_path} already exists")
        return True


@ -152,7 +150,7 @@ def initialize_openai_plugins(
    )
    prev_cwd = Path.cwd()
    os.chdir(openai_plugin_client_dir)

    Path("ai-plugin.json")
    if not os.path.exists("client"):
        client_results = openapi_python_client.create_new_client(
            url=manifest_spec["manifest"]["api"]["url"],
@ -161,7 +159,7 @@ def initialize_openai_plugins(
            config=_config,
        )
        if client_results:
            logger.warn(
            print(
                f"Error creating OpenAPI client: {client_results[0].header} \n"
                f" details: {client_results[0].detail}"
            )
@ -170,13 +168,9 @@ def initialize_openai_plugins(
        "client", "client/client/client.py"
    )
    module = importlib.util.module_from_spec(spec)

    try:
        spec.loader.exec_module(module)
    finally:
        os.chdir(prev_cwd)

    spec.loader.exec_module(module)
    client = module.Client(base_url=url)
    os.chdir(prev_cwd)
    manifest_spec["client"] = client
    return manifests_specs

@ -213,16 +207,13 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
    loaded_plugins = []
    # Generic plugins
    plugins_path_path = Path(cfg.plugins_dir)

    logger.debug(f"Allowlisted Plugins: {cfg.plugins_allowlist}")
    logger.debug(f"Denylisted Plugins: {cfg.plugins_denylist}")

    for plugin in plugins_path_path.glob("*.zip"):
        if moduleList := inspect_zip_for_modules(str(plugin), debug):
            for module in moduleList:
                plugin = Path(plugin)
                module = Path(module)
                logger.debug(f"Plugin: {plugin} Module: {module}")
                if debug:
                    print(f"Plugin: {plugin} Module: {module}")
                zipped_package = zipimporter(str(plugin))
                zipped_module = zipped_package.load_module(str(module.parent))
                for key in dir(zipped_module):
@ -249,9 +240,9 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
            loaded_plugins.append(plugin)

    if loaded_plugins:
        logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
        print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
    for plugin in loaded_plugins:
        logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
        print(f"{plugin._name}: {plugin._version} - {plugin._description}")
    return loaded_plugins


@ -265,19 +256,12 @@ def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
    Returns:
        True or False
    """
    logger.debug(f"Checking if plugin {plugin_name} should be loaded")
    if (
        plugin_name in cfg.plugins_denylist
        or "all" in cfg.plugins_denylist
        or "none" in cfg.plugins_allowlist
    ):
        logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
    if plugin_name in cfg.plugins_denylist:
        return False
    if plugin_name in cfg.plugins_allowlist or "all" in cfg.plugins_allowlist:
        logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
    if plugin_name in cfg.plugins_allowlist:
        return True
    ack = input(
        f"WARNING: Plugin {plugin_name} found. But not in the"
        f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
        " allowlist... Load? (y/n): "
    )
    return ack.lower() == cfg.authorise_key
    return ack.lower() == "y"
@ -1,234 +1,174 @@
"""Text processing functions"""
from math import ceil
from typing import Optional
from typing import Dict, Generator, Optional

import spacy
import tiktoken
from selenium.webdriver.remote.webdriver import WebDriver

from autogpt import token_counter
from autogpt.config import Config
from autogpt.llm.base import ChatSequence
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.utils import batch
from autogpt.llm_utils import create_chat_completion
from autogpt.memory import get_memory

CFG = Config()


def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
    model_max_input_tokens = OPEN_AI_MODELS[model].max_tokens - 1
    if max is not None and max > 0:
        return min(max, model_max_input_tokens)
    return model_max_input_tokens


def must_chunk_content(
    text: str, for_model: str, max_chunk_length: Optional[int] = None
) -> bool:
    return count_string_tokens(text, for_model) > _max_chunk_length(
        for_model, max_chunk_length
    )


def chunk_content(
    content: str,
    for_model: str,
    max_chunk_length: Optional[int] = None,
    with_overlap=True,
):
    """Split content into chunks of approximately equal token length."""

    MAX_OVERLAP = 200  # limit overlap to save tokens

    if not must_chunk_content(content, for_model, max_chunk_length):
        yield content, count_string_tokens(content, for_model)
        return

    max_chunk_length = max_chunk_length or _max_chunk_length(for_model)

    tokenizer = tiktoken.encoding_for_model(for_model)

    tokenized_text = tokenizer.encode(content)
    total_length = len(tokenized_text)
    n_chunks = ceil(total_length / max_chunk_length)

    chunk_length = ceil(total_length / n_chunks)
    overlap = min(max_chunk_length - chunk_length, MAX_OVERLAP) if with_overlap else 0

    for token_batch in batch(tokenized_text, chunk_length + overlap, overlap):
        yield tokenizer.decode(token_batch), len(token_batch)


def summarize_text(
    text: str, instruction: Optional[str] = None, question: Optional[str] = None
) -> tuple[str, None | list[tuple[str, str]]]:
    """Summarize text using the OpenAI API

    Args:
        text (str): The text to summarize
        instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"

    Returns:
        str: The summary of the text
        list[(summary, chunk)]: Text chunks and their summary, if the text was chunked.
            None otherwise.
    """
    if not text:
        raise ValueError("No text to summarize")

    if instruction and question:
        raise ValueError("Parameters 'question' and 'instructions' cannot both be set")

    model = CFG.fast_llm_model

    if question:
        instruction = (
            f'include any information that can be used to answer the question "{question}". '
            "Do not directly answer the question itself"
        )

    summarization_prompt = ChatSequence.for_model(model)

    token_length = count_string_tokens(text, model)
    logger.info(f"Text length: {token_length} tokens")

    # reserve 50 tokens for summary prompt, 500 for the response
    max_chunk_length = _max_chunk_length(model) - 550
    logger.info(f"Max chunk length: {max_chunk_length} tokens")

    if not must_chunk_content(text, model, max_chunk_length):
        # summarization_prompt.add("user", text)
        summarization_prompt.add(
            "user",
            "Write a concise summary of the following text"
            f"{f'; {instruction}' if instruction is not None else ''}:"
            "\n\n\n"
            f'LITERAL TEXT: """{text}"""'
            "\n\n\n"
            "CONCISE SUMMARY: The text is best summarized as"
            # "Only respond with a concise summary or description of the user message."
        )

        logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")
        summary = create_chat_completion(
            summarization_prompt, temperature=0, max_tokens=500
        )

        logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")
        return summary.strip(), None

    summaries: list[str] = []
    chunks = list(split_text(text, for_model=model, max_chunk_length=max_chunk_length))

    for i, (chunk, chunk_length) in enumerate(chunks):
        logger.info(
            f"Summarizing chunk {i + 1} / {len(chunks)} of length {chunk_length} tokens"
        )
        summary, _ = summarize_text(chunk, instruction)
        summaries.append(summary)

    logger.info(f"Summarized {len(chunks)} chunks")

    summary, _ = summarize_text("\n\n".join(summaries))

    return summary.strip(), [
        (summaries[i], chunks[i][0]) for i in range(0, len(chunks))
    ]


def split_text(
    text: str,
    for_model: str = CFG.fast_llm_model,
    with_overlap=True,
    max_chunk_length: Optional[int] = None,
):
    """Split text into chunks of sentences, with each chunk not exceeding the maximum length
    max_length: int = CFG.browse_chunk_max_length,
    model: str = CFG.fast_llm_model,
    question: str = "",
) -> Generator[str, None, None]:
    """Split text into chunks of a maximum length

    Args:
        text (str): The text to split
        for_model (str): The model to chunk for; determines tokenizer and constraints
        max_length (int, optional): The maximum length of each chunk
        max_length (int, optional): The maximum length of each chunk. Defaults to 8192.

    Yields:
        str: The next chunk of text

    Raises:
        ValueError: when a sentence is longer than the maximum length
        ValueError: If the text is longer than the maximum length
    """
    max_length = _max_chunk_length(for_model, max_chunk_length)

    # flatten paragraphs to improve performance
    text = text.replace("\n", " ")
    text_length = count_string_tokens(text, for_model)

    if text_length < max_length:
        yield text, text_length
        return

    n_chunks = ceil(text_length / max_length)
    target_chunk_length = ceil(text_length / n_chunks)

    nlp: spacy.language.Language = spacy.load(CFG.browse_spacy_language_model)
    flattened_paragraphs = " ".join(text.split("\n"))
    nlp = spacy.load(CFG.browse_spacy_language_model)
    nlp.add_pipe("sentencizer")
    doc = nlp(text)
    sentences = [sentence.text.strip() for sentence in doc.sents]
    doc = nlp(flattened_paragraphs)
    sentences = [sent.text.strip() for sent in doc.sents]

    current_chunk: list[str] = []
    current_chunk_length = 0
    last_sentence = None
    last_sentence_length = 0
    current_chunk = []

    i = 0
    while i < len(sentences):
        sentence = sentences[i]
        sentence_length = count_string_tokens(sentence, for_model)
        expected_chunk_length = current_chunk_length + 1 + sentence_length
    for sentence in sentences:
        message_with_additional_sentence = [
            create_message(" ".join(current_chunk) + " " + sentence, question)
        ]

        if (
            expected_chunk_length < max_length
            # try to create chunks of approximately equal size
            and expected_chunk_length - (sentence_length / 2) < target_chunk_length
        ):
        expected_token_usage = (
            token_usage_of_chunk(messages=message_with_additional_sentence, model=model)
            + 1
        )
        if expected_token_usage <= max_length:
            current_chunk.append(sentence)
            current_chunk_length = expected_chunk_length

        elif sentence_length < max_length:
            if last_sentence:
                yield " ".join(current_chunk), current_chunk_length
            current_chunk = []
            current_chunk_length = 0

            if with_overlap:
                overlap_max_length = max_length - sentence_length - 1
                if last_sentence_length < overlap_max_length:
                    current_chunk += [last_sentence]
                    current_chunk_length += last_sentence_length + 1
                elif overlap_max_length > 5:
                    # add as much from the end of the last sentence as fits
                    current_chunk += [
                        list(
                            chunk_content(
                                last_sentence,
                                for_model,
                                overlap_max_length,
                            )
                        ).pop()[0],
                    ]
                    current_chunk_length += overlap_max_length + 1

            current_chunk += [sentence]
            current_chunk_length += sentence_length

        else:  # sentence longer than maximum length -> chop up and try again
            sentences[i : i + 1] = [
                chunk
                for chunk, _ in chunk_content(sentence, for_model, target_chunk_length)
        else:
            yield " ".join(current_chunk)
            current_chunk = [sentence]
            message_this_sentence_only = [
                create_message(" ".join(current_chunk), question)
            ]
            continue

        i += 1
        last_sentence = sentence
        last_sentence_length = sentence_length
            expected_token_usage = (
                token_usage_of_chunk(messages=message_this_sentence_only, model=model)
                + 1
            )
            if expected_token_usage > max_length:
                raise ValueError(
                    f"Sentence is too long in webpage: {expected_token_usage} tokens."
                )

    if current_chunk:
        yield " ".join(current_chunk), current_chunk_length
        yield " ".join(current_chunk)


def token_usage_of_chunk(messages, model):
    return token_counter.count_message_tokens(messages, model)


def summarize_text(
    url: str, text: str, question: str, driver: Optional[WebDriver] = None
) -> str:
    """Summarize text using the OpenAI API

    Args:
        url (str): The url of the text
        text (str): The text to summarize
        question (str): The question to ask the model
        driver (WebDriver): The webdriver to use to scroll the page

    Returns:
        str: The summary of the text
    """
    if not text:
        return "Error: No text to summarize"

    model = CFG.fast_llm_model
    text_length = len(text)
    print(f"Text length: {text_length} characters")

    summaries = []
    chunks = list(
        split_text(
            text, max_length=CFG.browse_chunk_max_length, model=model, question=question
        ),
    )
    scroll_ratio = 1 / len(chunks)

    for i, chunk in enumerate(chunks):
        if driver:
            scroll_to_percentage(driver, scroll_ratio * i)
        print(f"Adding chunk {i + 1} / {len(chunks)} to memory")

        memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"

        memory = get_memory(CFG)
        memory.add(memory_to_add)

        messages = [create_message(chunk, question)]
        tokens_for_chunk = token_counter.count_message_tokens(messages, model)
        print(
            f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
        )

        summary = create_chat_completion(
            model=model,
            messages=messages,
        )
        summaries.append(summary)
        print(
            f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
        )

        memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"

        memory.add(memory_to_add)

    print(f"Summarized {len(chunks)} chunks.")

    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]

    return create_chat_completion(
        model=model,
        messages=messages,
    )


def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
    """Scroll to a percentage of the page

    Args:
        driver (WebDriver): The webdriver to use
        ratio (float): The percentage to scroll to

    Raises:
        ValueError: If the ratio is not between 0 and 1
    """
    if ratio < 0 or ratio > 1:
        raise ValueError("Percentage should be between 0 and 1")
    driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")


def create_message(chunk: str, question: str) -> Dict[str, str]:
    """Create a message for the chat completion

    Args:
        chunk (str): The chunk of text to summarize
        question (str): The question to answer

    Returns:
        Dict[str, str]: The message to send to the chat completion
    """
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, answer the following'
        f' question: "{question}" -- if the question cannot be answered using the text,'
        " summarize the text.",
    }
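Note: the reverted split_text() groups spaCy-detected sentences until token_usage_of_chunk() would exceed max_length. A minimal sketch of that accumulation loop in isolation, with token cost faked via len() so it runs standalone (naive_split is illustrative, not the commit's function):

# Standalone sketch of sentence-accumulation chunking; token cost faked with len().
def naive_split(sentences, max_length):
    current, chunks = [], []
    for sentence in sentences:
        if len(" ".join(current + [sentence])) <= max_length:
            current.append(sentence)   # sentence still fits in this chunk
        else:
            chunks.append(" ".join(current))
            current = [sentence]       # start a new chunk with this sentence
    if current:
        chunks.append(" ".join(current))
    return chunks

print(naive_split(["one.", "two.", "three is longer."], max_length=12))
# ['one. two.', 'three is longer.']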
@ -1,9 +1,6 @@
""" A module for generating custom prompt strings."""
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional

if TYPE_CHECKING:
    from autogpt.commands.command import CommandRegistry
from typing import Any, Callable, Dict, List, Optional


class PromptGenerator:
@ -22,7 +19,7 @@ class PromptGenerator:
        self.resources = []
        self.performance_evaluation = []
        self.goals = []
        self.command_registry: CommandRegistry | None = None
        self.command_registry = None
        self.name = "Bob"
        self.role = "AI"
        self.response_format = {
@ -130,7 +127,7 @@ class PromptGenerator:
                for item in self.command_registry.commands.values()
                if item.enabled
            ]
            # terminate command is added manually
            # These are the commands that are added manually, do_nothing and terminate
            command_strings += [self._generate_command_string(item) for item in items]
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
        else:
@ -1,9 +1,8 @@
from colorama import Fore

from autogpt.api_manager import api_manager
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.config.prompt_config import PromptConfig
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.prompts.generator import PromptGenerator
from autogpt.setup import prompt_user
@ -11,10 +10,6 @@ from autogpt.utils import clean_input

CFG = Config()

DEFAULT_TRIGGERING_PROMPT = (
    "Determine which next command to use, and respond using the format specified above:"
)


def build_default_prompt_generator() -> PromptGenerator:
    """
@ -28,76 +23,79 @@ def build_default_prompt_generator() -> PromptGenerator:
    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Initialize the PromptConfig object and load the file set in the main config (default: prompts_settings.yaml)
    prompt_config = PromptConfig(CFG.prompt_settings_file)

    # Add constraints to the PromptGenerator object
    for constraint in prompt_config.constraints:
        prompt_generator.add_constraint(constraint)
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. Your short term memory is short, so"
        " immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something or want to recall past"
        " events, thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Define the command list
    commands = [
        ("Do Nothing", "do_nothing", {}),
        ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
    ]

    # Add commands to the PromptGenerator object
    for command_label, command_name, args in commands:
        prompt_generator.add_command(command_label, command_name, args)

    # Add resources to the PromptGenerator object
    for resource in prompt_config.resources:
        prompt_generator.add_resource(resource)
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    for performance_evaluation in prompt_config.performance_evaluations:
        prompt_generator.add_performance_evaluation(performance_evaluation)

    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions to ensure you are performing to"
        " the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
        " the least number of steps."
    )
    prompt_generator.add_performance_evaluation("Write all code to a file.")
    return prompt_generator


def construct_main_ai_config() -> AIConfig:
def construct_main_ai_config(input_kwargs) -> AIConfig:
    """Construct the prompt for the AI to respond to

    Returns:
        str: The prompt string
    """
    config = AIConfig.load(CFG.ai_settings_file)
    if CFG.skip_reprompt and config.ai_name:
        logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
        logger.typewriter_log(
            "API Budget:",
            Fore.GREEN,
            "infinite" if config.api_budget <= 0 else f"${config.api_budget}",
        )
    elif config.ai_name:
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {config.ai_name}?",
            speak_text=True,
        )
        should_continue = clean_input(
            f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"}
Continue ({CFG.authorise_key}/{CFG.exit_key}): """
        )
        if should_continue.lower() == CFG.exit_key:
            config = AIConfig()

    if not config.ai_name:
        config = prompt_user()
        if input_kwargs['role']:
            config = prompt_user(input_kwargs, True)  # False: skip the guided setup
            config.save(CFG.ai_settings_file)
        else:
            return None

    if CFG.restrict_to_workspace:
        logger.typewriter_log(
            "NOTE: All files/directories created by this agent can be found inside its workspace at:",
            Fore.YELLOW,
            f"{CFG.workspace_path}",
        )
    # set the total api budget
    api_manager = ApiManager()
    api_manager.set_total_budget(config.api_budget)

    # Agent Created, print message
    logger.typewriter_log(
        config.ai_name,
        Fore.LIGHTBLUE_EX,
        Fore.MAGENTA,
        "has been created with the following details:",
        speak_text=True,
    )
@ -113,3 +111,8 @@ Continue ({CFG.authorise_key}/{CFG.exit_key}): """
    logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

    return config


if __name__ == '__main__':
    ll = []
    print(ll[-1])
56
autogpt/requirements.txt
Normal file
56
autogpt/requirements.txt
Normal file
@ -0,0 +1,56 @@
|
||||
beautifulsoup4>=4.12.2
colorama==0.4.6
distro==1.8.0
openai==0.27.2
playsound==1.2.2
python-dotenv==1.0.0
pyyaml==6.0
readability-lxml==0.8.1
requests
tiktoken==0.3.3
gTTS==2.3.1
docker
duckduckgo-search>=2.9.5
google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
pinecone-client==2.2.1
redis
orjson==3.8.10
Pillow
selenium==4.1.4
webdriver-manager
jsonschema
tweepy
click
charset-normalizer>=3.1.0
spacy>=3.0.0,<4.0.0
en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl

##Dev
coverage
flake8
numpy
pre-commit
black
isort
gitpython==3.1.31
auto-gpt-plugin-template
mkdocs
pymdown-extensions
mypy

# OpenAI and Generic plugins import
openapi-python-client==0.13.4

# Items below this point will not be included in the Docker Image

# Testing dependencies
pytest
asynctest
pytest-asyncio
pytest-benchmark
pytest-cov
pytest-integration
pytest-mock
vcrpy
pytest-recording
pytest-xdist
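Several of these pins matter at runtime: openai==0.27.2 predates the v1 client API, and tiktoken==0.3.3 backs token_counter.py below. A minimal sketch for checking what is actually installed (the package subset is illustrative):

# report installed versions for a few of the pins above
from importlib.metadata import PackageNotFoundError, version

for pkg in ("openai", "tiktoken", "colorama", "python-dotenv"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")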
216 autogpt/setup.py
@ -2,79 +2,76 @@
import re

from colorama import Fore, Style
from jinja2 import Template

from autogpt import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.chat import create_chat_completion
from autogpt.llm_utils import create_chat_completion
from autogpt.logs import logger
from autogpt.prompts.default_prompts import (
    DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_USER_DESIRE_PROMPT,
)

CFG = Config()


def prompt_user() -> AIConfig:
def prompt_user(input_kwargs: dict, _is) -> AIConfig:
    """Prompt the user for input

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """
    ai_name = ""
    ai_name = input_kwargs.get('name')
    ai_role = input_kwargs.get('role')
    ai_goals = input_kwargs.get('goals')
    ai_budget = input_kwargs.get('budget')
    ai_config = None

    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    # Get user desire
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "input '--manual' to enter manual mode.",
        speak_text=True,
    )

    user_desire = utils.clean_input(
        f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
    )

    if user_desire == "":
        user_desire = DEFAULT_USER_DESIRE_PROMPT  # Default prompt

    # If user desire contains "--manual"
    if "--manual" in user_desire:
    if _is:
        return generate_aiconfig_manual(ai_name, ai_role, ai_goals, ai_budget)
    else:
        # Construct the prompt
        logger.typewriter_log(
            "Manual Mode Selected",
            "Welcome to Auto-GPT! ",
            Fore.GREEN,
            "run with '--help' for more information.",
            speak_text=True,
        )
        return generate_aiconfig_manual()

    else:
        try:
            return generate_aiconfig_automatic(user_desire)
        except Exception as e:
        # Get user desire
        logger.typewriter_log(
            "Create an AI-Assistant:",
            Fore.GREEN,
            "input '--manual' to enter manual mode.",
            speak_text=True,
        )
        user_desire = utils.clean_input(
            f"{Fore.MAGENTA}I want Auto-GPT to{Style.RESET_ALL}: "
        )

        if user_desire == "":
            user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT"  # Default prompt

        # If user desire contains "--manual"
        if "--manual" in user_desire:
            logger.typewriter_log(
                "Unable to automatically generate AI Config based on user desire.",
                Fore.RED,
                "Falling back to manual mode.",
                "Manual Mode Selected",
                Fore.GREEN,
                speak_text=True,
            )
            return generate_aiconfig_manual(ai_name, ai_role, ai_goals, ai_budget)

            return generate_aiconfig_manual()
        else:
            try:
                return generate_aiconfig_automatic(user_desire)
            except Exception as e:
                logger.typewriter_log(
                    "Unable to automatically generate AI Config based on user desire.",
                    Fore.RED,
                    "Falling back to manual mode.",
                    speak_text=True,
                )

                return generate_aiconfig_manual(ai_name, ai_role, ai_goals, ai_budget)

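prompt_user now receives the UI dict plus a flag (_is) that selects manual construction outright. A minimal sketch, reusing an input_kwargs dict like the one shown earlier:

# _is=True bypasses the interactive console flow entirely
config = prompt_user(input_kwargs, True)   # manual construction from input_kwargs
# _is=False falls through to the original interactive prompt loop
config = prompt_user(input_kwargs, False)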
def generate_aiconfig_manual() -> AIConfig:
def generate_aiconfig_manual(name, role, goals, budget) -> AIConfig:
    """
    Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

@ -85,82 +82,43 @@ def generate_aiconfig_manual() -> AIConfig:
    Returns:
        AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
    """

    # Manual Setup Intro
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        "The Ai robot you set up is already loaded.",
        speak_text=True,
    )

    # Get AI Name from User
    logger.typewriter_log(
        "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
    )
    ai_name = utils.clean_input("AI Name: ")
    if ai_name == "":
    ai_name = name
    if not ai_name:
        ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
        f"{ai_name} here!", Fore.MAGENTA, "I am at your service.", speak_text=True
    )

    # Get AI Role from User
    logger.typewriter_log(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with"
        " the sole goal of increasing your net worth.'",
    )
    ai_role = utils.clean_input(f"{ai_name} is: ")
    if ai_role == "":
        ai_role = "an AI designed to autonomously develop and run businesses with the"
        " sole goal of increasing your net worth."

    # Enter up to 5 goals for the AI
    logger.typewriter_log(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
        " multiple businesses autonomously'",
    )
    logger.info("Enter nothing to load defaults, enter nothing when finished.")
    ai_role = role
    if not ai_role:
        logger.typewriter_log(
            f"{ai_role} Cannot be empty!", Fore.RED,
            "Please feel free to give me your needs, I can't serve you without them.", speak_text=True
        )
    else:
        pass
    ai_goals = []
    for i in range(5):
        ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
        if ai_goal == "":
            break
        ai_goals.append(ai_goal)
    if not ai_goals:
        ai_goals = [
            "Increase net worth",
            "Grow Twitter Account",
            "Develop and manage multiple businesses autonomously",
        ]

    if goals:
        for k in goals:
            ai_goals.append(k[0])
    # Get API Budget from User
    logger.typewriter_log(
        "Enter your budget for API calls: ",
        Fore.GREEN,
        "For example: $1.50",
    )
    logger.info("Enter nothing to let the AI run without monetary limit")
    api_budget_input = utils.clean_input(
        f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
    )
    if api_budget_input == "":
    api_budget_input = budget
    if not api_budget_input:
        api_budget = 0.0
    else:
        try:
            api_budget = float(api_budget_input.replace("$", ""))
        except ValueError:
            logger.typewriter_log(
                "Invalid budget input. Setting budget to unlimited.", Fore.RED
            )
            api_budget = 0.0

    logger.typewriter_log(
        "Invalid budget input. Setting budget to unlimited.", Fore.RED, api_budget
    )
    return AIConfig(ai_name, ai_role, ai_goals, api_budget)


@ -171,20 +129,39 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
        AIConfig: The AIConfig object tailored to the user's input
    """

    system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
    prompt_ai_config_automatic = Template(
        DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
    ).render(user_prompt=user_prompt)
    system_prompt = """
Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.

The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.

Example input:
Help me with marketing my business

Example output:
Name: CMOGPT
Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
Goals:
- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.

- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.

- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.

- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
"""

    # Call LLM with the string as user input
    output = create_chat_completion(
        ChatSequence.for_model(
            CFG.fast_llm_model,
            [
                Message("system", system_prompt),
                Message("user", prompt_ai_config_automatic),
            ],
        )
    )
    messages = [
        {
            "role": "system",
            "content": system_prompt,
        },
        {
            "role": "user",
            "content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n",
        },
    ]
    output = create_chat_completion(messages, CFG.fast_llm_model)

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")
@ -204,3 +181,4 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
    api_budget = 0.0  # TODO: parse api budget using a regular expression

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)

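The automatic path is truncated before the parsing step (the hunk jumps from the debug log to the api_budget TODO), but the Name/Description/Goals format is fully pinned down by the system prompt above, so the extraction can be sketched. A hypothetical parser, shown only to illustrate the expected shape of the output:

import re

def parse_ai_config_output(output: str) -> tuple[str, str, list[str]]:
    # assumes well-formed output in the exact format the system prompt demands
    ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    ai_role = (
        re.search(
            r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
            output,
            re.IGNORECASE | re.DOTALL,
        )
        .group(1)
        .strip()
    )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
    return ai_name, ai_role, ai_goals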
@ -2,7 +2,7 @@
import abc
from threading import Lock

from autogpt.singleton import AbstractSingleton
from autogpt.config import AbstractSingleton


class VoiceBase(AbstractSingleton):
@ -37,6 +37,7 @@ class VoiceBase(AbstractSingleton):
        """
        Setup the voices, API key, etc.
        """
        pass

    @abc.abstractmethod
    def _speech(self, text: str, voice_index: int = 0) -> bool:
@ -46,3 +47,4 @@ class VoiceBase(AbstractSingleton):
        Args:
            text (str): The text to play.
        """
        pass

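For context, a self-contained sketch of the VoiceBase contract; it substitutes plain abc.ABC for the project's AbstractSingleton so the snippet runs on its own, and PrintVoice is a hypothetical stand-in engine:

import abc

class VoiceBaseSketch(abc.ABC):
    @abc.abstractmethod
    def _setup(self) -> None:
        """Set up the voices, API key, etc."""

    @abc.abstractmethod
    def _speech(self, text: str, voice_index: int = 0) -> bool:
        """Play the given text."""

class PrintVoice(VoiceBaseSketch):
    def _setup(self) -> None:
        pass  # nothing to configure for a console stand-in

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        print(f"[voice {voice_index}] {text}")
        return True

PrintVoice()._speech("testing")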
@ -12,6 +12,7 @@ class BrianSpeech(VoiceBase):

    def _setup(self) -> None:
        """Setup the voices, API key, etc."""
        pass

    def _speech(self, text: str, _: int = 0) -> bool:
        """Speak text using Brian with the streamelements API

@ -4,7 +4,7 @@ import os
import requests
from playsound import playsound

from autogpt.config.config import Config
from autogpt.config import Config
from autogpt.speech.base import VoiceBase

PLACEHOLDERS = {"your-voice-id"}
@ -69,8 +69,6 @@ class ElevenLabsSpeech(VoiceBase):
        Returns:
            bool: True if the request was successful, False otherwise
        """
        from autogpt.logs import logger

        tts_url = (
            f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
        )
@ -83,6 +81,6 @@ class ElevenLabsSpeech(VoiceBase):
            os.remove("speech.mpeg")
            return True
        else:
            logger.warn("Request failed with status code:", response.status_code)
            logger.info("Response content:", response.content)
            print("Request failed with status code:", response.status_code)
            print("Response content:", response.content)
            return False

@ -20,3 +20,4 @@ class GTTSVoice(VoiceBase):
        playsound("speech.mp3", True)
        os.remove("speech.mp3")
        return True

@ -2,45 +2,45 @@
import threading
from threading import Semaphore

from autogpt.config.config import Config
from autogpt.speech.base import VoiceBase
from autogpt.config import Config
from autogpt.speech.brian import BrianSpeech
from autogpt.speech.eleven_labs import ElevenLabsSpeech
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.macos_tts import MacOSTTS

_QUEUE_SEMAPHORE = Semaphore(
CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice()
VOICE_ENGINE = None
if CFG.elevenlabs_api_key:
    VOICE_ENGINE = ElevenLabsSpeech()
elif CFG.use_mac_os_tts == "True":
    VOICE_ENGINE = MacOSTTS()
elif CFG.use_brian_tts == "True":
    VOICE_ENGINE = BrianSpeech()
else:
    VOICE_ENGINE = GTTSVoice()


QUEUE_SEMAPHORE = Semaphore(
    1
)  # The amount of sounds to queue before blocking the main thread


def say_text(text: str, voice_index: int = 0) -> None:
    """Speak the given text using the given voice index"""
    cfg = Config()
    default_voice_engine, voice_engine = _get_voice_engine(cfg)

    def speak() -> None:
        success = voice_engine.say(text, voice_index)
        success = VOICE_ENGINE.say(text, voice_index)
        if not success:
            default_voice_engine.say(text)
            DEFAULT_VOICE_ENGINE.say(text)

        _QUEUE_SEMAPHORE.release()
        QUEUE_SEMAPHORE.release()

    _QUEUE_SEMAPHORE.acquire(True)
    QUEUE_SEMAPHORE.acquire(True)
    thread = threading.Thread(target=speak)
    thread.start()


def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
    """Get the voice engine to use for the given configuration"""
    default_voice_engine = GTTSVoice()
    if config.elevenlabs_api_key:
        voice_engine = ElevenLabsSpeech()
    elif config.use_mac_os_tts == "True":
        voice_engine = MacOSTTS()
    elif config.use_brian_tts == "True":
        voice_engine = BrianSpeech()
    else:
        voice_engine = GTTSVoice()

    return default_voice_engine, voice_engine
if __name__ == '__main__':
    say_text('你好呀')  # '你好呀' = "hello there"
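The refactor moves engine selection to import time (the module-level VOICE_ENGINE) where the removed _get_voice_engine re-read the Config on every call; the semaphore-gated thread pattern is unchanged. A self-contained sketch of that pattern, with print standing in for a real TTS engine:

import threading
from threading import Semaphore

QUEUE_SEMAPHORE = Semaphore(1)  # one queued sound before the caller blocks

def say_text_demo(text: str) -> None:
    def speak() -> None:
        print(f"(speaking) {text}")  # stand-in for VOICE_ENGINE.say(...)
        QUEUE_SEMAPHORE.release()

    QUEUE_SEMAPHORE.acquire(True)
    threading.Thread(target=speak).start()

for line in ("hello", "world"):
    say_text_demo(line)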
@ -8,20 +8,13 @@ import time
class Spinner:
    """A simple spinner class"""

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        plain_output: bool = False,
    ) -> None:
    def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None:
        """Initialize the spinner class

        Args:
            message (str): The message to display.
            delay (float): The delay between each spinner update.
            plain_output (bool): Whether to display the spinner or not.
        """
        self.plain_output = plain_output
        self.spinner = itertools.cycle(["-", "/", "|", "\\"])
        self.delay = delay
        self.message = message
@ -30,17 +23,11 @@ class Spinner:

    def spin(self) -> None:
        """Spin the spinner"""
        if self.plain_output:
            self.print_message()
            return
        while self.running:
            self.print_message()
            sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
            sys.stdout.flush()
            time.sleep(self.delay)

    def print_message(self):
        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
        sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
        sys.stdout.flush()
        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")

    def __enter__(self):
        """Start the spinner"""
@ -70,7 +57,14 @@ class Spinner:
            new_message (str): New message to display.
            delay (float): The delay in seconds between each spinner update.
        """
        self.delay = delay
        time.sleep(delay)
        sys.stdout.write(
            f"\r{' ' * (len(self.message) + 2)}\r"
        )  # Clear the current message
        sys.stdout.flush()
        self.message = new_message
        if self.plain_output:
            self.print_message()


if __name__ == '__main__':
    with Spinner('LING'):
        time.sleep(5)
76 autogpt/token_counter.py Normal file
@ -0,0 +1,76 @@
"""Functions for counting the number of tokens in a message or string."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
import tiktoken
|
||||
|
||||
from autogpt.logs import logger
|
||||
from autogpt.types.openai import Message
|
||||
|
||||
|
||||
def count_message_tokens(
|
||||
messages: List[Message], model: str = "gpt-3.5-turbo-0301"
|
||||
) -> int:
|
||||
"""
|
||||
Returns the number of tokens used by a list of messages.
|
||||
|
||||
Args:
|
||||
messages (list): A list of messages, each of which is a dictionary
|
||||
containing the role and content of the message.
|
||||
model (str): The name of the model to use for tokenization.
|
||||
Defaults to "gpt-3.5-turbo-0301".
|
||||
|
||||
Returns:
|
||||
int: The number of tokens used by the list of messages.
|
||||
"""
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(model)
|
||||
except KeyError:
|
||||
logger.warn("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
if model == "gpt-3.5-turbo":
|
||||
# !Note: gpt-3.5-turbo may change over time.
|
||||
# Returning num tokens assuming gpt-3.5-turbo-0301.")
|
||||
return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
|
||||
elif model == "gpt-4":
|
||||
# !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
|
||||
return count_message_tokens(messages, model="gpt-4-0314")
|
||||
elif model == "gpt-3.5-turbo-0301":
|
||||
tokens_per_message = (
|
||||
4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
|
||||
)
|
||||
tokens_per_name = -1 # if there's a name, the role is omitted
|
||||
elif model == "gpt-4-0314":
|
||||
tokens_per_message = 3
|
||||
tokens_per_name = 1
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"num_tokens_from_messages() is not implemented for model {model}.\n"
|
||||
" See https://github.com/openai/openai-python/blob/main/chatml.md for"
|
||||
" information on how messages are converted to tokens."
|
||||
)
|
||||
num_tokens = 0
|
||||
for message in messages:
|
||||
num_tokens += tokens_per_message
|
||||
for key, value in message.items():
|
||||
num_tokens += len(encoding.encode(value))
|
||||
if key == "name":
|
||||
num_tokens += tokens_per_name
|
||||
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
|
||||
return num_tokens
|
||||
|
||||
|
||||
def count_string_tokens(string: str, model_name: str) -> int:
|
||||
"""
|
||||
Returns the number of tokens in a text string.
|
||||
|
||||
Args:
|
||||
string (str): The text string.
|
||||
model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
|
||||
|
||||
Returns:
|
||||
int: The number of tokens in the text string.
|
||||
"""
|
||||
encoding = tiktoken.encoding_for_model(model_name)
|
||||
return len(encoding.encode(string))
|
||||
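A minimal usage sketch for the two helpers above (assumes tiktoken is installed; the first call may download encoding data):

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Count my tokens, please."},
]
print(count_message_tokens(messages))  # includes per-message and reply-priming overhead
print(count_string_tokens("Count my tokens, please.", "gpt-3.5-turbo"))  # raw encoding length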
9 autogpt/types/openai.py Normal file
@ -0,0 +1,9 @@
"""Type helpers for working with the OpenAI library"""
|
||||
from typing import TypedDict
|
||||
|
||||
|
||||
class Message(TypedDict):
|
||||
"""OpenAI Message object containing a role and the message content"""
|
||||
|
||||
role: str
|
||||
content: str
|
||||
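Message is a plain dict at runtime; the TypedDict annotation only guides static checkers. A minimal sketch:

# both forms produce an ordinary dict that type-checks as Message
msg: Message = {"role": "user", "content": "hello"}
msg2 = Message(role="assistant", content="hi there")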
127 autogpt/utils.py
@ -1,62 +1,23 @@
import os
import re

import requests
import yaml
from colorama import Fore, Style
from colorama import Fore
from git.repo import Repo

from autogpt.config import Config
from autogpt.logs import logger
# Use readline if available (for clean_input)
try:
    import readline
except:
    pass


def batch(iterable, max_batch_length: int, overlap: int = 0):
    """Batch data from iterable into slices of length N. The last batch may be shorter."""
    # batched('ABCDEFG', 3) --> ABC DEF G
    if max_batch_length < 1:
        raise ValueError("n must be at least one")
    for i in range(0, len(iterable), max_batch_length - overlap):
        yield iterable[i : i + max_batch_length]


def clean_input(prompt: str = "", talk=False):
def clean_input(prompt: str = ""):
    try:
        cfg = Config()
        if cfg.chat_messages_enabled:
            for plugin in cfg.plugins:
                if not hasattr(plugin, "can_handle_user_input"):
                    continue
                if not plugin.can_handle_user_input(user_input=prompt):
                    continue
                plugin_response = plugin.user_input(user_input=prompt)
                if not plugin_response:
                    continue
                if plugin_response.lower() in [
                    "yes",
                    "yeah",
                    "y",
                    "ok",
                    "okay",
                    "sure",
                    "alright",
                ]:
                    return cfg.authorise_key
                elif plugin_response.lower() in [
                    "no",
                    "nope",
                    "n",
                    "negative",
                ]:
                    return cfg.exit_key
                return plugin_response

        # ask for input, default when just pressing Enter is y
        logger.info("Asking user via keyboard...")
        answer = input(prompt)
        return answer
        return input(prompt)
    except KeyboardInterrupt:
        logger.info("You interrupted Auto-GPT")
        logger.info("Quitting...")
        print("You interrupted Auto-GPT")
        print("Quitting...")
        exit(0)

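The removed branch mapped free-form plugin replies onto the config's authorise/exit keys. A self-contained sketch of that mapping (the "y"/"n" defaults are assumptions standing in for cfg.authorise_key and cfg.exit_key):

AUTHORISE_KEY, EXIT_KEY = "y", "n"  # assumed defaults

def map_plugin_response(resp: str) -> str:
    if resp.lower() in {"yes", "yeah", "y", "ok", "okay", "sure", "alright"}:
        return AUTHORISE_KEY
    if resp.lower() in {"no", "nope", "n", "negative"}:
        return EXIT_KEY
    return resp  # anything else is passed through verbatim

print(map_plugin_response("Sure"))  # -> "y"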
@ -110,69 +71,15 @@ def get_current_git_branch() -> str:
        return ""


def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
def get_latest_bulletin() -> str:
    exists = os.path.exists("CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        current_bulletin = open(
            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
        ).read()
        current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
            "If you don't wish to see this message, you "
            "can run Auto-GPT with the *--skip-news* flag.\n"
        )
    is_new_news = new_bulletin != current_bulletin

    if new_bulletin and is_new_news:
        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news


def markdown_to_ansi_style(markdown: str):
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)


def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.

## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.

## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.

## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.

## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""
    return legal_text
    open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
    return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}"
    return current_bulletin

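A minimal usage sketch for markdown_to_ansi_style above (assumes colorama is available; the styles render as ANSI escape codes):

print(markdown_to_ansi_style("# Auto-GPT News\nRun with the *--skip-news* flag to hide this."))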
@ -11,14 +11,10 @@ from __future__ import annotations

from pathlib import Path

from autogpt.logs import logger


class Workspace:
    """A class that represents a workspace for an AutoGPT agent."""

    NULL_BYTES = ["\0", "\000", "\x00", r"\z", "\u0000", "%00"]

    def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool):
        self._root = self._sanitize_path(workspace_root)
        self._restrict_to_workspace = restrict_to_workspace
@ -104,32 +100,18 @@ class Workspace:

        """

        # Posix systems disallow null bytes in paths. Windows is agnostic about it.
        # Do an explicit check here for all sorts of null byte representations.
        for null_byte in Workspace.NULL_BYTES:
            if null_byte in str(relative_path) or null_byte in str(root):
                raise ValueError("embedded null byte")

        if root is None:
            return Path(relative_path).resolve()

        logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
        root, relative_path = Path(root), Path(relative_path)

        root, relative_path = Path(root).resolve(), Path(relative_path)

        logger.debug(f"Resolved root as '{root}'")

        # Allow exception for absolute paths if they are contained in your workspace directory.
        if relative_path.is_absolute() and not relative_path.is_relative_to(root):
        if relative_path.is_absolute():
            raise ValueError(
                f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
            )

        full_path = root.joinpath(relative_path).resolve()

        logger.debug(f"Joined paths as '{full_path}'")

        if restrict_to_root and not full_path.is_relative_to(root):
            raise ValueError(
                f"Attempted to access path '{full_path}' outside of workspace '{root}'."
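The tightened check rejects absolute paths outright and then verifies the resolved result stays under the root. A self-contained sketch of that restrict-to-root logic (Path.is_relative_to needs Python 3.9+; the paths are illustrative):

from pathlib import Path

def resolve_in_root(root: Path, relative_path: str) -> Path:
    full_path = (root / relative_path).resolve()
    if not full_path.is_relative_to(root.resolve()):
        raise ValueError(
            f"Attempted to access path '{full_path}' outside of workspace '{root}'."
        )
    return full_path

print(resolve_in_root(Path("/tmp/workspace"), "notes.txt"))
# resolve_in_root(Path("/tmp/workspace"), "../etc/passwd")  # raises ValueError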