import os
import sys

from huggingface_hub import InferenceClient


class CommanderAgent:
    """Commander agent that coordinates a team of specialized AI agents
    through the Hugging Face Inference API."""

    def __init__(self, hf_token):
        """
        Args:
            hf_token: Hugging Face API token used to authenticate requests.
        """
        self.hf_token = hf_token
        # Use a model suited to the free tier
        self.client = InferenceClient(token=hf_token, model="google/gemma-2b-it")

    def coordinate_team(self, task_description, available_agents):
        """Coordinate the team to complete a task.

        Args:
            task_description: Natural-language description of the task.
            available_agents: Iterable of agent names available for assignment.

        Returns:
            The model's streamed reply assembled into one string
            (a task assignment and execution plan).
        """
        system_message = "你是AI Agent团队的指挥官,负责协调各个专业Agent,分配任务并整合结果。"
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": f"任务描述: {task_description}\n可用的Agent: {', '.join(available_agents)}\n\n请分配任务并提供执行计划。"},
        ]

        # Accumulate streamed chunks in a list and join once at the end:
        # repeated `response += chunk` is quadratic on long responses.
        chunks = []
        # Generation parameters chosen to fit free-tier limits.
        for message in self.client.chat_completion(
            messages,
            max_tokens=256,  # kept low to stay within free-tier limits
            stream=True,
            temperature=0.7,
            top_p=0.9,
        ):
            choices = message.choices
            # Truthiness check instead of len(); skips empty/None deltas.
            if choices and choices[0].delta.content:
                chunks.append(choices[0].delta.content)
        return "".join(chunks)


# Usage example
if __name__ == "__main__":
    # Read the token from the HF_TOKEN environment variable
    hf_token = os.environ.get("HF_TOKEN", "")
    if not hf_token:
        print("错误:请设置HF_TOKEN环境变量")
        # sys.exit is the explicit, always-available way to exit a script
        # (the builtin `exit` is injected by `site` and not guaranteed).
        sys.exit(1)

    commander = CommanderAgent(hf_token)
    task = "开发一个聊天机器人应用"
    agents = ["coder", "tester", "documenter"]
    result = commander.coordinate_team(task, agents)
    print(result)