-
Notifications
You must be signed in to change notification settings - Fork 5.2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
add your own LLM_api #1386
base: main
Are you sure you want to change the base?
add your own LLM_api #1386
Changes from 2 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,39 @@ | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/5/5 22:59
@Author  : alexanderwu
@File    : __init__.py
"""

# Provider re-exports, grouped and alphabetized (PEP 8 import ordering).
from metagpt.provider.anthropic_api import AnthropicLLM
from metagpt.provider.ark_api import ArkLLM
from metagpt.provider.azure_openai_api import AzureOpenAILLM
from metagpt.provider.bedrock_api import BedrockLLM
from metagpt.provider.dashscope_api import DashScopeLLM
from metagpt.provider.google_gemini_api import GeminiLLM
from metagpt.provider.human_provider import HumanProvider
from metagpt.provider.metagpt_api import MetaGPTLLM
from metagpt.provider.ollama_api import OllamaLLM
from metagpt.provider.openai_api import OpenAILLM
from metagpt.provider.own_api import OwnLLM
from metagpt.provider.qianfan_api import QianFanLLM
from metagpt.provider.spark_api import SparkLLM
from metagpt.provider.zhipuai_api import ZhiPuAILLM

# Public API of the provider package: one entry per provider class above.
__all__ = [
    "AnthropicLLM",
    "ArkLLM",
    "AzureOpenAILLM",
    "BedrockLLM",
    "DashScopeLLM",
    "GeminiLLM",
    "HumanProvider",
    "MetaGPTLLM",
    "OllamaLLM",
    "OpenAILLM",
    "OwnLLM",
    "QianFanLLM",
    "SparkLLM",
    "ZhiPuAILLM",
]
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,133 @@ | ||
import copy | ||
import os | ||
import requests | ||
import json | ||
import sys | ||
import pdb | ||
import json | ||
import requests | ||
import hashlib | ||
import uuid | ||
import time | ||
import traceback | ||
import re | ||
from pathlib import Path | ||
import pandas as pd | ||
import datetime | ||
import uuid | ||
import qianfan | ||
from qianfan.resources.typing import JsonBody | ||
from metagpt.configs.llm_config import LLMConfig, LLMType | ||
from metagpt.const import USE_CONFIG_TIMEOUT | ||
from metagpt.logs import log_llm_stream | ||
from metagpt.provider.base_llm import BaseLLM | ||
from metagpt.provider.llm_provider_registry import register_provider | ||
from metagpt.utils.cost_manager import CostManager | ||
from metagpt.utils.token_counter import ( | ||
QIANFAN_ENDPOINT_TOKEN_COSTS, | ||
QIANFAN_MODEL_TOKEN_COSTS, | ||
) | ||
|
||
@register_provider(LLMType.SHAHE)
class OwnLLM(BaseLLM):
    """Template provider: plug a self-hosted LLM HTTP API into MetaGPT.

    Fill in the endpoint URL and credentials in :meth:`request_eb_dynamic_ppo`
    and complete :meth:`__init_ownapi` for your own service.

    Fixes over the draft: ``__init_ownapi`` now has a real body and defines
    ``self.token_costs`` (previously read but never set, so ``__init__``
    crashed); the retry loop uses a named, bounded constant instead of 666;
    the bare ``except:`` is narrowed; HTTP calls carry a timeout; and the
    async entry points no longer block the event loop.
    """

    # Retry policy for the HTTP call (the draft hard-coded 666 attempts).
    MAX_RETRIES = 10
    RETRY_DELAY_SECS = 1
    REQUEST_TIMEOUT_SECS = 60

    def __init__(self, config: LLMConfig):
        self.config = config
        # This API carries the system prompt inside the normal message list.
        self.use_system_prompt = False
        self.__init_ownapi()
        self.cost_manager = CostManager(token_costs=self.token_costs)

    def __init_ownapi(self):
        """Finish your own initialization here (endpoint, credentials, ...).

        Defaults ``token_costs`` to the qianfan pricing tables so that
        ``CostManager`` has data to work with — replace with your model's
        real pricing. (The draft left ``self.token_costs`` undefined.)
        """
        self.token_costs = {**QIANFAN_MODEL_TOKEN_COSTS, **QIANFAN_ENDPOINT_TOKEN_COSTS}

    def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
        """Build the request kwargs from the config and the message list."""
        kwargs = {
            "messages": messages,
            "stream": stream,
        }
        if self.config.temperature > 0:
            kwargs["temperature"] = self.config.temperature
        if self.config.endpoint:
            kwargs["endpoint"] = self.config.endpoint
        elif self.config.model:
            kwargs["model"] = self.config.model

        # Some backends take the system prompt as a dedicated field rather
        # than as the first message.
        if self.use_system_prompt and messages and messages[0]["role"] == "system":
            kwargs["messages"] = messages[1:]
            kwargs["system"] = messages[0]["content"]
        return kwargs

    def _update_costs(self, usage: dict):
        """Update each request's token cost."""
        model_or_endpoint = self.config.model or self.config.endpoint
        # Only compute costs locally when we have a pricing entry for it.
        local_calc_usage = model_or_endpoint in self.token_costs
        super()._update_costs(usage, model_or_endpoint, local_calc_usage)

    def get_choice_text(self, resp: JsonBody) -> str:
        """Extract the answer text from a response body."""
        return resp.get("result", "")

    def request_eb_dynamic_ppo(self, query, history, model_id="", compute_close=False):
        """POST one turn to the custom HTTP endpoint, retrying on failure.

        Returns the decoded JSON body (shape-checked to contain
        ``data.result``), or ``None`` when every attempt failed.
        """
        url = ""  # TODO: fill in your service endpoint
        payload = {
            "text": query,
            "model_version": "",
            "session_id": uuid.uuid1().hex,
            "history": history,
            "userId": "",
            "key": "",
            "model_id": model_id,
        }
        for _attempt in range(self.MAX_RETRIES):
            try:
                # json= sets the Content-Type: application/json header for us.
                resp = requests.post(url, json=payload, timeout=self.REQUEST_TIMEOUT_SECS)
                resp_json = resp.json()
                # Validate the expected shape before returning.
                resp_json["data"]["result"]
                return resp_json
            except (requests.RequestException, ValueError, KeyError, TypeError):
                # Network error, non-JSON body, or missing data.result: retry.
                time.sleep(self.RETRY_DELAY_SECS)
        return None

    def completion(self, messages: list[dict]) -> JsonBody:
        """Run the messages through the API turn by turn; return the last response."""
        history = []
        res = None
        for message in messages:
            query = message["content"]
            res = self.request_eb_dynamic_ppo(query, history, model_id="")
            if res is None:
                raise RuntimeError("own LLM API request failed after retries")
            # The service expects most-recent-first history pairs — TODO confirm.
            history.insert(0, [query, res["data"]["result"]])
        # Only the final response is returned; adapt if you need all of them.
        return res

    async def _achat_completion(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> JsonBody:
        # requests is blocking; run it in a worker thread so the event loop
        # is not stalled for the duration of the HTTP round-trips.
        import asyncio

        return await asyncio.to_thread(self.completion, messages)

    async def acompletion(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> JsonBody:
        return await self._achat_completion(messages, timeout=self.get_timeout(timeout))

    async def _achat_completion_stream(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> str:
        """Pseudo-streaming: emit each turn's full answer as one stream chunk."""
        collected_content = []
        history = []
        for message in messages:
            query = message["content"]
            res = self.request_eb_dynamic_ppo(query, history, model_id="")
            if res is None:
                raise RuntimeError("own LLM API request failed after retries")
            content = res["data"]["result"]
            log_llm_stream(content)
            collected_content.append(content)
            log_llm_stream("\n")
            history.insert(0, [query, content])
        return "".join(collected_content)
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,133 @@ | ||
import copy | ||
import os | ||
import requests | ||
import json | ||
import sys | ||
import pdb | ||
import json | ||
import requests | ||
import hashlib | ||
import uuid | ||
import time | ||
import traceback | ||
import re | ||
from pathlib import Path | ||
import pandas as pd | ||
import datetime | ||
import uuid | ||
import qianfan | ||
from qianfan.resources.typing import JsonBody | ||
from metagpt.configs.llm_config import LLMConfig, LLMType | ||
from metagpt.const import USE_CONFIG_TIMEOUT | ||
from metagpt.logs import log_llm_stream | ||
from metagpt.provider.base_llm import BaseLLM | ||
from metagpt.provider.llm_provider_registry import register_provider | ||
from metagpt.utils.cost_manager import CostManager | ||
from metagpt.utils.token_counter import ( | ||
QIANFAN_ENDPOINT_TOKEN_COSTS, | ||
QIANFAN_MODEL_TOKEN_COSTS, | ||
) | ||
|
||
@register_provider(LLMType.SHAHE)
class OwnLLM(BaseLLM):
    """Template provider: plug a self-hosted LLM HTTP API into MetaGPT.

    NOTE(review): this file is a byte-for-byte duplicate of the other
    own_api.py in this PR — one of the two copies should be removed.

    Fixes over the draft: ``__init_ownapi`` now has a real body and defines
    ``self.token_costs`` (previously read but never set, so ``__init__``
    crashed); the retry loop uses a named, bounded constant instead of 666;
    the bare ``except:`` is narrowed; HTTP calls carry a timeout; and the
    async entry points no longer block the event loop.
    """

    # Retry policy for the HTTP call (the draft hard-coded 666 attempts).
    MAX_RETRIES = 10
    RETRY_DELAY_SECS = 1
    REQUEST_TIMEOUT_SECS = 60

    def __init__(self, config: LLMConfig):
        self.config = config
        # This API carries the system prompt inside the normal message list.
        self.use_system_prompt = False
        self.__init_ownapi()
        self.cost_manager = CostManager(token_costs=self.token_costs)

    def __init_ownapi(self):
        """Finish your own initialization here (endpoint, credentials, ...).

        Defaults ``token_costs`` to the qianfan pricing tables so that
        ``CostManager`` has data to work with — replace with your model's
        real pricing. (The draft left ``self.token_costs`` undefined.)
        """
        self.token_costs = {**QIANFAN_MODEL_TOKEN_COSTS, **QIANFAN_ENDPOINT_TOKEN_COSTS}

    def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
        """Build the request kwargs from the config and the message list."""
        kwargs = {
            "messages": messages,
            "stream": stream,
        }
        if self.config.temperature > 0:
            kwargs["temperature"] = self.config.temperature
        if self.config.endpoint:
            kwargs["endpoint"] = self.config.endpoint
        elif self.config.model:
            kwargs["model"] = self.config.model

        # Some backends take the system prompt as a dedicated field rather
        # than as the first message.
        if self.use_system_prompt and messages and messages[0]["role"] == "system":
            kwargs["messages"] = messages[1:]
            kwargs["system"] = messages[0]["content"]
        return kwargs

    def _update_costs(self, usage: dict):
        """Update each request's token cost."""
        model_or_endpoint = self.config.model or self.config.endpoint
        # Only compute costs locally when we have a pricing entry for it.
        local_calc_usage = model_or_endpoint in self.token_costs
        super()._update_costs(usage, model_or_endpoint, local_calc_usage)

    def get_choice_text(self, resp: JsonBody) -> str:
        """Extract the answer text from a response body."""
        return resp.get("result", "")

    def request_eb_dynamic_ppo(self, query, history, model_id="", compute_close=False):
        """POST one turn to the custom HTTP endpoint, retrying on failure.

        Returns the decoded JSON body (shape-checked to contain
        ``data.result``), or ``None`` when every attempt failed.
        """
        url = ""  # TODO: fill in your service endpoint
        payload = {
            "text": query,
            "model_version": "",
            "session_id": uuid.uuid1().hex,
            "history": history,
            "userId": "",
            "key": "",
            "model_id": model_id,
        }
        for _attempt in range(self.MAX_RETRIES):
            try:
                # json= sets the Content-Type: application/json header for us.
                resp = requests.post(url, json=payload, timeout=self.REQUEST_TIMEOUT_SECS)
                resp_json = resp.json()
                # Validate the expected shape before returning.
                resp_json["data"]["result"]
                return resp_json
            except (requests.RequestException, ValueError, KeyError, TypeError):
                # Network error, non-JSON body, or missing data.result: retry.
                time.sleep(self.RETRY_DELAY_SECS)
        return None

    def completion(self, messages: list[dict]) -> JsonBody:
        """Run the messages through the API turn by turn; return the last response."""
        history = []
        res = None
        for message in messages:
            query = message["content"]
            res = self.request_eb_dynamic_ppo(query, history, model_id="")
            if res is None:
                raise RuntimeError("own LLM API request failed after retries")
            # The service expects most-recent-first history pairs — TODO confirm.
            history.insert(0, [query, res["data"]["result"]])
        # Only the final response is returned; adapt if you need all of them.
        return res

    async def _achat_completion(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> JsonBody:
        # requests is blocking; run it in a worker thread so the event loop
        # is not stalled for the duration of the HTTP round-trips.
        import asyncio

        return await asyncio.to_thread(self.completion, messages)

    async def acompletion(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> JsonBody:
        return await self._achat_completion(messages, timeout=self.get_timeout(timeout))

    async def _achat_completion_stream(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> str:
        """Pseudo-streaming: emit each turn's full answer as one stream chunk."""
        collected_content = []
        history = []
        for message in messages:
            query = message["content"]
            res = self.request_eb_dynamic_ppo(query, history, model_id="")
            if res is None:
                raise RuntimeError("own LLM API request failed after retries")
            content = res["data"]["result"]
            log_llm_stream(content)
            collected_content.append(content)
            log_llm_stream("\n")
            history.insert(0, [query, content])
        return "".join(collected_content)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
should add unit-test code