1. Get the group_id from the 【基础信息】 (Basic Information) page.
2. Get the API key from the 【接口秘钥】 (API Keys) page.
3. Fill the group_id and API key into the backend key pool (a request sketch using both values follows this list).
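For orientation, here is a minimal sketch of where the two credentials typically end up when calling MiniMax directly over HTTP. The endpoint URL is the one used in the wrapper code below; passing the group_id as a GroupId query parameter follows the style of the older chatcompletion_pro endpoint, and the model name and payload shape are assumptions, not documented values.

```python
import requests

GROUP_ID = "your_group_id"   # from 【基础信息】 (Basic Information)
API_KEY = "your_api_key"     # from 【接口秘钥】 (API Keys)

# Assumption: GroupId goes in the query string, the API key in a Bearer header.
url = f"https://api.minimax.chat/v1/text/chatcompletion_v2?GroupId={GROUP_ID}"
headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
payload = {
    "model": "abab6.5s-chat",  # assumed model name; use whichever model you enabled
    "messages": [{"role": "user", "content": "Hello"}],
}

resp = requests.post(url, headers=headers, json=payload, timeout=60)
print(resp.json())
```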
class MiniMaxChat(Base):  # class and base-class names inferred from the surrounding context
    def __init__(self, key, model_name, base_url=None):
        # Fall back to the default MiniMax endpoint before initializing the base class.
        if not base_url:
            base_url = "https://api.minimax.chat/v1/text/chatcompletion_v2"
        super().__init__(key, model_name, base_url)
        self.base_url = base_url
        self.model_name = model_name
        self.api_key = key

    def chat(self, system, history, gen_conf):
        if system:
            # Prepend the system prompt to the message history.
            history.insert(0, {"role": "system", "content": system})
        ...
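A brief usage sketch of the wrapper above. The model name, system prompt, and gen_conf keys are illustrative assumptions, and the return shape depends on the base class, so the result is simply printed.

```python
# Hypothetical usage of the MiniMaxChat wrapper sketched above.
mdl = MiniMaxChat(key="your_api_key", model_name="abab6.5s-chat")  # model name is an assumption

history = [{"role": "user", "content": "Say hello in one sentence."}]
gen_conf = {"temperature": 0.7, "max_tokens": 256}  # assumed generation options

result = mdl.chat(system="You are a helpful assistant.",
                  history=history,
                  gen_conf=gen_conf)
print(result)
```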
From the embedding loader (model-name branching):

    ... openai_api_key=get_model_path(model), chunk_size=CHUNK_SIZE)
    elif 'bge-' in model:

server/knowledge_base/kb_cache/faiss_cache.py (7 additions, 4 deletions) begins with:

    from configs import CACHED_VS_NUM...
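The first fragment above comes from a loader that picks an embedding backend based on the model name. Below is a minimal sketch of that pattern, assuming LangChain's OpenAIEmbeddings / HuggingFaceBgeEmbeddings / HuggingFaceEmbeddings classes; CHUNK_SIZE and get_model_path are stand-ins for the project's own config and helper, and only their names come from the fragment.

```python
from langchain.embeddings import (OpenAIEmbeddings,
                                  HuggingFaceBgeEmbeddings,
                                  HuggingFaceEmbeddings)

# Stand-ins for project-level config/helpers; only the names come from the fragment above.
CHUNK_SIZE = 250

def get_model_path(model: str) -> str:
    # In the real project this maps a model name to an API key or a local model path.
    return model

def load_embeddings(model: str, device: str = "cpu"):
    # Branch on the model name: OpenAI models go through the API,
    # while bge-* and other local models use the HuggingFace wrappers.
    if model == "text-embedding-ada-002":
        return OpenAIEmbeddings(model=model,
                                openai_api_key=get_model_path(model),
                                chunk_size=CHUNK_SIZE)
    elif 'bge-' in model:
        return HuggingFaceBgeEmbeddings(model_name=get_model_path(model),
                                        model_kwargs={"device": device})
    else:
        return HuggingFaceEmbeddings(model_name=get_model_path(model),
                                     model_kwargs={"device": device})
```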
Service address: http://host:port/v1; the api-key can be set to any value. The model name can also be set to random, in which case the backend automatically picks a model whose "enabled" flag is true. Example configuration file (using iFlytek Spark spark-lite as an example):

    {
      "load_balancing": "random",
      "services": {
        "xinghuo": [
          {
            "models": ["spark-lite"],
            "enabled": true,
            "credentials": ...
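Since the service exposes an OpenAI-compatible endpoint at http://host:port/v1 and accepts any api-key, it can be called with the standard OpenAI Python client. This is only a minimal sketch; the host, port, and prompt are placeholders.

```python
from openai import OpenAI

# Any api_key is accepted; model="random" lets the backend pick an enabled model.
client = OpenAI(base_url="http://host:port/v1", api_key="anything")

resp = client.chat.completions.create(
    model="random",
    messages=[{"role": "user", "content": "Hello"}],
)
print(resp.choices[0].message.content)
```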
    {key:27, text:'MiniMax', value:27, color:'red'},
    {key:8, text:'自定义渠道', value:8, color:'pink'},        // 自定义渠道 = Custom Channel
    {key:22, text:'知识库:FastGPT', value:22, color:'blue'},   // 知识库 = Knowledge Base
    {key:21, text:'知识库:AI Proxy', value:21, color:'purple'},