
Commit 2a4e2ed

✨ feat: add Cohere provider support (lobehub#7016)
* ✨ feat: add Cohere provider support
* 💄 style: update cohere model list
* 🐛 fix: fix cohere calling, exclude user and stream_options
* 💄 style: update model description
* 🐛 fix: fix cohere model list fetch
* 🐛 fix: fix cohere calling
* 💄 style: update provider desc
* 💄 style: update model name
1 parent 0e0b60e commit 2a4e2ed
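
One bullet worth unpacking: "exclude user and stream_options" refers to Cohere's OpenAI-compatibility endpoint rejecting those request fields, so they need to be stripped before the call. Below is a minimal sketch of that kind of payload filter; the pruneCoherePayload helper and the payload shape are illustrative assumptions, not the exact code in this commit.

// Illustrative only: drop the fields Cohere's OpenAI-compatible endpoint
// does not accept, leaving the rest of the chat-completion payload intact.
interface ChatCompletionPayload {
  messages: { content: string; role: 'assistant' | 'system' | 'user' }[];
  model: string;
  stream?: boolean;
  stream_options?: { include_usage?: boolean };
  user?: string;
  [key: string]: unknown;
}

export const pruneCoherePayload = (payload: ChatCompletionPayload) => {
  // `user` and `stream_options` are removed; everything else passes through.
  const { stream_options: _streamOptions, user: _user, ...rest } = payload;
  return rest;
};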

File tree

13 files changed: +356 -0 lines changed


Dockerfile

+2
@@ -157,6 +157,8 @@ ENV \
     BAICHUAN_API_KEY="" BAICHUAN_MODEL_LIST="" \
     # Cloudflare
     CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
+    # Cohere
+    COHERE_API_KEY="" COHERE_MODEL_LIST="" COHERE_PROXY_URL="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
     # Fireworks AI

Dockerfile.database

+2
@@ -200,6 +200,8 @@ ENV \
     BAICHUAN_API_KEY="" BAICHUAN_MODEL_LIST="" \
     # Cloudflare
     CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
+    # Cohere
+    COHERE_API_KEY="" COHERE_MODEL_LIST="" COHERE_PROXY_URL="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
     # Fireworks AI

Dockerfile.pglite

+2
@@ -158,6 +158,8 @@ ENV \
     BAICHUAN_API_KEY="" BAICHUAN_MODEL_LIST="" \
     # Cloudflare
     CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
+    # Cohere
+    COHERE_API_KEY="" COHERE_MODEL_LIST="" COHERE_PROXY_URL="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
     # Fireworks AI

src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx

+2
@@ -5,6 +5,7 @@ import {
   Ai360ProviderCard,
   AnthropicProviderCard,
   BaichuanProviderCard,
+  CohereProviderCard,
   DeepSeekProviderCard,
   FireworksAIProviderCard,
   GiteeAIProviderCard,
@@ -82,6 +83,7 @@ export const useProviderList = (): ProviderItem[] => {
   XAIProviderCard,
   JinaProviderCard,
   SambaNovaProviderCard,
+  CohereProviderCard,
   QwenProviderCard,
   WenxinProviderCard,
   HunyuanProviderCard,

src/config/aiModels/cohere.ts

+243
@@ -0,0 +1,243 @@
import { AIChatModelCard } from '@/types/aiModel';

const cohereChatModels: AIChatModelCard[] = [
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 256_000,
    description:
      'Command A is our most performant model to date, excelling at tool use, agents, retrieval-augmented generation (RAG), and multilingual use cases. Command A has a 256K context length, runs on just two GPUs, and delivers 150% higher throughput compared to Command R+ 08-2024.',
    displayName: 'Command A 03-2025',
    enabled: true,
    id: 'command-a-03-2025',
    maxOutput: 8000,
    pricing: {
      input: 2.5,
      output: 10
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'command-r-plus is an alias for command-r-plus-04-2024, so if you use command-r-plus in the API you are actually pointing to that model.',
    displayName: 'Command R+',
    enabled: true,
    id: 'command-r-plus',
    maxOutput: 4000,
    pricing: {
      input: 2.5,
      output: 10
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'Command R+ is an instruction-following conversational model that delivers higher quality and greater reliability on language tasks, with a longer context length than previous models. It is best suited for complex RAG workflows and multi-step tool use.',
    displayName: 'Command R+ 04-2024',
    id: 'command-r-plus-04-2024',
    maxOutput: 4000,
    pricing: {
      input: 3,
      output: 15
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'command-r is an alias for command-r-03-2024, so if you use command-r in the API you are actually pointing to that model.',
    displayName: 'Command R',
    enabled: true,
    id: 'command-r',
    maxOutput: 4000,
    pricing: {
      input: 0.15,
      output: 0.6
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'command-r-08-2024 is an updated version of the Command R model, released in August 2024.',
    displayName: 'Command R 08-2024',
    id: 'command-r-08-2024',
    maxOutput: 4000,
    pricing: {
      input: 0.15,
      output: 0.6
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'Command R is an instruction-following conversational model that delivers higher quality and greater reliability on language tasks, with a longer context length than previous models. It can be used for complex workflows such as code generation, retrieval-augmented generation (RAG), tool use, and agents.',
    displayName: 'Command R 03-2024',
    id: 'command-r-03-2024',
    maxOutput: 4000,
    pricing: {
      input: 0.5,
      output: 1.5
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'command-r7b-12-2024 is a small and efficient update released in December 2024. It excels at RAG, tool use, agents, and other tasks that require complex reasoning and multi-step processing.',
    displayName: 'Command R7B 12-2024',
    enabled: true,
    id: 'command-r7b-12-2024',
    maxOutput: 4000,
    pricing: {
      input: 0.0375,
      output: 0.15
    },
    type: 'chat'
  },
  {
    contextWindowTokens: 4000,
    description:
      'An instruction-following conversational model that delivers high quality and greater reliability on language tasks, with a longer context length than our base generative models.',
    displayName: 'Command',
    enabled: true,
    id: 'command',
    maxOutput: 4000,
    pricing: {
      input: 1,
      output: 2
    },
    type: 'chat'
  },
  {
    abilities: {
      functionCall: true,
    },
    contextWindowTokens: 128_000,
    description:
      'To shorten the gap between major releases, we offer nightly versions of the Command models. For the Command family, this version is called command-nightly. Note that command-nightly is the latest, most experimental, and (possibly) unstable version. Nightly versions are updated regularly and without prior notice, so they are not recommended for production use.',
    displayName: 'Command Nightly',
    id: 'command-nightly',
    maxOutput: 4000,
    pricing: {
      input: 1,
      output: 2
    },
    type: 'chat'
  },
  {
    contextWindowTokens: 4000,
    description:
      'A smaller, faster version of Command that is almost as capable, but faster.',
    displayName: 'Command Light',
    enabled: true,
    id: 'command-light',
    maxOutput: 4000,
    pricing: {
      input: 0.3,
      output: 0.6
    },
    type: 'chat'
  },
  {
    contextWindowTokens: 4000,
    description:
      'To shorten the gap between major releases, we offer nightly versions of the Command models. For the command-light family, this version is called command-light-nightly. Note that command-light-nightly is the latest, most experimental, and (possibly) unstable version. Nightly versions are updated regularly and without prior notice, so they are not recommended for production use.',
    displayName: 'Command Light Nightly',
    id: 'command-light-nightly',
    maxOutput: 4000,
    pricing: {
      input: 0.3,
      output: 0.6
    },
    type: 'chat'
  },
  {
    contextWindowTokens: 128_000,
    description:
      'Aya Expanse is a highly performant 32B multilingual model designed to rival monolingual performance through innovations in instruction tuning, data arbitrage, preference training, and model merging. It supports 23 languages.',
    displayName: 'Aya Expanse 32B',
    enabled: true,
    id: 'c4ai-aya-expanse-32b',
    maxOutput: 4000,
    pricing: {
      input: 0.5,
      output: 1.5
    },
    type: 'chat'
  },
  {
    contextWindowTokens: 8000,
    description:
      'Aya Expanse is a highly performant 8B multilingual model designed to rival monolingual performance through innovations in instruction tuning, data arbitrage, preference training, and model merging. It supports 23 languages.',
    displayName: 'Aya Expanse 8B',
    enabled: true,
    id: 'c4ai-aya-expanse-8b',
    maxOutput: 4000,
    pricing: {
      input: 0.5,
      output: 1.5
    },
    type: 'chat'
  },
  {
    abilities: {
      vision: true,
    },
    contextWindowTokens: 16_000,
    description:
      'Aya Vision is a state-of-the-art multimodal model that excels on key benchmarks of language, text, and image capability. It supports 23 languages. This 32-billion-parameter version focuses on state-of-the-art multilingual performance.',
    displayName: 'Aya Vision 32B',
    enabled: true,
    id: 'c4ai-aya-vision-32b',
    maxOutput: 4000,
    pricing: {
      input: 0.5,
      output: 1.5
    },
    type: 'chat'
  },
  {
    abilities: {
      vision: true,
    },
    contextWindowTokens: 16_000,
    description:
      'Aya Vision is a state-of-the-art multimodal model that excels on key benchmarks of language, text, and image capability. This 8-billion-parameter version focuses on low latency and best-in-class performance.',
    displayName: 'Aya Vision 8B',
    enabled: true,
    id: 'c4ai-aya-vision-8b',
    maxOutput: 4000,
    pricing: {
      input: 0.5,
      output: 1.5
    },
    type: 'chat'
  },
];

export const allModels = [...cohereChatModels];

export default allModels;
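
For orientation, here is how a consumer might read this list once it is exported. The snippet is an illustration, not code from this commit, and it assumes the pricing figures are USD per million tokens, as they appear to be.

import cohereModels from '@/config/aiModels/cohere';

// Models flagged `enabled` are the ones surfaced by default.
const enabledIds = cohereModels.filter((m) => m.enabled).map((m) => m.id);

// Rough cost estimate for Command A, assuming prices are USD per 1M tokens.
const commandA = cohereModels.find((m) => m.id === 'command-a-03-2025');
const estimateUSD = (inputTokens: number, outputTokens: number) =>
  ((commandA?.pricing?.input ?? 0) * inputTokens +
    (commandA?.pricing?.output ?? 0) * outputTokens) /
  1_000_000;

console.log(enabledIds, estimateUSD(10_000, 2_000));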

src/config/aiModels/index.ts

+3
@@ -8,6 +8,7 @@ import { default as azureai } from './azureai';
 import { default as baichuan } from './baichuan';
 import { default as bedrock } from './bedrock';
 import { default as cloudflare } from './cloudflare';
+import { default as cohere } from './cohere';
 import { default as deepseek } from './deepseek';
 import { default as doubao } from './doubao';
 import { default as fireworksai } from './fireworksai';
@@ -77,6 +78,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   baichuan,
   bedrock,
   cloudflare,
+  cohere,
   deepseek,
   doubao,
   fireworksai,
@@ -127,6 +129,7 @@ export { default as azureai } from './azureai';
 export { default as baichuan } from './baichuan';
 export { default as bedrock } from './bedrock';
 export { default as cloudflare } from './cloudflare';
+export { default as cohere } from './cohere';
 export { default as deepseek } from './deepseek';
 export { default as doubao } from './doubao';
 export { default as fireworksai } from './fireworksai';

src/config/llm.ts

+6
@@ -150,6 +150,9 @@ export const getLLMConfig = () => {

      ENABLED_PPIO: z.boolean(),
      PPIO_API_KEY: z.string().optional(),
+
+      ENABLED_COHERE: z.boolean(),
+      COHERE_API_KEY: z.string().optional(),
    },
    runtimeEnv: {
      API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -298,6 +301,9 @@ export const getLLMConfig = () => {

      ENABLED_PPIO: !!process.env.PPIO_API_KEY,
      PPIO_API_KEY: process.env.PPIO_API_KEY,
+
+      ENABLED_COHERE: !!process.env.COHERE_API_KEY,
+      COHERE_API_KEY: process.env.COHERE_API_KEY,
    },
  });
};
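
The pattern mirrors the other providers: ENABLED_COHERE is derived purely from the presence of COHERE_API_KEY. A small sketch of how downstream code could consume it follows; the resolveCohereKey guard is illustrative, not part of this commit.

import { getLLMConfig } from '@/config/llm';

// Illustrative guard: only hand out the key when the provider is enabled.
export const resolveCohereKey = (): string => {
  const { COHERE_API_KEY, ENABLED_COHERE } = getLLMConfig();
  if (!ENABLED_COHERE || !COHERE_API_KEY) {
    throw new Error('Cohere is not configured: set COHERE_API_KEY in the environment.');
  }
  return COHERE_API_KEY;
};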

src/config/modelProviders/cohere.ts

+19
@@ -0,0 +1,19 @@
import { ModelProviderCard } from '@/types/llm';

const Cohere: ModelProviderCard = {
  chatModels: [],
  checkModel: 'command-r7b-12-2024',
  description: 'Cohere brings you the most cutting-edge multilingual models, advanced retrieval capabilities, and an AI workspace tailored for the modern enterprise, all in one secure platform.',
  id: 'cohere',
  modelsUrl: 'https://docs.cohere.com/v2/docs/models',
  name: 'Cohere',
  settings: {
    proxyUrl: {
      placeholder: 'https://api.cohere.ai/compatibility/v1',
    },
    sdkType: 'openai',
  },
  url: 'https://cohere.com',
};

export default Cohere;
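
Because sdkType is 'openai' and the proxyUrl placeholder points at Cohere's OpenAI-compatibility endpoint, requests ultimately go through an OpenAI-style client. A standalone sketch with the openai npm package shows what that amounts to; it illustrates the endpoint, not LobeChat's internal runtime wiring.

import OpenAI from 'openai';

// Point a standard OpenAI client at the same base URL used as the
// proxyUrl placeholder above.
const client = new OpenAI({
  apiKey: process.env.COHERE_API_KEY,
  baseURL: 'https://api.cohere.ai/compatibility/v1',
});

const reply = await client.chat.completions.create({
  messages: [{ content: 'Say hello in three languages.', role: 'user' }],
  model: 'command-r7b-12-2024',
});

console.log(reply.choices[0]?.message?.content);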

src/config/modelProviders/index.ts

+4
@@ -8,6 +8,7 @@ import AzureAIProvider from './azureai';
 import BaichuanProvider from './baichuan';
 import BedrockProvider from './bedrock';
 import CloudflareProvider from './cloudflare';
+import CohereProvider from './cohere';
 import DeepSeekProvider from './deepseek';
 import DoubaoProvider from './doubao';
 import FireworksAIProvider from './fireworksai';
@@ -75,6 +76,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   XAIProvider.chatModels,
   JinaProvider.chatModels,
   SambaNovaProvider.chatModels,
+  CohereProvider.chatModels,
   ZeroOneProvider.chatModels,
   StepfunProvider.chatModels,
   NovitaProvider.chatModels,
@@ -125,6 +127,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   XAIProvider,
   JinaProvider,
   SambaNovaProvider,
+  CohereProvider,
   QwenProvider,
   WenxinProvider,
   TencentcloudProvider,
@@ -165,6 +168,7 @@ export { default as AzureAIProviderCard } from './azureai';
 export { default as BaichuanProviderCard } from './baichuan';
 export { default as BedrockProviderCard } from './bedrock';
 export { default as CloudflareProviderCard } from './cloudflare';
+export { default as CohereProviderCard } from './cohere';
 export { default as DeepSeekProviderCard } from './deepseek';
 export { default as DoubaoProviderCard } from './doubao';
 export { default as FireworksAIProviderCard } from './fireworksai';
