Skip to content

Commit e2a290f

Browse files
Authored: Dec 3, 2024
[Bump] Bump version to 0.3.7 (#1733)
1 parent 98c4666 commit e2a290f

File tree

4 files changed

+39
-3
lines changed

4 files changed

+39
-3
lines changed
 

‎opencompass/__init__.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,18 @@
1-
__version__ = '0.3.6'
1+
__version__ = '0.3.7'
22

33

44
def _warn_about_config_migration():
    """Warn users that AMOTIC config files move into the package at v0.4.0.

    Emits a ``UserWarning`` telling callers to update paths from
    ``./configs/{datasets,models,summarizers}`` to ``opencompass/configs/``.
    """
    # Imported lazily so importing the package stays cheap until the
    # warning is actually triggered.
    import warnings

    # Build the message once so the warn() call below stays readable.
    message = (
        'Starting from v0.4.0, all AMOTIC configuration files currently '
        'located in `./configs/datasets`, `./configs/models`, and '
        '`./configs/summarizers` will be migrated to the '
        '`opencompass/configs/` package. Please update your configuration '
        'file paths accordingly.'
    )
    # stacklevel=2 attributes the warning to the caller, not this helper.
    warnings.warn(
        message,
        UserWarning,
        stacklevel=2,
    )
1416

1517

1618
# Trigger the warning

‎opencompass/configs/models/deepseek/lmdeploy_deepseek_v2_lite.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
models = [
44
dict(
55
type=TurboMindModelwithChatTemplate,
6-
abbr='deepseek-v2_lite-turbomind',
6+
abbr='deepseek-v2_lite-chat-turbomind',
77
path='deepseek-ai/DeepSeek-V2-Lite-Chat',
88
engine_config=dict(
99
session_len=7168,
Original file line numberDiff line numberDiff line change
from opencompass.models import TurboMindModel

# Base (non-chat) Gemma-2 27B served through LMDeploy's TurboMind backend.
# top_k=1 with a near-zero temperature makes decoding effectively greedy;
# tp=2 / num_gpus=2: the 27B weights are sharded across two GPUs.
models = [
    dict(
        type=TurboMindModel,
        abbr='gemma-2-27b-turbomind',
        path='google/gemma-2-27b',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1,
                        temperature=1e-6,
                        top_p=0.9,
                        max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
Original file line numberDiff line numberDiff line change
from opencompass.models import TurboMindModel

# Base (non-chat) Gemma-2 9B served through LMDeploy's TurboMind backend.
# top_k=1 with a near-zero temperature makes decoding effectively greedy;
# the 9B model fits on a single GPU (tp=1 / num_gpus=1).
models = [
    dict(
        type=TurboMindModel,
        abbr='gemma-2-9b-turbomind',
        path='google/gemma-2-9b',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1,
                        temperature=1e-6,
                        top_p=0.9,
                        max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]

0 commit comments

Comments (0)
Please sign in to comment.