Mirror of https://www.modelscope.cn/Shanghai_AI_Laboratory/internlm3-8b-instruct.git
Synced 2025-08-14 04:35:53 +08:00
Update README.md
Commit 667e0de63d (parent d085e29bbc)
README.md: 22 lines changed (9 additions, 13 deletions)
````diff
@@ -80,15 +80,14 @@ transformers >= 4.48
 
 ### Conversation Mode
 
-#### Transformers inference
+#### Modelscope inference
 
 To load the InternLM3 8B Instruct model using Transformers, use the following code:
 
 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 # model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
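For readability, this is what the snippet looks like once the hunk above is applied. It is a minimal sketch, assuming the `modelscope` package is installed (`pip install modelscope`); unlike the hunk, which leaves the float16 load commented out, the sketch uncomments it so the model does not load in float32:

```python
import torch
from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM

# Download the weights from the ModelScope hub (cached locally on later runs)
# and get back the local directory they were saved to.
model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')

tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# Load in float16; the float32 default roughly doubles memory use and can OOM.
model = AutoModelForCausalLM.from_pretrained(
    model_dir, trust_remote_code=True, torch_dtype=torch.float16
)
model = model.eval()  # inference mode: disables dropout etc.
```

The same two-line change (import `snapshot_download`, resolve `model_dir` through it) is repeated in the three remaining hunks below.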
````diff
@@ -272,9 +271,8 @@ Focus on clear, logical progression of ideas and thorough explanation of your ma
 
 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
````diff
@@ -442,9 +440,8 @@ transformers >= 4.48
 
 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 # model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
````diff
@@ -623,9 +620,8 @@ Focus on clear, logical progression of ideas and thorough explanation of your ma
 
 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
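The four hunks change only how the checkpoint is resolved; the prompting and generation code in the README is untouched. As a quick end-to-end check, a single chat turn with the model loaded above might look like the following. The message contents, system prompt, and sampling parameters are illustrative assumptions, not taken from this commit:

```python
# Hypothetical usage sketch; messages and sampling settings are placeholders.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Please tell me five scenic spots in Shanghai."},
]
# Render the chat into the model's prompt format and tokenize it.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(
    input_ids,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.8,
    top_p=0.8,
)
# Strip the prompt tokens and decode only the newly generated reply.
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```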