Mirror of https://www.modelscope.cn/Shanghai_AI_Laboratory/internlm3-8b-instruct.git (synced 2025-08-16 05:25:53 +08:00)
Update README.md
This commit is contained in:
- parent d085e29bbc
- commit 667e0de63d
Changed file: README.md
````diff
@@ -80,15 +80,14 @@ transformers >= 4.48

 ### Conversation Mode

-#### Transformers inference
+#### Modelscope inference

 To load the InternLM3 8B Instruct model using Transformers, use the following code:

 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 # model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
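For context, the right-hand side of this hunk assembles into the self-contained loading snippet below. This is a sketch of how the updated README section reads after the change; the trailing `.eval()` call is an assumption added here for clarity and is not part of this hunk.

```python
import torch
from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM

# Resolve the ModelScope repo id to a local cache directory
# (downloads on first use, reuses the cache afterwards).
model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')

tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
model = model.eval()  # assumption: inference-only usage, not shown in this hunk
```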
````diff
@@ -272,9 +271,8 @@ Focus on clear, logical progression of ideas and thorough explanation of your ma

 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
````diff
@@ -442,9 +440,8 @@ transformers >= 4.48

 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 # model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
````diff
@@ -623,9 +620,8 @@ Focus on clear, logical progression of ideas and thorough explanation of your ma

 ```python
 import torch
-from modelscope import AutoTokenizer, AutoModelForCausalLM
-
-model_dir = "internlm/internlm3-8b-instruct"
+from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
 model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
````
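All four hunks make the same substitution: the hard-coded Hugging Face repo id is replaced by a `snapshot_download` call against the ModelScope hub, so the README's examples fetch the weights from ModelScope instead. A minimal end-to-end usage sketch under that change might look like the following; the chat `messages` and the generation parameters are illustrative assumptions, not part of this commit.

```python
import torch
from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM

# Download (or reuse the cached copy of) the ModelScope repo and get its local path.
model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm3-8b-instruct')

tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.float16)
model = model.cuda().eval()  # assumption: a single CUDA device is available

# Illustrative prompt; the README's own examples may differ.
messages = [
    {"role": "system", "content": "You are an AI assistant whose name is InternLM."},
    {"role": "user", "content": "Please tell me five scenic spots in Shanghai"},
]
inputs = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

generated = model.generate(inputs, max_new_tokens=512, do_sample=True, temperature=0.8, top_p=0.8)
# Strip the prompt tokens before decoding so only the reply is printed.
print(tokenizer.decode(generated[0][inputs.shape[1]:], skip_special_tokens=True))
```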