
Commit ff7c169

Commit message: updated.
1 parent 83eabd8, commit ff7c169

10 files changed (+15, -15 lines)


examples/classification_regression/README.md (+1, -1)

@@ -7,7 +7,7 @@ This example implements classification tasks language models, supporting sequenc
 
 ```shell
 # Install RedCoast
-pip install redco==0.4.20
+pip install redco==0.4.21
 # Install torchvision/torch (cpu version)
 pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
 ```

examples/classification_regression/main.py (+2, -2)

@@ -75,7 +75,7 @@ def main(dataset_name='sst2',
          per_device_batch_size=4,
          n_epochs=2,
          learning_rate=2e-5,
-         warmup_rate=0.1,
+         warmup_ratio=0.1,
          lr_schedule_type='linear',
          grad_norm_clip=1.,
          weight_decay=0.,
@@ -117,7 +117,7 @@ def main(dataset_name='sst2',
         n_epochs=n_epochs,
         learning_rate=learning_rate,
         schedule_type=lr_schedule_type,
-        warmup_rate=warmup_rate)
+        warmup_ratio=warmup_ratio)
     optimizer = optax.MultiSteps(optax.chain(
         optax.clip_by_global_norm(grad_norm_clip),
         optax.adamw(learning_rate=lr_schedule_fn, weight_decay=weight_decay)
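The `warmup_rate` keyword is renamed to `warmup_ratio` here and in the other example scripts below. The deployer helper that consumes it is not part of this diff; the following optax-only sketch (with a hypothetical `total_steps`) only illustrates what the argument configures, namely the fraction of training steps spent linearly warming up the learning rate before the `'linear'` decay:

```python
import optax

# Hypothetical numbers for illustration; in the real examples these come
# from the dataset size, per_device_batch_size, and n_epochs in main().
total_steps = 10_000
learning_rate = 2e-5
warmup_ratio = 0.1                      # renamed from warmup_rate in this commit
warmup_steps = int(total_steps * warmup_ratio)

# Linear warmup from 0 to the peak LR, then linear decay back to 0.
lr_schedule_fn = optax.join_schedules(
    schedules=[
        optax.linear_schedule(0.0, learning_rate, warmup_steps),
        optax.linear_schedule(learning_rate, 0.0, total_steps - warmup_steps),
    ],
    boundaries=[warmup_steps],
)

optimizer = optax.chain(
    optax.clip_by_global_norm(1.0),
    optax.adamw(learning_rate=lr_schedule_fn, weight_decay=0.0),
)
```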

examples/image_to_text/README.md (+1, -1)

@@ -6,7 +6,7 @@ This example implements image-to-text with Redco.
 
 Install Redco
 ```shell
-pip install redco==0.4.8
+pip install redco==0.4.21
 ```
 
 ### Usage

examples/image_to_text/main.py (+2, -2)

@@ -92,7 +92,7 @@ def main(data_dir='./mscoco_data',
          per_device_batch_size=8,
          learning_rate=2e-5,
          lr_schedule_type='linear',
-         warmup_rate=0.1,
+         warmup_ratio=0.1,
          grad_norm_clip=1.,
          weight_decay=0.01,
          workdir='./workdir',
@@ -147,7 +147,7 @@ def main(data_dir='./mscoco_data',
         n_epochs=n_epochs,
         learning_rate=learning_rate,
         schedule_type=lr_schedule_type,
-        warmup_rate=warmup_rate)
+        warmup_ratio=warmup_ratio)
     optimizer = optax.MultiSteps(optax.chain(
         optax.clip_by_global_norm(grad_norm_clip),
         optax.adamw(learning_rate=lr_schedule_fn, weight_decay=weight_decay)

examples/language_modeling/README.md (+1, -1)

@@ -7,7 +7,7 @@ This example implements training causal language models, supporting causal langu
 
 ```shell
 # Install RedCoast
-pip install redco==0.4.20
+pip install redco==0.4.21
 # Install torchvision/torch (cpu version)
 pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
 ```

examples/language_modeling/main.py (+2, -2)

@@ -42,7 +42,7 @@ def main(dataset_name='alexgshaw/llama-13b-tokenized-wikitext-2-v1',
          per_device_batch_size=4,
          learning_rate=2e-5,
          lr_schedule_type='linear',
-         warmup_rate=0.1,
+         warmup_ratio=0.1,
          grad_norm_clip=1.,
          weight_decay=0.01,
          workdir='./workdir',
@@ -81,7 +81,7 @@ def main(dataset_name='alexgshaw/llama-13b-tokenized-wikitext-2-v1',
         n_epochs=n_epochs,
         learning_rate=learning_rate,
         schedule_type=lr_schedule_type,
-        warmup_rate=warmup_rate)
+        warmup_ratio=warmup_ratio)
     optimizer = optax.MultiSteps(optax.chain(
         optax.clip_by_global_norm(grad_norm_clip),
         optax.adamw(learning_rate=lr_schedule_fn, weight_decay=weight_decay)

examples/text_to_text/README.md (+1, -1)

@@ -11,7 +11,7 @@ It supports
 
 ```shell
 # Install RedCoast
-pip install redco==0.4.20
+pip install redco==0.4.21
 # Install nltk, rouge for evaluation
 pip install nltk rouge-score
 # Install torchvision/torch (cpu version) for loading PyTorch initializations

examples/text_to_text/main.py (+2, -2)

@@ -87,7 +87,7 @@ def main(dataset_name='EdinburghNLP/xsum',
          per_device_batch_size=32,
          learning_rate=2e-5,
          lr_schedule_type='linear',
-         warmup_rate=0.1,
+         warmup_ratio=0.1,
          grad_norm_clip=1.,
          weight_decay=0.01,
          workdir='./workdir',
@@ -133,7 +133,7 @@ def main(dataset_name='EdinburghNLP/xsum',
         n_epochs=n_epochs,
         learning_rate=learning_rate,
         schedule_type=lr_schedule_type,
-        warmup_rate=warmup_rate)
+        warmup_ratio=warmup_ratio)
     optimizer = optax.MultiSteps(optax.chain(
         optax.clip_by_global_norm(grad_norm_clip),
         optax.adamw(learning_rate=lr_schedule_fn, weight_decay=weight_decay)

redco/__init__.py (+1, -1)

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = '0.4.19'
+__version__ = '0.4.21'
 
 from .deployers import *
 from .trainers import *
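As a quick post-upgrade sanity check, the bumped version is readable from the package itself; a minimal sketch, assuming the 0.4.21 release is the version installed:

```python
import redco

# Prints the installed RedCoast version; 0.4.21 once this commit is released.
print(redco.__version__)
```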

setup.py (+2, -2)

@@ -17,10 +17,10 @@
 
 setup(
     name="redco",
-    version="0.4.20",
+    version="0.4.21",
     author="Bowen Tan",
     packages=find_packages(),
-    install_requires=['jax', 'flax', 'optax', 'numpy'],
+    install_requires=['jax', 'flax', 'optax'],
     include_package_data=True,
     python_requires=">=3.8",
     long_description=open('README.md').read(),
