Commit b02b2d5

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent: 02602cb

5 files changed (+20 -29 lines)

README.md (+1 -1)

@@ -44,7 +44,7 @@ gpus per model.
 
 This is an example to run a single 8bit llama-65b model on 2 A40s that have
 ~50 GB of memory each.
-
+
 ```
 elk elicit huggyllama/llama-65b imdb --num_gpus 2 --gpus_per_model 2 --int8
 ```

tests/test_smoke_elicit.py (+2 -2)

@@ -7,7 +7,7 @@
 
 def test_smoke_elicit_run_tiny_gpt2_ccs(tmp_path: Path):
     # we need about 5 mb of gpu memory to run this test
-    model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024 ** 2
+    model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024**2
     dataset_name = "imdb"
     elicit = Elicit(
         data=Extract(
@@ -38,7 +38,7 @@ def test_smoke_elicit_run_tiny_gpt2_ccs(tmp_path: Path):
 
 def test_smoke_elicit_run_tiny_gpt2_eigen(tmp_path: Path):
     # we need about 5 mb of gpu memory to run this test
-    model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024 ** 2
+    model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024**2
     dataset_name = "imdb"
     elicit = Elicit(
         data=Extract(
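The only semantic question in these hunks is whether dropping the spaces around `**` changes the value; it does not, since `**` binds tighter than `*` in Python either way. A quick sanity check, not part of the commit, just an illustration:

```python
# `**` has higher precedence than `*`, so both spellings mean 10 * (1024**2),
# i.e. 10 MiB expressed in bytes.
assert 10 * 1024 ** 2 == 10 * 1024**2 == 10 * (1024**2) == 10_485_760
```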

tests/test_smoke_eval.py (+1 -1)

@@ -19,7 +19,7 @@ def setup_elicit(
     tmp_path: Path,
     dataset_name="imdb",
     model_path="sshleifer/tiny-gpt2",
-    min_mem=10 * 1024 ** 2,
+    min_mem=10 * 1024**2,
     is_ccs: bool = True,
 ) -> Elicit:
     """Setup elicit config for testing, execute elicit, and save output to tmp_path.

tests/test_split_devices.py (+15 -24)

@@ -5,39 +5,30 @@ def test_split_2_devices_1_gpu_per_model():
     devices = ["a", "b"]
     gpus_per_model = 1
     models_to_create = 2
-    assert (
-        split_devices_into_model_devices(
-            devices=devices,
-            gpus_per_model=gpus_per_model,
-            models_to_create=models_to_create,
-        )
-        == [ModelDevices("a", []), ModelDevices("b", [])]
-    )
+    assert split_devices_into_model_devices(
+        devices=devices,
+        gpus_per_model=gpus_per_model,
+        models_to_create=models_to_create,
+    ) == [ModelDevices("a", []), ModelDevices("b", [])]
 
 
 def test_split_4_devices_2_gpus_per_model():
     devices = ["a", "b", "c", "d"]
     gpus_per_model = 2
     models_to_create = 2
-    assert (
-        split_devices_into_model_devices(
-            devices=devices,
-            gpus_per_model=gpus_per_model,
-            models_to_create=models_to_create,
-        )
-        == [ModelDevices("a", ["b"]), ModelDevices("c", ["d"])]
-    )
+    assert split_devices_into_model_devices(
+        devices=devices,
+        gpus_per_model=gpus_per_model,
+        models_to_create=models_to_create,
+    ) == [ModelDevices("a", ["b"]), ModelDevices("c", ["d"])]
 
 
 def test_split_7_devices_3_gpus_per_model():
     devices = ["a", "b", "c", "d", "e", "f", "g"]
     gpus_per_model = 3
     models_to_create = 2
-    assert (
-        split_devices_into_model_devices(
-            devices=devices,
-            gpus_per_model=gpus_per_model,
-            models_to_create=models_to_create,
-        )
-        == [ModelDevices("a", ["b", "c"]), ModelDevices("d", ["e", "f"])]
-    )
+    assert split_devices_into_model_devices(
+        devices=devices,
+        gpus_per_model=gpus_per_model,
+        models_to_create=models_to_create,
+    ) == [ModelDevices("a", ["b", "c"]), ModelDevices("d", ["e", "f"])]

tests/test_truncated_eigh.py (+1 -1)

@@ -11,7 +11,7 @@ def random_symmetric_matrix(n: int, k: int) -> torch.Tensor:
     assert k <= n, "Rank k should be less than or equal to the matrix size n."
 
     # Generate random n x k matrix A with elements drawn from a uniform distribution
-    A = torch.rand(n, k) / k ** 0.5
+    A = torch.rand(n, k) / k**0.5
 
     # Create a diagonal matrix D with k eigenvalues evenly distributed around zero
     eigenvalues = torch.linspace(-1, 1, k)
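The hunk only shows the start of `random_symmetric_matrix`. One natural way to finish the construction hinted at by the comments, shown here as a sketch since the rest of the function is not part of this diff, is to sandwich the diagonal eigenvalue matrix between the random factor and its transpose:

```python
import torch


def random_symmetric_matrix(n: int, k: int) -> torch.Tensor:
    """Sketch: a random symmetric n x n matrix of rank at most k."""
    assert k <= n, "Rank k should be less than or equal to the matrix size n."

    # Random n x k factor; dividing by sqrt(k) keeps the entries of the final
    # product roughly unit scale regardless of k.
    A = torch.rand(n, k) / k**0.5

    # k eigenvalues evenly distributed around zero, placed on a diagonal.
    eigenvalues = torch.linspace(-1, 1, k)
    D = torch.diag(eigenvalues)

    # A @ D @ A.T is symmetric by construction and has rank at most k.
    return A @ D @ A.T
```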
