
Commit aaacc17

Committed Jan 8, 2022
Cleanup imports
1 parent 70d8b60 commit aaacc17

39 files changed: +57 -245 lines
 

.gitignore (-1)

@@ -6,5 +6,4 @@ __pycache__
 *.txt
 ray_results
 setup.py
-setup.cfg
 dist/

README.md (+1 -1)

@@ -17,7 +17,7 @@ Then make sure [Nvidia MPS](https://docs.nvidia.com/deploy/mps/index.html#topic_
 To use `Fluid` in Ray Tune, pass an instance of it as the trial executor to `tune.run`. It should work with any other schedulers:

 ```python
-from fluid.executor import FluidExecutor
+from fluid.fluid_executor import FluidExecutor
 tune.run(
     MyTrainable,
     trial_executor=FluidExecutor(),
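
For context, a minimal end-to-end sketch of the usage the README describes. Only `FluidExecutor` comes from this repo; `MyTrainable` and the config are hypothetical stand-ins.

```python
import ray
from ray import tune

from fluid.fluid_executor import FluidExecutor


class MyTrainable(tune.Trainable):
    # Hypothetical stand-in; a real Trainable implements the setup/step
    # methods (or _setup/_train on the older Ray API this repo targets).
    def step(self):
        return {"mean_loss": 0.0}


ray.init()
tune.run(
    MyTrainable,
    config={"lr": tune.grid_search([0.01, 0.1])},
    trial_executor=FluidExecutor(),  # Fluid takes over trial placement
)
```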

fluid/algo_random.py (-1)

@@ -3,7 +3,6 @@
 from ray.tune.config_parser import create_trial_from_spec, make_parser
 from ray.tune.error import TuneError
 from ray.tune.experiment import convert_to_experiment_list
-from ray.tune.suggest.bohb import TuneBOHB as OrigTuneBOHB
 from ray.tune.suggest.search import SearchAlgorithm
 from ray.tune.suggest.variant_generator import (
     flatten_resolved_vars,

fluid/ashaparallel.py (+1 -4)

@@ -6,18 +6,15 @@
 @author: liujiachen
 """
 import logging
+import os

 import numpy as np
-import ray
 from ray.tune.schedulers import AsyncHyperBandScheduler
 from ray.tune.trial import Trial

 logger = logging.getLogger(__name__)


-import os
-
-
 def sched_algo():
     return int(os.environ.get("NUM_WORKER", 8))
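
The hunk above mostly moves `import os` to the top of the module; the import serves the `NUM_WORKER` environment-variable pattern shared by the workload scripts. A quick illustration (the value `"4"` is hypothetical; a launch script would normally set it):

```python
import os

os.environ["NUM_WORKER"] = "4"  # normally exported by the launch script
workers = int(os.environ.get("NUM_WORKER", 8))  # falls back to 8 if unset
assert workers == 4
```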

fluid/fluid_executor.py (+4 -15)

@@ -16,31 +16,20 @@
 from ray.exceptions import RayTimeoutError
 from ray.resource_spec import ResourceSpec
 from ray.tune.durable_trainable import DurableTrainable
-from ray.tune.error import AbortTrialExecution, TuneError
+from ray.tune.error import AbortTrialExecution
 from ray.tune.logger import NoopLogger
 from ray.tune.ray_trial_executor import _LocalWrapper, _to_gb, _TrialCleanup
 from ray.tune.resources import Resources
-from ray.tune.result import TIME_THIS_ITER_S, TRAINING_ITERATION, TRIAL_INFO
+from ray.tune.result import TRIAL_INFO
 from ray.tune.trainable import TrainableUtil
 from ray.tune.trial import Checkpoint, Location, Trial, TrialInfo
 from ray.tune.trial_executor import TrialExecutor

 from .perf_manager import PerfManager
-from .ray_custom_gpu_res import create_custom_gpu_res, gpu_idx_from_name
+from .ray_custom_gpu_res import create_custom_gpu_res

 if TYPE_CHECKING:
-    from typing import (
-        Any,
-        Dict,
-        Iterable,
-        List,
-        Optional,
-        Set,
-        Tuple,
-        TypedDict,
-        TypeVar,
-        Union,
-    )
+    from typing import Any, Dict, Iterable, List, Optional, Tuple, TypeVar

 T = TypeVar("T")

fluid/my_bracket.py (-1)

@@ -12,7 +12,6 @@
 from typing import TYPE_CHECKING, NamedTuple

 import numpy as np
-import ray
 from ray.tune.result import TIME_THIS_ITER_S
 from ray.tune.trial import Trial

fluid/perf_manager.py (+1 -1)

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import logging
-from typing import TYPE_CHECKING, NamedTuple
+from typing import TYPE_CHECKING

 if TYPE_CHECKING:
     from typing import Any, Dict, Mapping, Optional, Set

fluid/runnner.py (+1 -1)

@@ -5,7 +5,7 @@

 @author: liujiachen
 """
-from ray.tune.schedulers import FIFOScheduler, TrialScheduler
+from ray.tune.schedulers import TrialScheduler
 from ray.tune.trial import Trial
 from ray.tune.trial_runner import TrialRunner
 from ray.tune.web_server import TuneServer

fluid/syncbohb.py (-7)

@@ -9,18 +9,11 @@

 import logging
 import os
-from typing import TYPE_CHECKING

 import numpy as np
-from ray.tune.error import TuneError
-from ray.tune.result import TIME_THIS_ITER_S, TRAINING_ITERATION
 from ray.tune.schedulers import HyperBandForBOHB
 from ray.tune.trial import Trial

-if TYPE_CHECKING:
-    from typing import Dict, List, NamedTuple, Optional, Set, Tuple, TypedDict, Union
-
-
 logger = logging.getLogger(__name__)

fluid/trainer.py (+1 -1)

@@ -308,7 +308,7 @@ def _configure_and_split_batch(self, num_workers):
         return batch_size_per_worker

     def _start_workers(self, num_workers):
-        logger.debug(f"start_workers: Setting %d workers." % num_workers)
+        logger.debug(f"start_workers: Setting {num_workers} workers.")
         worker_config = self.config.copy()
         batch_size_per_worker = self._configure_and_split_batch(num_workers)
         if batch_size_per_worker:
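
The old line mixed two formatting styles: because the f-string contained no `{}` placeholders it degraded to a plain string and the `%` operator did the real substitution, so the `f` prefix was dead weight. A quick comparison (illustrative only):

```python
num_workers = 3
# Before: the f-string prefix is inert; %-formatting does the work.
old = f"start_workers: Setting %d workers." % num_workers
# After: a plain f-string, one formatting mechanism.
new = f"start_workers: Setting {num_workers} workers."
assert old == new == "start_workers: Setting 3 workers."
```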

setup.cfg (+12)

@@ -0,0 +1,12 @@
+[flake8]
+exclude =
+    .git,
+    __pycache__,
+    # thirdparty code
+    workloads/word_language_model
+ignore =
+    C901,
+    # flake8 disagrees with black's formatting
+    E203,
+    E231,
+    W503
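
The ignored checks are the ones known to conflict with black's formatting. E203 is the classic case: black puts spaces around `:` in slices with expression operands, which plain flake8 flags as whitespace before ':'. A small illustration (hypothetical snippet):

```python
items = [1, 2, 3, 4, 5]
# black formats complex slice bounds with surrounding spaces; without the
# E203 ignore, flake8 would report "whitespace before ':'" here.
middle = items[1 : len(items) - 1]
print(middle)  # [2, 3, 4]
```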

tests/test_fluid_executor.py (+1 -1)

@@ -9,7 +9,7 @@

 # from ray.rllib import _register_all
 from ray.tune import Trainable
-from ray.tune.registry import TRAINABLE_CLASS, _global_registry, register_trainable
+from ray.tune.registry import TRAINABLE_CLASS, _global_registry
 from ray.tune.resources import Resources
 from ray.tune.result import TRAINING_ITERATION
 from ray.tune.suggest import BasicVariantGenerator

workloads/common/__init__.py (+6 -4)

@@ -38,12 +38,14 @@ def init_ray():
     )
     logger.info("Init ray finished")
     for name in logging.root.manager.loggerDict:
-        l = logging.getLogger(name)
+        named_logger = logging.getLogger(name)
         if name.startswith("fluid") or name.startswith("workloads"):
-            l.setLevel(logging.DEBUG)
+            named_logger.setLevel(logging.DEBUG)
         else:
-            l.setLevel(logging.INFO)
-        logger.info(f"{name} at {logging.getLevelName(l.getEffectiveLevel())}")
+            named_logger.setLevel(logging.INFO)
+        logger.info(
+            f"{name} at {logging.getLevelName(named_logger.getEffectiveLevel())}"
+        )

     return args.exp, args.seed
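
The rename from `l` to `named_logger` clears flake8's E741 (ambiguous variable name `l`), consistent with the linting config this commit adds. A condensed sketch of the pattern the hunk implements (assuming the same `fluid`/`workloads` logger-name prefixes):

```python
import logging

for name in logging.root.manager.loggerDict:
    named_logger = logging.getLogger(name)
    if name.startswith(("fluid", "workloads")):
        named_logger.setLevel(logging.DEBUG)  # verbose for project code
    else:
        named_logger.setLevel(logging.INFO)   # quieter for third-party libs
```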

workloads/common/cifar.py (-3)

@@ -8,10 +8,7 @@
 import numpy as np
 import torch
 import torch.nn as nn
-import torch.optim as optim
 from ray import tune
-from ray.util.sgd.utils import BATCH_SIZE
-from torch.utils.data import DataLoader
 from torchvision import datasets, transforms

 import workloads.common as com

workloads/common/imagenet.py (+16 -21)

@@ -1,16 +1,10 @@
-import math
 import os
 import time
-from collections import OrderedDict

-import numpy as np
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 import torch.optim as optim
 import torchvision
-import torchvision.transforms as transforms
-from ray import tune
 from torchvision import datasets, transforms

 import workloads.common as com
@@ -26,41 +20,41 @@ def __init__(self):
         super(VGG, self).__init__()
         self.conv = nn.Sequential(
             # Stage 1
-            # TODO: convolutional layer, input channels 3, output channels 8, filter size 3
+            # convolutional layer, input channels 3, output channels 8, filter size 3
             torch.nn.Conv2d(3, 8, 3, padding=1),
-            # TODO: max-pooling layer, size 2
+            # max-pooling layer, size 2
             torch.nn.MaxPool2d(2),
             # Stage 2
-            # TODO: convolutional layer, input channels 8, output channels 16, filter size 3
+            # convolutional layer, input channels 8, output channels 16, filter size 3
             torch.nn.Conv2d(8, 16, 3, padding=1),
-            # TODO: max-pooling layer, size 2
+            # max-pooling layer, size 2
             torch.nn.MaxPool2d(2),
             # Stage 3
-            # TODO: convolutional layer, input channels 16, output channels 32, filter size 3
+            # convolutional layer, input channels 16, output channels 32, filter size 3
             torch.nn.Conv2d(16, 32, 3, padding=1),
-            # TODO: convolutional layer, input channels 32, output channels 32, filter size 3
+            # convolutional layer, input channels 32, output channels 32, filter size 3
             torch.nn.Conv2d(32, 32, 3, padding=1),
-            # TODO: max-pooling layer, size 2
+            # max-pooling layer, size 2
             torch.nn.MaxPool2d(2),
             # Stage 4
-            # TODO: convolutional layer, input channels 32, output channels 64, filter size 3
+            # convolutional layer, input channels 32, output channels 64, filter size 3
             torch.nn.Conv2d(32, 64, 3, padding=1),
-            # TODO: convolutional layer, input channels 64, output channels 64, filter size 3
+            # convolutional layer, input channels 64, output channels 64, filter size 3
             torch.nn.Conv2d(64, 64, 3, padding=1),
-            # TODO: max-pooling layer, size 2
+            # max-pooling layer, size 2
             torch.nn.MaxPool2d(2),
             # Stage 5
-            # TODO: convolutional layer, input channels 64, output channels 64, filter size 3
+            # convolutional layer, input channels 64, output channels 64, filter size 3
             torch.nn.Conv2d(64, 64, 3, padding=1),
-            # TODO: convolutional layer, input channels 64, output channels 64, filter size 3
+            # convolutional layer, input channels 64, output channels 64, filter size 3
             torch.nn.Conv2d(64, 64, 3, padding=1),
-            # TODO: max-pooling layer, size 2
+            # max-pooling layer, size 2
             torch.nn.MaxPool2d(2),
         )
         self.fc = nn.Sequential(
-            # TODO: fully-connected layer (64->64)
-            # TODO: fully-connected layer (64->10)
+            # fully-connected layer (64->64)
             torch.nn.Linear(64, 64),
+            # fully-connected layer (64->10)
             torch.nn.Linear(64, 10),
         )
@@ -129,6 +123,7 @@ def data_creator(config):
         shuffle=False,
         pin_memory=True,
     )
+    return train_loader, val_loader


 # https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
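
The last hunk fixes a real bug rather than an import: without the added `return`, `data_creator` implicitly returned `None`, while the trainer expects the train and validation loaders back. A minimal sketch of the expected shape (toy tensors stand in for the real ImageNet datasets):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset


def data_creator(config):
    # Toy stand-ins; the real code builds torchvision ImageNet datasets.
    train_ds = TensorDataset(torch.randn(32, 3, 32, 32), torch.randint(0, 10, (32,)))
    val_ds = TensorDataset(torch.randn(8, 3, 32, 32), torch.randint(0, 10, (8,)))
    train_loader = DataLoader(train_ds, batch_size=config.get("batch_size", 4))
    val_loader = DataLoader(val_ds, batch_size=config.get("batch_size", 4))
    return train_loader, val_loader  # both loaders must be returned
```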

workloads/common/mnist.py (-1)

@@ -3,7 +3,6 @@
 import torch.optim as optim
 from ray import tune
 from ray.tune.examples.mnist_pytorch import ConvNet
-from ray.util.sgd.utils import BATCH_SIZE
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms

workloads/common/mnist_upgrade.py (-2)

@@ -7,12 +7,10 @@
 """

 import numpy as np
-import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import torch.optim as optim
 from ray import tune
-from ray.util.sgd.utils import BATCH_SIZE
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms

workloads/common/wlm.py (+1 -1)

@@ -145,7 +145,7 @@ def create_sample_space():
         "bptt": lambda *_: np.random.randint(30, 41),
         "clip": lambda *_: np.random.uniform(0.1, 0.5),
         "model": ["RNN_TANH", "RNN_RELU", "LSTM", "GRU"],
-        #'model': ["RNN_TANH", "RNN_RELU", "LSTM", "GRU", "Transformer"],
+        # 'model': ["RNN_TANH", "RNN_RELU", "LSTM", "GRU", "Transformer"],
         # half_emsize is multiplied by 2 * nhead to get emsize, and the conceptual range is 100, 300
         "half_emsize": lambda *_: np.random.randint(25, 76),
         "nhead": lambda *_: np.random.randint(1, 3),

workloads/test_ddp.py (+7 -15)

@@ -5,35 +5,27 @@

 @author: liujiachen
 """
-
-import tensorflow as tf
-
-try:
-    tf.get_logger().setLevel("INFO")
-except Exception as exc:
-    print(exc)
 import warnings

-warnings.simplefilter("ignore")
-
-import matplotlib.pyplot as plt
 import matplotlib.style as style
-import numpy as np
 import ray
+import tensorflow as tf
 import torch
 import torch.optim as optim
 from hyperopt import hp
 from ray import tune
-from ray.tune import track
 from ray.tune.examples.mnist_pytorch import ConvNet, get_data_loaders, test, train
-from ray.tune.schedulers import ASHAScheduler, AsyncHyperBandScheduler
+from ray.tune.schedulers import ASHAScheduler
 from ray.tune.suggest.hyperopt import HyperOptSearch
 from torchvision import datasets

+try:
+    tf.get_logger().setLevel("INFO")
+except Exception as exc:
+    print(exc)
+warnings.simplefilter("ignore")
 style.use("ggplot")
-
 datasets.MNIST("~/data", train=True, download=True)
-import workloads.common as com


 def train_mnist(config):

workloads/test_fluid.py (-112)

This file was deleted.

workloads/tune_asha_cifar.py (-3)

@@ -1,8 +1,5 @@
-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.schedulers import ASHAScheduler
 from ray.util.sgd.utils import BATCH_SIZE

workloads/tune_asha_para_cifar.py (+1 -2)

@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

+import os
 from pathlib import Path

 from ray import tune
@@ -15,8 +16,6 @@
 DATA_PATH, RESULTS_PATH = com.detect_paths()
 EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")

-import os
-

 def sched_algo():
     return int(os.environ.get("NUM_WORKER", 4))

workloads/tune_asha_para_mnist.py (+1 -3)

@@ -5,7 +5,7 @@

 @author: liujiachen
 """
-
+import os
 from pathlib import Path

 from ray import tune
@@ -20,8 +20,6 @@
 DATA_PATH, RESULTS_PATH = com.detect_paths()
 EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")

-import os
-

 def sched_algo():
     return int(os.environ.get("NUM_WORKER", 8))

workloads/tune_asha_para_mnist_upgrade.py (+1 -3)

@@ -5,7 +5,7 @@

 @author: liujiachen
 """
-
+import os
 from pathlib import Path

 from ray import tune
@@ -20,8 +20,6 @@
 DATA_PATH, RESULTS_PATH = com.detect_paths()
 EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")

-import os
-

 def sched_algo():
     return int(os.environ.get("NUM_WORKER", 8))

workloads/tune_asha_wlm.py (-3)

@@ -1,8 +1,5 @@
-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.schedulers.async_hyperband import ASHAScheduler

workloads/tune_bohb_cifar.py (-3)

@@ -6,11 +6,8 @@
 @author: liujiachen
 """

-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.schedulers.hb_bohb import HyperBandForBOHB
 from ray.tune.suggest.bohb import TuneBOHB

workloads/tune_bohb_wlm.py (-3)

@@ -1,8 +1,5 @@
-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.schedulers.hb_bohb import HyperBandForBOHB
 from ray.tune.suggest.bohb import TuneBOHB

workloads/tune_grid_cifar.py (-2)

@@ -4,9 +4,7 @@
 from ray.util.sgd.utils import BATCH_SIZE

 import workloads.common as com
-from fluid.algo_random import VariantGenerator
 from fluid.trainer import TorchTrainer
-from workloads import grid_search_space as space
 from workloads.common import cifar as workload

 DATA_PATH, RESULTS_PATH = com.detect_paths()

workloads/tune_grid_dcgan.py (+1 -2)

@@ -3,7 +3,6 @@
 from ray import tune

 import workloads.common as com
-from fluid.algo_random import VariantGenerator
 from fluid.trainer import TorchTrainer
 from workloads.common import dcgan as workload

@@ -50,7 +49,7 @@ def main():

     params = {
         # **com.run_options(__file__),
-        #'stop': workload.create_stopper(),
+        # 'stop': workload.create_stopper(),
         **setup_tune_scheduler(exp),
     }

workloads/tune_grid_mnist.py (-1)

@@ -11,7 +11,6 @@
 import torch
 import torch.nn as nn
 from ray import tune
-from ray.util.sgd.utils import BATCH_SIZE

 import workloads.common as com
 from workloads.common import mnist_upgrade as workload

workloads/tune_grid_wlm.py (-2)

@@ -1,11 +1,9 @@
 from pathlib import Path

 from ray import tune
-from ray.util.sgd.utils import BATCH_SIZE

 import workloads.common as com
 from fluid.trainer import TorchTrainer
-from workloads import grid_search_space as space
 from workloads.common import wlm as workload

 DATA_PATH, RESULTS_PATH = com.detect_paths()

workloads/tune_hb_cifar.py (-3)

@@ -1,8 +1,5 @@
-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.schedulers.hb_bohb import HyperBandScheduler
 from ray.util.sgd.utils import BATCH_SIZE

workloads/tune_hb_dcgan.py (-1)

@@ -2,7 +2,6 @@

 from ray import tune
 from ray.tune.schedulers.hyperband import HyperBandScheduler
-from ray.util.sgd.utils import BATCH_SIZE

 import workloads.common as com
 from fluid.algo_random import VariantGenerator

workloads/tune_hb_wlm.py (-3)

@@ -1,8 +1,5 @@
-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.schedulers.hyperband import HyperBandScheduler

workloads/tune_pbt_cifar.py (-5)

@@ -12,11 +12,6 @@
 DATA_PATH, RESULTS_PATH = com.detect_paths()
 EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")

-import random
-
-import numpy as np
-import torch
-

 def setup_tune_scheduler():
     ss, custom_explore = workload.create_sample_space()

workloads/tune_pbt_wlm.py (-5)

@@ -10,11 +10,6 @@
 DATA_PATH, RESULTS_PATH = com.detect_paths()
 EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")

-import random
-
-import numpy as np
-import torch
-

 def setup_tune_scheduler():
     ss, custom_explore = workload.create_sample_space()

workloads/tune_syncbohb_cifar.py (-3)

@@ -6,11 +6,8 @@
 @author: liujiachen
 """

-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune

 # from ray.tune.schedulers.hb_bohb import HyperBandForBOHB

workloads/tune_syncbohb_dcgan.py (+1 -2)

@@ -4,14 +4,13 @@
 from ray.tune.suggest.bohb import TuneBOHB

 import workloads.common as com
+from fluid.syncbohb import SyncBOHB
 from fluid.trainer import TorchTrainer
 from workloads.common import dcgan as workload

 DATA_PATH, RESULTS_PATH = com.detect_paths()
 EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")

-from fluid.syncbohb import SyncBOHB
-

 def setup_tune_scheduler(num_worker):

workloads/tune_syncbohb_wlm.py (-3)

@@ -1,8 +1,5 @@
-import random
 from pathlib import Path

-import numpy as np
-import torch
 from ray import tune
 from ray.tune.suggest.bohb import TuneBOHB
