
Commit fa1f72d

Fix flake8 linting issues
Signed-off-by: Ignas Baranauskas <[email protected]>
Ygnas committed Aug 6, 2024
1 parent 3878fbb commit fa1f72d
Showing 6 changed files with 11 additions and 12 deletions.
examples/pytorch/elastic/echo/echo.py (2 changes: 0 additions & 2 deletions)
@@ -3,12 +3,10 @@
 import os
 import pprint
 import sys
-import time
 
 import torch.distributed as dist
 
 if __name__ == "__main__":
-
     env_dict = {
         k: os.environ[k]
         for k in (
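
For context, the deletion above fixes flake8's F401 rule ("'time' imported but unused"). A minimal sketch of the pattern, with illustrative values:

    import os

    # import time  # flake8 F401: 'time' imported but unused; the fix is to delete the line

    # 'os' is referenced below, so it does not trigger F401
    print(os.environ.get("RANK", "0"))
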
examples/pytorch/elastic/imagenet/imagenet.py (6 changes: 3 additions & 3 deletions)
@@ -348,7 +348,7 @@ def load_checkpoint(
 
     # max_epoch == -1 means no one has checkpointed return base state
     if max_epoch == -1:
-        print(f"=> no workers have checkpoints, starting from epoch 0")
+        print("=> no workers have checkpoints, starting from epoch 0")
         return state
 
     # broadcast the state from max_rank (which has the most up-to-date state)
@@ -370,7 +370,7 @@ def load_checkpoint(
         blob = torch.as_tensor(raw_blob, dtype=torch.uint8)
 
     dist.broadcast(blob, src=max_rank, group=pg)
-    print(f"=> done broadcasting checkpoint")
+    print("=> done broadcasting checkpoint")
 
     if rank != max_rank:
         with io.BytesIO(blob.numpy()) as f:
@@ -380,7 +380,7 @@ def load_checkpoint(
     # wait till everyone has loaded the checkpoint
     dist.barrier(group=pg)
 
-    print(f"=> done restoring from previous checkpoint")
+    print("=> done restoring from previous checkpoint")
     return state
 
 
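
All three imagenet.py changes address flake8's F541 rule ("f-string is missing placeholders"): an f prefix on a string containing no {} placeholders is dead weight, so the prefix is dropped. A minimal sketch with illustrative values:

    epoch = 0
    print(f"=> resuming from epoch {epoch}")  # placeholder present: the f prefix is justified
    print("=> done broadcasting checkpoint")  # no placeholders: a plain string avoids F541
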
sdk/python/kubeflow/storage_initializer/s3.py (3 changes: 1 addition & 2 deletions)
@@ -1,5 +1,4 @@
 from dataclasses import dataclass
-from dataclasses import field
 import json
 import os
 from urllib.parse import urlparse
@@ -71,4 +70,4 @@ def download_dataset(self):
         # Download the file
         file_path = os.path.sep.join(path_components[1:])
         bucket.download_file(obj_key, os.path.join(VOLUME_PATH_DATASET, file_path))
-        print(f"Files downloaded")
+        print("Files downloaded")
sdk/python/kubeflow/training/api/training_client.py (4 changes: 2 additions & 2 deletions)
@@ -168,8 +168,8 @@ def train(
             and dataset. You can configure PVC size and storage class name in this argument.
         """
         try:
-            import peft
-            import transformers
+            import peft  # noqa: F401
+            import transformers  # noqa: F401
         except ImportError:
             raise ImportError(
                 "Train API dependencies not installed. "
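
Unlike the deletions elsewhere in this commit, these imports are unused by design: they only probe whether the optional dependencies are installed, so "# noqa: F401" tells flake8 to skip the unused-import check rather than removing them. A sketch of the pattern (the wrapper name and the tail of the error message are illustrative; the diff truncates the real text):

    def check_train_deps():
        try:
            import peft  # noqa: F401  (probe only; never referenced)
            import transformers  # noqa: F401
        except ImportError:
            raise ImportError(
                "Train API dependencies not installed. "
                "Install the optional training extras."  # illustrative hint
            )
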
sdk/python/kubeflow/training/api/training_client_test.py (6 changes: 4 additions & 2 deletions)
@@ -225,8 +225,10 @@ def __init__(self, kind) -> None:
             "replica_type": constants.REPLICA_TYPE_MASTER.lower(),
             "replica_index": 0,
         },
-        f"{constants.JOB_NAME_LABEL}={TEST_NAME},{constants.JOB_ROLE_LABEL}={constants.JOB_ROLE_MASTER}"
-        f",{constants.REPLICA_TYPE_LABEL}={constants.REPLICA_TYPE_MASTER.lower()},{constants.REPLICA_INDEX_LABEL}=0",
+        f"{constants.JOB_NAME_LABEL}={TEST_NAME},"
+        f"{constants.JOB_ROLE_LABEL}={constants.JOB_ROLE_MASTER},"
+        f"{constants.REPLICA_TYPE_LABEL}={constants.REPLICA_TYPE_MASTER.lower()},"
+        f"{constants.REPLICA_INDEX_LABEL}=0",
         LIST_RESPONSE,
     ),
     (
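
This fix relies on Python's implicit concatenation of adjacent string literals: an expected label selector that overflowed the line-length limit (flake8 E501) is split into one short f-string per label, with no change to the resulting value. A sketch with hypothetical label keys:

    name, role = "test-job", "master"
    selector = (
        f"job-name={name},"  # hypothetical label keys, for illustration only
        f"job-role={role},"
        f"replica-index=0"
    )
    assert selector == "job-name=test-job,job-role=master,replica-index=0"
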
sdk/python/kubeflow/training/constants/constants.py (2 changes: 1 addition & 1 deletion)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, Union
+from typing import Union
 
 from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH
 from kubeflow.training import models
