
update: CPUOffloadOptimizer
kozistr committed Oct 26, 2024
1 parent 26eb948 commit df3c4ea
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions pytorch_optimizer/optimizer/utils.py
@@ -58,7 +58,7 @@ def compare_versions(v1: str, v2: str) -> bool:
 TORCH_VERSION_AT_LEAST_2_4: bool = compare_versions(torch.__version__, '2.4.0')
 
 
-class CPUOffloadOptimizer:
+class CPUOffloadOptimizer:  # pragma: no cover
     """Offload optimizer to CPU for single-GPU training. This will reduce GPU memory by the size of optimizer state.
 
     Reference: https://github.com/pytorch/ao/blob/main/torchao/prototype/low_bit_optim/cpu_offload.py
@@ -94,7 +94,7 @@ def __init__(
 
         self.queue = {}
 
-        def backward_hook(p_cuda: torch.Tensor) -> None:  # pragma: no cover
+        def backward_hook(p_cuda: torch.Tensor) -> None:
             if p_cuda.grad is None:
                 return
 
@@ -127,7 +127,7 @@ def backward_hook(p_cuda: torch.Tensor) -> None:
                 self.optim_dict[p_cuda] = optimizer_class([{'params': p_cpu, **param_group}], **kwargs)
 
     @torch.no_grad()
-    def step(self, closure: CLOSURE = None) -> LOSS:  # pragma: no cover
+    def step(self, closure: CLOSURE = None) -> LOSS:
         loss = None
         if closure is not None:
             loss = closure()
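This commit moves the `# pragma: no cover` coverage exclusion from the individual `backward_hook` and `step` methods up to the `CPUOffloadOptimizer` class, so the whole class is skipped by coverage measurement as one unit.

For background, `backward_hook` is the heart of the offload scheme: each CUDA parameter gets a pinned-CPU twin, a hook registered with `torch.Tensor.register_post_accumulate_grad_hook` copies the gradient to host memory as soon as it is accumulated, and a CPU-resident optimizer owns the copy. Below is a minimal sketch of that idea, simplified from the torchao reference linked in the docstring; the single-parameter setup is illustrative, not the class's actual bookkeeping.

import torch

# One CUDA parameter and its pinned-CPU twin; pinned memory allows
# non-blocking (asynchronous) device-to-host copies.
p_cuda = torch.nn.Parameter(torch.randn(4, 4, device='cuda'))
p_cpu = torch.empty(p_cuda.shape, dtype=p_cuda.dtype, pin_memory=True)
p_cpu.grad = torch.empty(p_cuda.shape, dtype=p_cuda.dtype, pin_memory=True)
p_cpu.copy_(p_cuda.detach(), non_blocking=True)

# The wrapped optimizer owns the CPU copy, so its state
# (e.g. AdamW's exp_avg / exp_avg_sq) lives in host RAM, not GPU memory.
optim = torch.optim.AdamW([p_cpu], lr=1e-3)

def backward_hook(p: torch.Tensor) -> None:
    if p.grad is None:
        return
    # Stream the just-accumulated gradient to CPU, then free it on the GPU.
    p_cpu.grad.copy_(p.grad, non_blocking=True)
    p.grad = None

# Fires as soon as this parameter's gradient has been accumulated in backward.
p_cuda.register_post_accumulate_grad_hook(backward_hook)

# After loss.backward(), stepping happens on CPU and the weight syncs back:
#   optim.step()
#   p_cuda.detach().copy_(p_cpu, non_blocking=True)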

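And a hypothetical end-to-end usage sketch. The constructor signature follows the torchao prototype this class is ported from (parameters first, then the optimizer class to wrap, then that optimizer's keyword arguments), and the `zero_grad` call is assumed to exist as in that reference; treat both as assumptions rather than documented pytorch_optimizer API.

import torch

from pytorch_optimizer.optimizer.utils import CPUOffloadOptimizer

model = torch.nn.Linear(128, 64).cuda()

# Assumed signature, mirroring the torchao reference: wrap torch.optim.AdamW
# so its optimizer state is allocated in CPU RAM instead of on the GPU.
optimizer = CPUOffloadOptimizer(model.parameters(), torch.optim.AdamW, lr=1e-3)

for _ in range(10):
    x = torch.randn(32, 128, device='cuda')
    model(x).sum().backward()  # hooks stream gradients into pinned CPU buffers
    optimizer.step()           # per-parameter CPU optimizers step; weights sync back
    optimizer.zero_grad()      # assumed to exist, as in the torchao prototype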