
Commit

Fix more
philippmwirth committed Nov 20, 2023
1 parent c427347 commit 0606f88
Showing 2 changed files with 9 additions and 17 deletions.
16 changes: 5 additions & 11 deletions lightly/utils/benchmarking/linear_classifier.py
@@ -1,16 +1,10 @@
-from typing import Dict, List, Tuple, Union
+from typing import Any, Dict, List, Tuple, Union
 
 from pytorch_lightning import LightningModule
 from torch import Tensor
 from torch.nn import CrossEntropyLoss, Linear, Module
 from torch.optim import SGD, Optimizer
 
-# Try to import the new LRScheduler from torch 2.0.
-try:
-    from torch.optim.lr_scheduler import LRScheduler
-except ImportError:
-    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
-
 from lightly.models.utils import activate_requires_grad, deactivate_requires_grad
 from lightly.utils.benchmarking.topk import mean_topk_accuracy
 from lightly.utils.scheduler import CosineWarmupScheduler
@@ -104,7 +98,7 @@ def forward(self, images: Tensor) -> Tensor:
         return output
 
     def shared_step(
-        self, batch: Tuple[Tensor, Tensor], batch_idx: int
+        self, batch: Tuple[Tensor, ...], batch_idx: int
     ) -> Tuple[Tensor, Dict[int, Tensor]]:
         images, targets = batch[0], batch[1]
         predictions = self.forward(images)
@@ -113,7 +107,7 @@ def shared_step(
         topk = mean_topk_accuracy(predicted_labels, targets, k=self.topk)
         return loss, topk
 
-    def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:
+    def training_step(self, batch: Tuple[Tensor, ...], batch_idx: int) -> Tensor:
         loss, topk = self.shared_step(batch=batch, batch_idx=batch_idx)
         batch_size = len(batch[1])
         log_dict = {f"train_top{k}": acc for k, acc in topk.items()}
@@ -123,7 +117,7 @@ def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:
         self.log_dict(log_dict, sync_dist=True, batch_size=batch_size)
         return loss
 
-    def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:
+    def validation_step(self, batch: Tuple[Tensor, ...], batch_idx: int) -> Tensor:
         loss, topk = self.shared_step(batch=batch, batch_idx=batch_idx)
         batch_size = len(batch[1])
         log_dict = {f"val_top{k}": acc for k, acc in topk.items()}
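The same widening from Tuple[Tensor, Tensor] to Tuple[Tensor, ...] is applied to shared_step, training_step, and validation_step above. A minimal sketch, not part of this commit, of the kind of batch the relaxed hint admits (the index tensor is a hypothetical extra entry):

from typing import Tuple

import torch
from torch import Tensor

images = torch.randn(8, 3, 32, 32)    # batch of images
targets = torch.randint(0, 10, (8,))  # class labels
indices = torch.arange(8)             # hypothetical extra tensor entry

# Accepted by the new hint; shared_step only reads batch[0] and batch[1].
batch: Tuple[Tensor, ...] = (images, targets, indices)
# The old hint Tuple[Tensor, Tensor] would reject this three-element tuple.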
@@ -133,7 +127,7 @@ def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:
 
     def configure_optimizers(
         self,
-    ) -> Tuple[List[Optimizer], List[Dict[str, Union[LRScheduler, str]]]]:
+    ) -> Tuple[List[Optimizer], List[Dict[str, Union[Any, str]]]]:
         parameters = list(self.classification_head.parameters())
         if not self.freeze_model:
             parameters += self.model.parameters()
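Annotating the scheduler entry as Any goes together with dropping the guarded LRScheduler import above: torch 2.0 renamed _LRScheduler to LRScheduler, and Any sidesteps the version-dependent type. A minimal sketch, not the class's actual configure_optimizers, of a return value that satisfies the new signature (CosineAnnealingLR stands in for the scheduler actually used):

from typing import Any, Dict, List, Tuple, Union

from torch.nn import Linear
from torch.optim import SGD, Optimizer
from torch.optim.lr_scheduler import CosineAnnealingLR


def configure_optimizers_sketch() -> Tuple[List[Optimizer], List[Dict[str, Union[Any, str]]]]:
    head = Linear(2048, 1000)
    optimizer = SGD(head.parameters(), lr=0.1)
    # The scheduler entry is typed as Any, so the hint works on torch < 2.0 and >= 2.0 alike.
    scheduler = {"scheduler": CosineAnnealingLR(optimizer, T_max=90), "interval": "epoch"}
    return [optimizer], [scheduler]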
10 changes: 4 additions & 6 deletions lightly/utils/benchmarking/metric_callback.py
@@ -1,9 +1,11 @@
-from typing import Dict, List
+from typing import Dict, List, Union
 
 from pytorch_lightning import LightningModule, Trainer
 from pytorch_lightning.callbacks import Callback
 from torch import Tensor
 
+MetricValue = Union[Tensor, float]
+
 
 class MetricCallback(Callback):
     """Callback that collects log metrics from the LightningModule and stores them after
@@ -58,8 +60,4 @@ def _append_metrics(
         self, metrics_dict: Dict[str, List[float]], trainer: Trainer
     ) -> None:
         for name, value in trainer.callback_metrics.items():
-            # Only store scalar values.
-            if isinstance(value, float) or (
-                isinstance(value, Tensor) and value.numel() == 1
-            ):
-                metrics_dict.setdefault(name, []).append(float(value))
+            metrics_dict.setdefault(name, []).append(float(value))
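With the scalar-only guard removed, every entry in trainer.callback_metrics is coerced with float(value). A minimal standalone sketch of the simplified collection logic, assuming all logged metrics are scalar tensors or floats (a non-scalar tensor would now raise inside float()):

from typing import Dict, List

import torch
from torch import Tensor


def append_metrics_sketch(
    metrics_dict: Dict[str, List[float]], callback_metrics: Dict[str, Tensor]
) -> None:
    # Mirrors the simplified loop: every metric is converted with float().
    for name, value in callback_metrics.items():
        metrics_dict.setdefault(name, []).append(float(value))


history: Dict[str, List[float]] = {}
append_metrics_sketch(history, {"val_top1": torch.tensor(0.82), "val_loss": torch.tensor(0.47)})
print(history)  # {'val_top1': [...], 'val_loss': [...]}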

