@@ -709,3 +709,111 @@ def configure_optimizers(self):
     # Verify the callback metric tensor was created successfully
     assert "lr-SGD" in trainer.callback_metrics
     assert isinstance(trainer.callback_metrics["lr-SGD"], torch.Tensor)
+
+
+def test_lr_monitor_log_key_prefix(tmp_path):
+    """Test that learning rate metric names are correctly prefixed when log_key_prefix is set."""
+    model = BoringModel()
+
+    lr_monitor = LearningRateMonitor(log_key_prefix="optim/")
+    trainer = Trainer(
+        default_root_dir=tmp_path,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmp_path),
+    )
+    trainer.fit(model)
+
+    assert lr_monitor.lrs, "No learning rates logged"
+    assert list(lr_monitor.lrs) == ["optim/lr-SGD"]
+    assert "optim/lr-SGD" in trainer.callback_metrics
+
+
+def test_lr_monitor_log_key_prefix_with_momentum_and_weight_decay(tmp_path):
+    """Test that the prefix is applied to momentum and weight decay metric names as well."""
+
+    class CustomModel(BoringModel):
+        def configure_optimizers(self):
+            optimizer = optim.Adam(self.parameters(), lr=1e-2, betas=(0.9, 0.999), weight_decay=0.01)
+            lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1)
+            return [optimizer], [lr_scheduler]
+
+    model = CustomModel()
+    lr_monitor = LearningRateMonitor(log_momentum=True, log_weight_decay=True, log_key_prefix="train/")
+    trainer = Trainer(
+        default_root_dir=tmp_path,
+        max_epochs=2,
+        limit_val_batches=2,
+        limit_train_batches=5,
+        log_every_n_steps=1,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmp_path),
+    )
+    trainer.fit(model)
+
+    assert list(lr_monitor.lrs) == ["train/lr-Adam"]
+    assert all(k == "train/lr-Adam-momentum" for k in lr_monitor.last_momentum_values)
+    assert all(k == "train/lr-Adam-weight_decay" for k in lr_monitor.last_weight_decay_values)
+
+
+def test_lr_monitor_log_key_prefix_multi_optimizers(tmp_path):
+    """Test that the prefix is applied correctly with multiple optimizers."""
+
+    class MultiOptModel(BoringModel):
+        def __init__(self):
+            super().__init__()
+            self.automatic_optimization = False
+
+        def training_step(self, batch, batch_idx):
+            opt1, opt2 = self.optimizers()
+
+            loss = self.loss(self.step(batch))
+            opt1.zero_grad()
+            self.manual_backward(loss)
+            opt1.step()
+
+            loss = self.loss(self.step(batch))
+            opt2.zero_grad()
+            self.manual_backward(loss)
+            opt2.step()
+
+        def configure_optimizers(self):
+            optimizer1 = optim.Adam(self.parameters(), lr=1e-2)
+            optimizer2 = optim.SGD(self.parameters(), lr=1e-2)
+            return [optimizer1, optimizer2]
+
+    model = MultiOptModel()
+    lr_monitor = LearningRateMonitor(log_key_prefix="hparams/")
+    trainer = Trainer(
+        default_root_dir=tmp_path,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=5,
+        log_every_n_steps=1,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmp_path),
+    )
+    trainer.fit(model)
+
+    assert lr_monitor.lrs, "No learning rates logged"
+    assert list(lr_monitor.lrs) == ["hparams/lr-Adam", "hparams/lr-SGD"]
+
+
+def test_lr_monitor_log_key_prefix_none(tmp_path):
+    """Test that when log_key_prefix is None (the default), metric names are unchanged."""
+    model = BoringModel()
+
+    lr_monitor = LearningRateMonitor(log_key_prefix=None)
+    trainer = Trainer(
+        default_root_dir=tmp_path,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmp_path),
+    )
+    trainer.fit(model)
+
+    assert list(lr_monitor.lrs) == ["lr-SGD"]
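
Note: these tests assume that LearningRateMonitor accepts a log_key_prefix argument that is prepended verbatim to every metric key it logs (learning rate, momentum, and weight decay), with None meaning no prefix. The prefixing logic itself is not shown in this diff; a minimal, self-contained sketch of the expected key transformation, using a hypothetical add_prefix helper, might look like:

    from typing import Optional

    def add_prefix(name: str, prefix: Optional[str]) -> str:
        # Hypothetical helper (not part of this diff): prepend the optional prefix to a metric key.
        return name if prefix is None else f"{prefix}{name}"

    # Mirrors the key names the tests above expect:
    assert add_prefix("lr-SGD", "optim/") == "optim/lr-SGD"
    assert add_prefix("lr-Adam-momentum", "train/") == "train/lr-Adam-momentum"
    assert add_prefix("lr-SGD", None) == "lr-SGD"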