|
 
 from lightning.pytorch.plugins import MixedPrecision
 from lightning.pytorch.utilities import GradClipAlgorithmType
+from tests_pytorch.helpers.runif import RunIf
 
 
 def test_clip_gradients():
@@ -62,10 +63,111 @@ def test_amp_with_no_grad(): |
     x = torch.randn(1, 2)
     amp = MixedPrecision(precision="bf16-mixed", device="cpu")
 
-    with amp.autocast_context_manager():
+    with amp.forward_context():
         with torch.no_grad():
             _ = layer(x)
 
         loss = layer(x).mean()
     loss.backward()
     assert loss.grad_fn is not None
+
+
+def test_amp_with_inference_mode():
+    """Test that nested `inference_mode` also clears the autocast cache on exit."""
+    layer = nn.Linear(2, 1)
+    x = torch.randn(1, 2)
+    amp = MixedPrecision(precision="bf16-mixed", device="cpu")
+
+    with amp.forward_context():
+        with torch.inference_mode():
+            _ = layer(x)
+
+        loss = layer(x).mean()
+    loss.backward()
+    assert loss.grad_fn is not None
+
+
+def test_amp_forward_context_restores_grad_mode_context_managers():
+    amp = MixedPrecision(precision="bf16-mixed", device="cpu")
+    original_no_grad = torch.no_grad
+    original_inference_mode = torch.inference_mode
+
+    with amp.forward_context():
+        assert torch.no_grad is not original_no_grad
+        assert torch.inference_mode is not original_inference_mode
+
+    assert torch.no_grad is original_no_grad
+    assert torch.inference_mode is original_inference_mode
+
+
+@pytest.mark.parametrize(("cache_enabled", "expect_grad"), [(True, False), (False, True)])
+def test_torch_autocast_cache_behavior_with_no_grad(cache_enabled, expect_grad):
+    """Document the underlying PyTorch autocast behavior that this plugin needs to handle."""
+    layer = nn.Linear(2, 1)
+    x = torch.randn(1, 2)
+
+    with torch.autocast("cpu", dtype=torch.bfloat16, cache_enabled=cache_enabled):
+        with torch.no_grad():
+            _ = layer(x)
+
+        loss = layer(x).mean()
+    if expect_grad:
+        loss.backward()
+        assert loss.grad_fn is not None
+    else:
+        assert loss.grad_fn is None
+        with pytest.raises(RuntimeError, match="does not require grad"):
+            loss.backward()
+
+
+@RunIf(min_cuda_gpus=1)
+@pytest.mark.parametrize(("cache_enabled", "expect_grad"), [(True, False), (False, True)])
+def test_torch_autocast_cache_behavior_with_no_grad_cuda(cache_enabled, expect_grad):
+    """Document the same autocast cache behavior on CUDA, where the reported regression happens."""
+    layer = nn.Linear(2, 1, device="cuda")
+    x = torch.randn(1, 2, device="cuda")
+
+    with torch.autocast("cuda", dtype=torch.float16, cache_enabled=cache_enabled):
+        with torch.no_grad():
+            _ = layer(x)
+
+        loss = layer(x).mean()
+    if expect_grad:
+        loss.backward()
+        assert loss.grad_fn is not None
+    else:
+        assert loss.grad_fn is None
+        with pytest.raises(RuntimeError, match="does not require grad"):
+            loss.backward()
+
+
+@RunIf(min_cuda_gpus=1)
+def test_amp_with_no_grad_cuda():
+    """Test the Lightning workaround on the CUDA path used by the reported regression."""
+    layer = nn.Linear(2, 1, device="cuda")
+    x = torch.randn(1, 2, device="cuda")
+    amp = MixedPrecision(precision="16-mixed", device="cuda")
+
+    with amp.forward_context():
+        with torch.no_grad():
+            _ = layer(x)
+
+        loss = layer(x).mean()
+    loss.backward()
+    assert loss.grad_fn is not None
+
+
+def test_amp_autocast_context_manager_disables_cache():
+    """Test that the public autocast context manager preserves the existing no-cache workaround."""
+    amp = MixedPrecision(precision="bf16-mixed", device="cpu")
+
+    with amp.autocast_context_manager():
+        assert not torch.is_autocast_cache_enabled()
+
+
+def test_amp_forward_context_keeps_cache_enabled():
+    """Test that Lightning's internal step context keeps the cached autocast path enabled."""
+    amp = MixedPrecision(precision="bf16-mixed", device="cpu")
+
+    with amp.forward_context():
+        assert torch.is_autocast_cache_enabled()