---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[17], line 3
1 windows = int((2 * 60 * samples_1min) / horizon)
2 nf = NeuralForecast(models=models, freq='5S')
----> 3 cv_df = nf.cross_validation(main_df, n_windows=windows, step_size=horizon)
4 cv_df.shape
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/neuralforecast/core.py:1187, in NeuralForecast.cross_validation(self, df, static_df, n_windows, step_size, val_size, test_size, use_init_models, verbose, refit, id_col, time_col, target_col, prediction_intervals, level, quantiles, **data_kwargs)
1183 raise ValueError("You can't set both level and quantiles argument.")
1185 if not refit:
-> 1187 return self._no_refit_cross_validation(
1188 df=df,
1189 static_df=static_df,
1190 n_windows=n_windows,
1191 step_size=step_size,
1192 val_size=val_size,
1193 test_size=test_size,
1194 verbose=verbose,
1195 id_col=id_col,
1196 time_col=time_col,
1197 target_col=target_col,
1198 **data_kwargs,
1199 )
1200 if df is None:
1201 raise ValueError("Must specify `df` with `refit!=False`.")
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/neuralforecast/core.py:1036, in NeuralForecast._no_refit_cross_validation(self, df, static_df, n_windows, step_size, val_size, test_size, verbose, id_col, time_col, target_col, **data_kwargs)
1031 if self._add_level and (
1032 model.loss.outputsize_multiplier > 1 or isinstance(model.loss, IQLoss)
1033 ):
1034 continue
-> 1036 model.fit(dataset=self.dataset, val_size=val_size, test_size=test_size)
1037 model_fcsts = model.predict(
1038 self.dataset, step_size=step_size, **data_kwargs
1039 )
1041 # Append predictions in memory placeholder
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/neuralforecast/common/_base_model.py:1468, in BaseModel.fit(self, dataset, val_size, test_size, random_seed, distributed_config)
1439 def fit(
1440 self,
1441 dataset,
(...)
1445 distributed_config=None,
1446 ):
1447 """Fit.
1448
1449 The `fit` method, optimizes the neural network's weights using the
(...)
1466 `test_size`: int, test size for temporal cross-validation.
1467 """
-> 1468 return self._fit(
1469 dataset=dataset,
1470 batch_size=self.batch_size,
1471 valid_batch_size=self.valid_batch_size,
1472 val_size=val_size,
1473 test_size=test_size,
1474 random_seed=random_seed,
1475 distributed_config=distributed_config,
1476 )
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/neuralforecast/common/_base_model.py:546, in BaseModel._fit(self, dataset, batch_size, valid_batch_size, val_size, test_size, random_seed, shuffle_train, distributed_config)
544 model = self
545 trainer = pl.Trainer(**model.trainer_kwargs)
--> 546 trainer.fit(model, datamodule=datamodule)
547 model.metrics = trainer.callback_metrics
548 model.__dict__.pop("_trainer", None)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:539, in Trainer.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
537 self.state.status = TrainerStatus.RUNNING
538 self.training = True
--> 539 call._call_and_handle_interrupt(
540 self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
541 )
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py:47, in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
45 if trainer.strategy.launcher is not None:
46 return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
---> 47 return trainer_fn(*args, **kwargs)
49 except _TunerExitException:
50 _call_teardown_hook(trainer)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:575, in Trainer._fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
568 assert self.state.fn is not None
569 ckpt_path = self._checkpoint_connector._select_ckpt_path(
570 self.state.fn,
571 ckpt_path,
572 model_provided=True,
573 model_connected=self.lightning_module is not None,
574 )
--> 575 self._run(model, ckpt_path=ckpt_path)
577 assert self.state.stopped
578 self.training = False
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:982, in Trainer._run(self, model, ckpt_path)
977 self._signal_connector.register_signal_handlers()
979 # ----------------------------
980 # RUN THE TRAINER
981 # ----------------------------
--> 982 results = self._run_stage()
984 # ----------------------------
985 # POST-Training CLEAN UP
986 # ----------------------------
987 log.debug(f"{self.__class__.__name__}: trainer tearing down")
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1026, in Trainer._run_stage(self)
1024 self._run_sanity_check()
1025 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
-> 1026 self.fit_loop.run()
1027 return None
1028 raise RuntimeError(f"Unexpected state {self.state}")
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py:216, in _FitLoop.run(self)
214 try:
215 self.on_advance_start()
--> 216 self.advance()
217 self.on_advance_end()
218 except StopIteration:
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py:455, in _FitLoop.advance(self)
453 with self.trainer.profiler.profile("run_training_epoch"):
454 assert self._data_fetcher is not None
--> 455 self.epoch_loop.run(self._data_fetcher)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py:150, in _TrainingEpochLoop.run(self, data_fetcher)
148 while not self.done:
149 try:
--> 150 self.advance(data_fetcher)
151 self.on_advance_end(data_fetcher)
152 except StopIteration:
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py:320, in _TrainingEpochLoop.advance(self, data_fetcher)
317 with trainer.profiler.profile("run_training_batch"):
318 if trainer.lightning_module.automatic_optimization:
319 # in automatic optimization, there can only be one optimizer
--> 320 batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
321 else:
322 batch_output = self.manual_optimization.run(kwargs)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py:192, in _AutomaticOptimization.run(self, optimizer, batch_idx, kwargs)
185 closure()
187 # ------------------------------
188 # BACKWARD PASS
189 # ------------------------------
190 # gradient update with accumulated gradients
191 else:
--> 192 self._optimizer_step(batch_idx, closure)
194 result = closure.consume_result()
195 if result.loss is None:
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py:270, in _AutomaticOptimization._optimizer_step(self, batch_idx, train_step_and_backward_closure)
267 self.optim_progress.optimizer.step.increment_ready()
269 # model hook
--> 270 call._call_lightning_module_hook(
271 trainer,
272 "optimizer_step",
273 trainer.current_epoch,
274 batch_idx,
275 optimizer,
276 train_step_and_backward_closure,
277 )
279 if not should_accumulate:
280 self.optim_progress.optimizer.step.increment_completed()
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py:171, in _call_lightning_module_hook(trainer, hook_name, pl_module, *args, **kwargs)
168 pl_module._current_fx_name = hook_name
170 with trainer.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
--> 171 output = fn(*args, **kwargs)
173 # restore current_fx when nested context
174 pl_module._current_fx_name = prev_fx_name
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/core/module.py:1302, in LightningModule.optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure)
1271 def optimizer_step(
1272 self,
1273 epoch: int,
(...)
1276 optimizer_closure: Optional[Callable[[], Any]] = None,
1277 ) -> None:
1278 r"""Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` calls
1279 the optimizer.
1280
(...)
1300
1301 """
-> 1302 optimizer.step(closure=optimizer_closure)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/core/optimizer.py:154, in LightningOptimizer.step(self, closure, **kwargs)
151 raise MisconfigurationException("When `optimizer.step(closure)` is called, the closure should be callable")
153 assert self._strategy is not None
--> 154 step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
156 self._on_after_step()
158 return step_output
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py:239, in Strategy.optimizer_step(self, optimizer, closure, model, **kwargs)
237 # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
238 assert isinstance(model, pl.LightningModule)
--> 239 return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/precision.py:123, in Precision.optimizer_step(self, optimizer, model, closure, **kwargs)
121 """Hook to run the optimizer step."""
122 closure = partial(self._wrap_closure, model, optimizer, closure)
--> 123 return optimizer.step(closure=closure, **kwargs)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/torch/optim/lr_scheduler.py:140, in LRScheduler.__init__.<locals>.patch_track_step_called.<locals>.wrap_step.<locals>.wrapper(*args, **kwargs)
138 opt = opt_ref()
139 opt._opt_called = True # type: ignore[union-attr]
--> 140 return func.__get__(opt, opt.__class__)(*args, **kwargs)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/torch/optim/optimizer.py:493, in Optimizer.profile_hook_step.<locals>.wrapper(*args, **kwargs)
488 else:
489 raise RuntimeError(
490 f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
491 )
--> 493 out = func(*args, **kwargs)
494 self._optimizer_step_code()
496 # call optimizer step post hooks
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/torch/optim/optimizer.py:91, in _use_grad_for_differentiable.<locals>._use_grad(self, *args, **kwargs)
89 torch.set_grad_enabled(self.defaults["differentiable"])
90 torch._dynamo.graph_break()
---> 91 ret = func(self, *args, **kwargs)
92 finally:
93 torch._dynamo.graph_break()
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/torch/optim/adam.py:223, in Adam.step(self, closure)
221 if closure is not None:
222 with torch.enable_grad():
--> 223 loss = closure()
225 for group in self.param_groups:
226 params_with_grad: List[Tensor] = []
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/precision.py:109, in Precision._wrap_closure(self, model, optimizer, closure)
96 def _wrap_closure(
97 self,
98 model: "pl.LightningModule",
99 optimizer: Steppable,
100 closure: Callable[[], Any],
101 ) -> Any:
102 """This double-closure allows makes sure the ``closure`` is executed before the ``on_before_optimizer_step``
103 hook is called.
104
(...)
107
108 """
--> 109 closure_result = closure()
110 self._after_closure(model, optimizer)
111 return closure_result
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py:146, in Closure.__call__(self, *args, **kwargs)
144 @override
145 def __call__(self, *args: Any, **kwargs: Any) -> Optional[Tensor]:
--> 146 self._result = self.closure(*args, **kwargs)
147 return self._result.loss
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/torch/utils/_contextlib.py:116, in context_decorator.<locals>.decorate_context(*args, **kwargs)
113 @functools.wraps(func)
114 def decorate_context(*args, **kwargs):
115 with ctx_factory():
--> 116 return func(*args, **kwargs)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py:131, in Closure.closure(self, *args, **kwargs)
128 @override
129 @torch.enable_grad()
130 def closure(self, *args: Any, **kwargs: Any) -> ClosureResult:
--> 131 step_output = self._step_fn()
133 if step_output.closure_loss is None:
134 self.warning_cache.warn("`training_step` returned `None`. If this was on purpose, ignore this warning...")
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py:319, in _AutomaticOptimization._training_step(self, kwargs)
308 """Performs the actual train step with the tied hooks.
309
310 Args:
(...)
315
316 """
317 trainer = self.trainer
--> 319 training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
320 self.trainer.strategy.post_training_step() # unused hook - call anyway for backward compatibility
322 if training_step_output is None and trainer.world_size > 1:
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py:323, in _call_strategy_hook(trainer, hook_name, *args, **kwargs)
320 return None
322 with trainer.profiler.profile(f"[Strategy]{trainer.strategy.__class__.__name__}.{hook_name}"):
--> 323 output = fn(*args, **kwargs)
325 # restore current_fx when nested context
326 pl_module._current_fx_name = prev_fx_name
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py:391, in Strategy.training_step(self, *args, **kwargs)
389 if self.model != self.lightning_module:
390 return self._forward_redirection(self.model, self.lightning_module, "training_step", *args, **kwargs)
--> 391 return self.lightning_module.training_step(*args, **kwargs)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/neuralforecast/common/_base_model.py:1234, in BaseModel.training_step(self, batch, batch_idx)
1231 # windows: [Ws, L + h, C, n_series] or [Ws, L + h, C]
1232 y_idx = batch["y_idx"]
-> 1234 windows = self._create_windows(batch, step="train")
1235 original_outsample_y = torch.clone(
1236 windows["temporal"][:, self.input_size :, y_idx]
1237 )
1238 windows = self._normalization(windows=windows, y_idx=y_idx)
File ~/Repos/DeepSeekExperiments/.venv/lib/python3.9/site-packages/neuralforecast/common/_base_model.py:694, in BaseModel._create_windows(self, batch, step, w_idxs)
689 sample_condition = torch.sum(
690 sample_condition, axis=(1, -1)
691 ) # Sum over time & series dimension
692 final_condition = (sample_condition > 0) & (available_condition > 0)
--> 694 windows = windows[final_condition]
696 # Parse Static data to match windows
697 static = batch.get("static", None)
RuntimeError: Invalid buffer size: 17.62 GB
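The failure originates in BaseModel._create_windows during the training step: boolean-indexing the unfolded window tensor (windows[final_condition]) requests a single ~17.6 GB allocation, and the "Invalid buffer size" wording is what PyTorch's Apple MPS backend typically raises when one allocation exceeds the device's working-set limit. The snippet below is a rough sketch of how the memory pressure might be reduced, not a verified fix: it assumes the entries of models are window-based neuralforecast models such as NHITS (which accept h, input_size, batch_size, windows_batch_size, and extra keyword arguments forwarded to pl.Trainer), and it reuses horizon, samples_1min, and main_df from the earlier cells.

# Hypothetical mitigation sketch: smaller window tensors per training batch,
# and CPU training to sidestep the MPS allocation limit.
from neuralforecast import NeuralForecast
from neuralforecast.models import NHITS

models = [
    NHITS(
        h=horizon,
        input_size=2 * horizon,    # shorter lookback -> fewer values per window
        batch_size=8,              # fewer series per training batch
        windows_batch_size=128,    # fewer sampled windows per training step
        accelerator="cpu",         # forwarded to pl.Trainer; avoids the MPS buffer limit
        max_steps=100,
    )
]

windows = int((2 * 60 * samples_1min) / horizon)
nf = NeuralForecast(models=models, freq='5S')
cv_df = nf.cross_validation(main_df, n_windows=windows, step_size=horizon)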