
reduce_default_windows_batch_size_multivariate
elephaint committed Sep 26, 2024
1 parent ff89950 commit b3fafc3
Showing 12 changed files with 48 additions and 48 deletions.
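Every changed file makes the same pair of edits: the defaults for `windows_batch_size` and `inference_windows_batch_size` drop from 1024 to 128 for iTransformer and to 256 for the other multivariate models, in both the source notebooks under nbs/ and the exported modules under neuralforecast/models/. Below is a minimal sketch of how a caller could pin the previous default after upgrading; the model choice and the h, input_size, n_series, and freq values are illustrative, not part of this commit.

from neuralforecast import NeuralForecast
from neuralforecast.models import TSMixer

model = TSMixer(
    h=12,                               # forecast horizon
    input_size=24,                      # lookback window length
    n_series=7,                         # series in the multivariate panel
    windows_batch_size=1024,            # restore the pre-commit default
    inference_windows_batch_size=1024,  # likewise for validation/predict
)
nf = NeuralForecast(models=[model], freq="M")
# nf.fit(df) expects a long-format DataFrame with unique_id, ds, y columns.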
8 changes: 4 additions & 4 deletions nbs/models.itransformer.ipynb
@@ -226,8 +226,8 @@
 "        `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
 "        `batch_size`: int=32, number of different series in each batch.<br>\n",
 "        `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
-"        `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
-"        `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
+"        `windows_batch_size`: int=128, number of windows to sample in each training batch, default uses all.<br>\n",
+"        `inference_windows_batch_size`: int=128, number of windows to sample in each inference batch, -1 uses all.<br>\n",
 "        `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
 "        `step_size`: int=1, step size between each window of temporal data.<br>\n",
 "        `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
@@ -277,8 +277,8 @@
 "        val_check_steps: int = 100,\n",
 "        batch_size: int = 32,\n",
 "        valid_batch_size: Optional[int] = None,\n",
-"        windows_batch_size = 1024,\n",
-"        inference_windows_batch_size = 1024,\n",
+"        windows_batch_size = 128,\n",
+"        inference_windows_batch_size = 128,\n",
 "        start_padding_enabled = False,\n",
 "        step_size: int = 1,\n",
 "        scaler_type: str = 'identity',\n",
8 changes: 4 additions & 4 deletions nbs/models.mlpmultivariate.ipynb
@@ -107,8 +107,8 @@
 "        `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
 "        `batch_size`: int=32, number of different series in each batch.<br>\n",
 "        `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
-"        `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
-"        `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
+"        `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>\n",
+"        `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>\n",
 "        `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
 "        `step_size`: int=1, step size between each window of temporal data.<br>\n",
 "        `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
@@ -148,8 +148,8 @@
 "        val_check_steps: int = 100,\n",
 "        batch_size: int = 32,\n",
 "        valid_batch_size: Optional[int] = None,\n",
-"        windows_batch_size = 1024,\n",
-"        inference_windows_batch_size = 1024,\n",
+"        windows_batch_size = 256,\n",
+"        inference_windows_batch_size = 256,\n",
 "        start_padding_enabled = False,\n",
 "        step_size: int = 1,\n",
 "        scaler_type: str = 'identity',\n",
8 changes: 4 additions & 4 deletions nbs/models.softs.ipynb
@@ -199,8 +199,8 @@
 "        `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
 "        `batch_size`: int=32, number of different series in each batch.<br>\n",
 "        `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
-"        `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
-"        `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
+"        `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>\n",
+"        `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>\n",
 "        `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
 "        `step_size`: int=1, step size between each window of temporal data.<br>\n",
 "        `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
@@ -248,8 +248,8 @@
 "        val_check_steps: int = 100,\n",
 "        batch_size: int = 32,\n",
 "        valid_batch_size: Optional[int] = None,\n",
-"        windows_batch_size = 1024,\n",
-"        inference_windows_batch_size = 1024,\n",
+"        windows_batch_size = 256,\n",
+"        inference_windows_batch_size = 256,\n",
 "        start_padding_enabled = False,\n",
 "        step_size: int = 1,\n",
 "        scaler_type: str = 'identity',\n",
8 changes: 4 additions & 4 deletions nbs/models.timemixer.ipynb
@@ -358,8 +358,8 @@
 "        `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
 "        `batch_size`: int=32, number of different series in each batch.<br>\n",
 "        `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
-"        `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
-"        `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
+"        `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>\n",
+"        `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>\n",
 "        `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
 "        `step_size`: int=1, step size between each window of temporal data.<br>\n",
 "        `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
@@ -413,8 +413,8 @@
 "        val_check_steps: int = 100,\n",
 "        batch_size: int = 32,\n",
 "        valid_batch_size: Optional[int] = None,\n",
-"        windows_batch_size = 1024,\n",
-"        inference_windows_batch_size = 1024,\n",
+"        windows_batch_size = 256,\n",
+"        inference_windows_batch_size = 256,\n",
 "        start_padding_enabled = False,\n",
 "        step_size: int = 1,\n",
 "        scaler_type: str = 'identity',\n",
8 changes: 4 additions & 4 deletions nbs/models.tsmixer.ipynb
@@ -200,8 +200,8 @@
 "        `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
 "        `batch_size`: int=32, number of different series in each batch.<br>\n",
 "        `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
-"        `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
-"        `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
+"        `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>\n",
+"        `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>\n",
 "        `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
 "        `step_size`: int=1, step size between each window of temporal data.<br>\n",
 "        `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
@@ -247,8 +247,8 @@
 "        val_check_steps: int = 100,\n",
 "        batch_size: int = 32,\n",
 "        valid_batch_size: Optional[int] = None,\n",
-"        windows_batch_size = 1024,\n",
-"        inference_windows_batch_size = 1024,\n",
+"        windows_batch_size = 256,\n",
+"        inference_windows_batch_size = 256,\n",
 "        start_padding_enabled = False,\n",
 "        step_size: int = 1,\n",
 "        scaler_type: str = 'identity',\n",
8 changes: 4 additions & 4 deletions nbs/models.tsmixerx.ipynb
@@ -274,8 +274,8 @@
 "        `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
 "        `batch_size`: int=32, number of different series in each batch.<br>\n",
 "        `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
-"        `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
-"        `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
+"        `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>\n",
+"        `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>\n",
 "        `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
 "        `step_size`: int=1, step size between each window of temporal data.<br>\n",
 "        `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
@@ -321,8 +321,8 @@
 "        val_check_steps: int = 100,\n",
 "        batch_size: int = 32,\n",
 "        valid_batch_size: Optional[int] = None,\n",
-"        windows_batch_size = 1024,\n",
-"        inference_windows_batch_size = 1024,\n",
+"        windows_batch_size = 256,\n",
+"        inference_windows_batch_size = 256,\n",
 "        start_padding_enabled = False,\n",
 "        step_size: int = 1,\n",
 "        scaler_type: str = 'identity',\n",
8 changes: 4 additions & 4 deletions neuralforecast/models/itransformer.py
@@ -129,8 +129,8 @@ class iTransformer(BaseModel):
     `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
     `batch_size`: int=32, number of different series in each batch.<br>
     `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
-    `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>
-    `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
+    `windows_batch_size`: int=128, number of windows to sample in each training batch, default uses all.<br>
+    `inference_windows_batch_size`: int=128, number of windows to sample in each inference batch, -1 uses all.<br>
     `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
     `step_size`: int=1, step size between each window of temporal data.<br>
     `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
@@ -181,8 +181,8 @@ def __init__(
         val_check_steps: int = 100,
         batch_size: int = 32,
         valid_batch_size: Optional[int] = None,
-        windows_batch_size=1024,
-        inference_windows_batch_size=1024,
+        windows_batch_size=128,
+        inference_windows_batch_size=128,
         start_padding_enabled=False,
         step_size: int = 1,
         scaler_type: str = "identity",
8 changes: 4 additions & 4 deletions neuralforecast/models/mlpmultivariate.py
@@ -39,8 +39,8 @@ class MLPMultivariate(BaseModel):
     `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
     `batch_size`: int=32, number of different series in each batch.<br>
     `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
-    `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>
-    `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
+    `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>
+    `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>
     `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
     `step_size`: int=1, step size between each window of temporal data.<br>
     `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
@@ -84,8 +84,8 @@ def __init__(
         val_check_steps: int = 100,
         batch_size: int = 32,
         valid_batch_size: Optional[int] = None,
-        windows_batch_size=1024,
-        inference_windows_batch_size=1024,
+        windows_batch_size=256,
+        inference_windows_batch_size=256,
         start_padding_enabled=False,
         step_size: int = 1,
         scaler_type: str = "identity",
8 changes: 4 additions & 4 deletions neuralforecast/models/softs.py
@@ -105,8 +105,8 @@ class SOFTS(BaseModel):
     `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
     `batch_size`: int=32, number of different series in each batch.<br>
     `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
-    `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>
-    `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
+    `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>
+    `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>
     `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
     `step_size`: int=1, step size between each window of temporal data.<br>
     `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
@@ -155,8 +155,8 @@ def __init__(
         val_check_steps: int = 100,
         batch_size: int = 32,
         valid_batch_size: Optional[int] = None,
-        windows_batch_size=1024,
-        inference_windows_batch_size=1024,
+        windows_batch_size=256,
+        inference_windows_batch_size=256,
         start_padding_enabled=False,
         step_size: int = 1,
         scaler_type: str = "identity",
8 changes: 4 additions & 4 deletions neuralforecast/models/timemixer.py
@@ -280,8 +280,8 @@ class TimeMixer(BaseModel):
     `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
     `batch_size`: int=32, number of different series in each batch.<br>
     `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
-    `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>
-    `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
+    `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>
+    `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>
     `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
     `step_size`: int=1, step size between each window of temporal data.<br>
     `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
@@ -338,8 +338,8 @@ def __init__(
         val_check_steps: int = 100,
         batch_size: int = 32,
         valid_batch_size: Optional[int] = None,
-        windows_batch_size=1024,
-        inference_windows_batch_size=1024,
+        windows_batch_size=256,
+        inference_windows_batch_size=256,
         start_padding_enabled=False,
         step_size: int = 1,
         scaler_type: str = "identity",
8 changes: 4 additions & 4 deletions neuralforecast/models/tsmixer.py
@@ -119,8 +119,8 @@ class TSMixer(BaseModel):
     `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
     `batch_size`: int=32, number of different series in each batch.<br>
     `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
-    `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>
-    `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
+    `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.<br>
+    `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>
     `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
     `step_size`: int=1, step size between each window of temporal data.<br>
     `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
@@ -170,8 +170,8 @@ def __init__(
         val_check_steps: int = 100,
         batch_size: int = 32,
         valid_batch_size: Optional[int] = None,
-        windows_batch_size=1024,
-        inference_windows_batch_size=1024,
+        windows_batch_size=256,
+        inference_windows_batch_size=256,
         start_padding_enabled=False,
         step_size: int = 1,
         scaler_type: str = "identity",
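Conceptually, `windows_batch_size` caps how many temporal windows a single training step sees, and `inference_windows_batch_size` does the same at predict time, so these lower defaults trade a little extra gradient noise for a roughly 4x (8x for iTransformer) smaller per-step footprint on wide multivariate panels. The following is an illustrative, self-contained sketch of that sampling step, assuming windows are already materialized as a tensor; it is not the library's internal code.

import torch

def sample_windows(windows: torch.Tensor, windows_batch_size: int) -> torch.Tensor:
    # windows: [n_windows, window_length, n_features]; for multivariate models
    # each window spans every series, which is why large defaults are costly.
    n_windows = windows.shape[0]
    if windows_batch_size >= n_windows:
        return windows  # budget exceeds availability: use every window
    idx = torch.randperm(n_windows)[:windows_batch_size]  # sample without replacement
    return windows[idx]

batch = sample_windows(torch.randn(5000, 48, 7), windows_batch_size=256)
print(batch.shape)  # torch.Size([256, 48, 7])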