diff --git a/src/airunner/widgets/memory_preferences/memory_preferences_widget.py b/src/airunner/widgets/memory_preferences/memory_preferences_widget.py
new file mode 100644
index 000000000..cc532368c
--- /dev/null
+++ b/src/airunner/widgets/memory_preferences/memory_preferences_widget.py
@@ -0,0 +1,70 @@
+from airunner.widgets.base_widget import BaseWidget
+from airunner.widgets.memory_preferences.templates.memory_preferences_ui import Ui_memory_preferences
+
+
+class MemoryPreferencesWidget(BaseWidget):
+    widget_class_ = Ui_memory_preferences
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.ui.use_accelerated_transformers.blockSignals(True)
+        self.ui.use_attention_slicing.blockSignals(True)
+        self.ui.use_enable_sequential_cpu_offload.blockSignals(True)
+        self.ui.enable_model_cpu_offload.blockSignals(True)
+        self.ui.use_lastchannels.blockSignals(True)
+        self.ui.use_tf32.blockSignals(True)
+        self.ui.use_tiled_vae.blockSignals(True)
+        self.ui.use_enable_vae_slicing.blockSignals(True)
+
+        self.ui.use_accelerated_transformers.setChecked(self.settings_manager.memory_settings.use_accelerated_transformers is True)
+        self.ui.use_attention_slicing.setChecked(self.settings_manager.memory_settings.use_attention_slicing is True)
+        self.ui.use_enable_sequential_cpu_offload.setChecked(
+            self.settings_manager.memory_settings.use_enable_sequential_cpu_offload is True)
+        self.ui.enable_model_cpu_offload.setChecked(
+            self.settings_manager.memory_settings.enable_model_cpu_offload is True
+        )
+        self.ui.use_lastchannels.setChecked(self.settings_manager.memory_settings.use_last_channels is True)
+        self.ui.use_tf32.setChecked(self.settings_manager.memory_settings.use_tf32 is True)
+        self.ui.use_tiled_vae.setChecked(self.settings_manager.memory_settings.use_tiled_vae is True)
+        self.ui.use_enable_vae_slicing.setChecked(self.settings_manager.memory_settings.use_enable_vae_slicing is True)
+
+        self.ui.use_accelerated_transformers.blockSignals(False)
+        self.ui.use_attention_slicing.blockSignals(False)
+        self.ui.use_enable_sequential_cpu_offload.blockSignals(False)
+        self.ui.enable_model_cpu_offload.blockSignals(False)
+        self.ui.use_lastchannels.blockSignals(False)
+        self.ui.use_tf32.blockSignals(False)
+        self.ui.use_tiled_vae.blockSignals(False)
+        self.ui.use_enable_vae_slicing.blockSignals(False)
+
+    def action_toggled_tile_vae(self, val):
+        self.settings_manager.set_value("memory_settings.use_tiled_vae", val)
+
+    def action_toggled_tf32(self, val):
+        self.settings_manager.set_value("memory_settings.use_tf32", val)
+
+    def action_toggled_last_memory(self, val):
+        self.settings_manager.set_value("memory_settings.use_last_channels", val)
+
+    def action_toggled_vae_slicing(self, val):
+        self.settings_manager.set_value("memory_settings.use_enable_vae_slicing", val)
+
+    def action_toggled_sequential_cpu_offload(self, val):
+        self.settings_manager.set_value("memory_settings.use_enable_sequential_cpu_offload", val)
+
+    def action_toggled_attention_slicing(self, val):
+        self.settings_manager.set_value("memory_settings.use_attention_slicing", val)
+
+    def action_toggled_accelerated_transformers(self, val):
+        self.settings_manager.set_value("memory_settings.use_accelerated_transformers", val)
+
+    def action_button_clicked_optimize_memory_settings(self):
+        self.ui.use_accelerated_transformers.setChecked(True)
+        self.ui.use_attention_slicing.setChecked(False)
+        self.ui.use_lastchannels.setChecked(True)
+        self.ui.use_enable_sequential_cpu_offload.setChecked(False)
self.ui.enable_model_cpu_offload.setChecked(False) + self.ui.use_tf32.setChecked(False) + self.ui.use_tiled_vae.setChecked(True) + self.ui.use_enable_vae_slicing.setChecked(True) diff --git a/src/airunner/widgets/memory_preferences/templates/memory_preferences.ui b/src/airunner/widgets/memory_preferences/templates/memory_preferences.ui index 44917f5e4..d5e972f70 100644 --- a/src/airunner/widgets/memory_preferences/templates/memory_preferences.ui +++ b/src/airunner/widgets/memory_preferences/templates/memory_preferences.ui @@ -1,13 +1,13 @@ - Form - + memory_preferences + 0 0 352 - 504 + 544 @@ -365,5 +365,161 @@ - + + + use_accelerated_transformers + toggled(bool) + memory_preferences + action_toggled_accelerated_transformers(bool) + + + 145 + 62 + + + 5 + 5 + + + + + use_attention_slicing + toggled(bool) + memory_preferences + action_toggled_attention_slicing(bool) + + + 86 + 142 + + + 2 + 39 + + + + + use_lastchannels + toggled(bool) + memory_preferences + action_toggled_last_memory(bool) + + + 163 + 198 + + + 271 + 6 + + + + + use_enable_sequential_cpu_offload + toggled(bool) + memory_preferences + action_toggled_sequential_cpu_offload(bool) + + + 124 + 255 + + + 258 + 0 + + + + + enable_model_cpu_offload + toggled(bool) + memory_preferences + action_toggled_sequential_cpu_offload(bool) + + + 75 + 321 + + + 2 + 268 + + + + + use_tf32 + toggled(bool) + memory_preferences + action_toggled_tf32(bool) + + + 37 + 382 + + + -3 + 318 + + + + + use_enable_vae_slicing + toggled(bool) + memory_preferences + action_toggled_vae_slicing(bool) + + + 93 + 438 + + + 0 + 388 + + + + + use_tiled_vae + toggled(bool) + memory_preferences + action_toggled_tile_vae(bool) + + + 40 + 495 + + + -1 + 440 + + + + + optimize_memory_button + clicked() + memory_preferences + action_button_clicked_optimize_memory_settings() + + + 155 + 28 + + + 176 + -14 + + + + + + action_button_clicked_optimize_memory_settings() + action_toggled_accelerated_transformers(bool) + action_toggled_attention_slicing(bool) + action_toggled_last_memory(bool) + action_toggled_sequential_cpu_offload(bool) + action_toggled_model_cpu_offload(bool) + action_toggled_tf32(bool) + action_toggled_vae_slicing(bool) + action_toggled_tile_vae(bool) + diff --git a/src/airunner/widgets/memory_preferences/templates/memory_preferences_ui.py b/src/airunner/widgets/memory_preferences/templates/memory_preferences_ui.py index 3108603b8..cd31514b7 100644 --- a/src/airunner/widgets/memory_preferences/templates/memory_preferences_ui.py +++ b/src/airunner/widgets/memory_preferences/templates/memory_preferences_ui.py @@ -9,30 +9,30 @@ from PyQt6 import QtCore, QtGui, QtWidgets -class Ui_Form(object): - def setupUi(self, Form): - Form.setObjectName("Form") - Form.resize(352, 504) - self.gridLayout = QtWidgets.QGridLayout(Form) +class Ui_memory_preferences(object): + def setupUi(self, memory_preferences): + memory_preferences.setObjectName("memory_preferences") + memory_preferences.resize(352, 544) + self.gridLayout = QtWidgets.QGridLayout(memory_preferences) self.gridLayout.setObjectName("gridLayout") self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setObjectName("verticalLayout_3") - self.optimize_memory_button = QtWidgets.QPushButton(parent=Form) + self.optimize_memory_button = QtWidgets.QPushButton(parent=memory_preferences) self.optimize_memory_button.setObjectName("optimize_memory_button") self.verticalLayout_3.addWidget(self.optimize_memory_button) - self.line_7 = QtWidgets.QFrame(parent=Form) + self.line_7 = 
QtWidgets.QFrame(parent=memory_preferences) self.line_7.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_7.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_7.setObjectName("line_7") self.verticalLayout_3.addWidget(self.line_7) - self.use_accelerated_transformers = QtWidgets.QCheckBox(parent=Form) + self.use_accelerated_transformers = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_accelerated_transformers.setFont(font) self.use_accelerated_transformers.setObjectName("use_accelerated_transformers") self.verticalLayout_3.addWidget(self.use_accelerated_transformers) - self.label_8 = QtWidgets.QLabel(parent=Form) + self.label_8 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -40,25 +40,25 @@ def setupUi(self, Form): self.label_8.setIndent(-1) self.label_8.setObjectName("label_8") self.verticalLayout_3.addWidget(self.label_8) - self.label_9 = QtWidgets.QLabel(parent=Form) + self.label_9 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) self.label_9.setFont(font) self.label_9.setObjectName("label_9") self.verticalLayout_3.addWidget(self.label_9) - self.line_8 = QtWidgets.QFrame(parent=Form) + self.line_8 = QtWidgets.QFrame(parent=memory_preferences) self.line_8.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_8.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_8.setObjectName("line_8") self.verticalLayout_3.addWidget(self.line_8) - self.use_attention_slicing = QtWidgets.QCheckBox(parent=Form) + self.use_attention_slicing = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_attention_slicing.setFont(font) self.use_attention_slicing.setObjectName("use_attention_slicing") self.verticalLayout_3.addWidget(self.use_attention_slicing) - self.label_4 = QtWidgets.QLabel(parent=Form) + self.label_4 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -66,19 +66,19 @@ def setupUi(self, Form): self.label_4.setIndent(-1) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) - self.line_4 = QtWidgets.QFrame(parent=Form) + self.line_4 = QtWidgets.QFrame(parent=memory_preferences) self.line_4.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_4.setObjectName("line_4") self.verticalLayout_3.addWidget(self.line_4) - self.use_lastchannels = QtWidgets.QCheckBox(parent=Form) + self.use_lastchannels = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_lastchannels.setFont(font) self.use_lastchannels.setObjectName("use_lastchannels") self.verticalLayout_3.addWidget(self.use_lastchannels) - self.label_3 = QtWidgets.QLabel(parent=Form) + self.label_3 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -86,19 +86,19 @@ def setupUi(self, Form): self.label_3.setIndent(-1) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) - self.line = QtWidgets.QFrame(parent=Form) + self.line = QtWidgets.QFrame(parent=memory_preferences) self.line.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line.setObjectName("line") self.verticalLayout_3.addWidget(self.line) - self.use_enable_sequential_cpu_offload = 
QtWidgets.QCheckBox(parent=Form) + self.use_enable_sequential_cpu_offload = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_enable_sequential_cpu_offload.setFont(font) self.use_enable_sequential_cpu_offload.setObjectName("use_enable_sequential_cpu_offload") self.verticalLayout_3.addWidget(self.use_enable_sequential_cpu_offload) - self.label = QtWidgets.QLabel(parent=Form) + self.label = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -106,19 +106,19 @@ def setupUi(self, Form): self.label.setIndent(-1) self.label.setObjectName("label") self.verticalLayout_3.addWidget(self.label) - self.line_2 = QtWidgets.QFrame(parent=Form) + self.line_2 = QtWidgets.QFrame(parent=memory_preferences) self.line_2.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_2.setObjectName("line_2") self.verticalLayout_3.addWidget(self.line_2) - self.enable_model_cpu_offload = QtWidgets.QCheckBox(parent=Form) + self.enable_model_cpu_offload = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.enable_model_cpu_offload.setFont(font) self.enable_model_cpu_offload.setObjectName("enable_model_cpu_offload") self.verticalLayout_3.addWidget(self.enable_model_cpu_offload) - self.label_2 = QtWidgets.QLabel(parent=Form) + self.label_2 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -126,19 +126,19 @@ def setupUi(self, Form): self.label_2.setIndent(-1) self.label_2.setObjectName("label_2") self.verticalLayout_3.addWidget(self.label_2) - self.line_3 = QtWidgets.QFrame(parent=Form) + self.line_3 = QtWidgets.QFrame(parent=memory_preferences) self.line_3.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout_3.addWidget(self.line_3) - self.use_tf32 = QtWidgets.QCheckBox(parent=Form) + self.use_tf32 = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_tf32.setFont(font) self.use_tf32.setObjectName("use_tf32") self.verticalLayout_3.addWidget(self.use_tf32) - self.label_5 = QtWidgets.QLabel(parent=Form) + self.label_5 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -146,19 +146,19 @@ def setupUi(self, Form): self.label_5.setIndent(-1) self.label_5.setObjectName("label_5") self.verticalLayout_3.addWidget(self.label_5) - self.line_5 = QtWidgets.QFrame(parent=Form) + self.line_5 = QtWidgets.QFrame(parent=memory_preferences) self.line_5.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_5.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_5.setObjectName("line_5") self.verticalLayout_3.addWidget(self.line_5) - self.use_enable_vae_slicing = QtWidgets.QCheckBox(parent=Form) + self.use_enable_vae_slicing = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_enable_vae_slicing.setFont(font) self.use_enable_vae_slicing.setObjectName("use_enable_vae_slicing") self.verticalLayout_3.addWidget(self.use_enable_vae_slicing) - self.label_6 = QtWidgets.QLabel(parent=Form) + self.label_6 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -166,19 +166,19 @@ def 
setupUi(self, Form): self.label_6.setIndent(-1) self.label_6.setObjectName("label_6") self.verticalLayout_3.addWidget(self.label_6) - self.line_6 = QtWidgets.QFrame(parent=Form) + self.line_6 = QtWidgets.QFrame(parent=memory_preferences) self.line_6.setFrameShape(QtWidgets.QFrame.Shape.HLine) self.line_6.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken) self.line_6.setObjectName("line_6") self.verticalLayout_3.addWidget(self.line_6) - self.use_tiled_vae = QtWidgets.QCheckBox(parent=Form) + self.use_tiled_vae = QtWidgets.QCheckBox(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) self.use_tiled_vae.setFont(font) self.use_tiled_vae.setObjectName("use_tiled_vae") self.verticalLayout_3.addWidget(self.use_tiled_vae) - self.label_7 = QtWidgets.QLabel(parent=Form) + self.label_7 = QtWidgets.QLabel(parent=memory_preferences) font = QtGui.QFont() font.setPointSize(10) font.setItalic(False) @@ -190,35 +190,44 @@ def setupUi(self, Form): self.verticalLayout_3.addItem(spacerItem) self.gridLayout.addLayout(self.verticalLayout_3, 0, 0, 1, 1) - self.retranslateUi(Form) - QtCore.QMetaObject.connectSlotsByName(Form) + self.retranslateUi(memory_preferences) + self.use_accelerated_transformers.toggled['bool'].connect(memory_preferences.action_toggled_accelerated_transformers) # type: ignore + self.use_attention_slicing.toggled['bool'].connect(memory_preferences.action_toggled_attention_slicing) # type: ignore + self.use_lastchannels.toggled['bool'].connect(memory_preferences.action_toggled_last_memory) # type: ignore + self.use_enable_sequential_cpu_offload.toggled['bool'].connect(memory_preferences.action_toggled_sequential_cpu_offload) # type: ignore + self.enable_model_cpu_offload.toggled['bool'].connect(memory_preferences.action_toggled_sequential_cpu_offload) # type: ignore + self.use_tf32.toggled['bool'].connect(memory_preferences.action_toggled_tf32) # type: ignore + self.use_enable_vae_slicing.toggled['bool'].connect(memory_preferences.action_toggled_vae_slicing) # type: ignore + self.use_tiled_vae.toggled['bool'].connect(memory_preferences.action_toggled_tile_vae) # type: ignore + self.optimize_memory_button.clicked.connect(memory_preferences.action_button_clicked_optimize_memory_settings) # type: ignore + QtCore.QMetaObject.connectSlotsByName(memory_preferences) - def retranslateUi(self, Form): + def retranslateUi(self, memory_preferences): _translate = QtCore.QCoreApplication.translate - Form.setWindowTitle(_translate("Form", "Form")) - self.optimize_memory_button.setText(_translate("Form", "Optimize Memory Settings")) - self.use_accelerated_transformers.setToolTip(_translate("Form", "

Optimized and memory-efficient attention implementation.

")) - self.use_accelerated_transformers.setText(_translate("Form", "Accelerated Transformers")) - self.label_8.setText(_translate("Form", "Faster inference, lower VRAM usage")) - self.label_9.setText(_translate("Form", "Keep this checked to take advantage of torch 2.0")) - self.use_attention_slicing.setToolTip(_translate("Form", "

Perform computation in steps instead of all at once.

About 10% slower inference times.

Uses as little as 3.2 GB of VRAM.

")) - self.use_attention_slicing.setText(_translate("Form", "Attention Slicing")) - self.label_4.setText(_translate("Form", "Less VRAM usage, slight inference impact")) - self.use_lastchannels.setToolTip(_translate("Form", "

Alternative way of ordering NCHW tensors in memory preserving dimensions ordering. Channels last tensors ordered in such a way that channels become the densest dimension (aka storing images pixel-per-pixel).

Since not all operators currently support channels last format it may result in a worst performance, so it’s better to try it and see if it works for your model.

")) - self.use_lastchannels.setText(_translate("Form", "Channels last memory")) - self.label_3.setText(_translate("Form", "May slow inference on some models, speed up on others")) - self.use_enable_sequential_cpu_offload.setToolTip(_translate("Form", "

Use with attention slicing for lower memory consumption.

Offloads the weights to CPU and only load them to GPU when performing the forward pass for memory savings.

")) - self.use_enable_sequential_cpu_offload.setText(_translate("Form", "Sequential CPU offload")) - self.label.setText(_translate("Form", "Less VRAM usage, slower inference")) - self.enable_model_cpu_offload.setToolTip(_translate("Form", "

Use with attention slicing for lower memory consumption.

Moves whole models to the GPU, instead of handling each model’s constituent modules. This results in a negligible impact on inference time (compared with moving the pipeline to cuda), while still providing some memory savings.

")) - self.enable_model_cpu_offload.setText(_translate("Form", "Model CPU offload")) - self.label_2.setText(_translate("Form", "Less VRAM usage, slight inference impact")) - self.use_tf32.setToolTip(_translate("Form", "

On Ampere and later CUDA devices matrix multiplications and convolutions can use the TensorFloat32 (TF32) mode for faster but slightly less accurate computations.

")) - self.use_tf32.setText(_translate("Form", "TF32")) - self.label_5.setText(_translate("Form", "faster matrix multiplications on ampere achitecture")) - self.use_enable_vae_slicing.setToolTip(_translate("Form", "

Use with Attention Slicing or Xformers

Decode large batches of images with limited VRAM, or to enable batches with 32 images or more.

")) - self.use_enable_vae_slicing.setText(_translate("Form", "Vae Slicing")) - self.label_6.setText(_translate("Form", "Work with large batches")) - self.use_tiled_vae.setToolTip(_translate("Form", "

Use with Attention Slicing or Xformers

Makes it possible to work with large images on limited VRAM. Splits image into overlapping tiles, decodes tiles, blends outputs to make final image.

")) - self.use_tiled_vae.setText(_translate("Form", "Tile vae")) - self.label_7.setText(_translate("Form", "Work with large images")) + memory_preferences.setWindowTitle(_translate("memory_preferences", "Form")) + self.optimize_memory_button.setText(_translate("memory_preferences", "Optimize Memory Settings")) + self.use_accelerated_transformers.setToolTip(_translate("memory_preferences", "

Optimized and memory-efficient attention implementation.

")) + self.use_accelerated_transformers.setText(_translate("memory_preferences", "Accelerated Transformers")) + self.label_8.setText(_translate("memory_preferences", "Faster inference, lower VRAM usage")) + self.label_9.setText(_translate("memory_preferences", "Keep this checked to take advantage of torch 2.0")) + self.use_attention_slicing.setToolTip(_translate("memory_preferences", "

Perform computation in steps instead of all at once.

About 10% slower inference times.

Uses as little as 3.2 GB of VRAM.

")) + self.use_attention_slicing.setText(_translate("memory_preferences", "Attention Slicing")) + self.label_4.setText(_translate("memory_preferences", "Less VRAM usage, slight inference impact")) + self.use_lastchannels.setToolTip(_translate("memory_preferences", "

Alternative way of ordering NCHW tensors in memory that preserves the dimension ordering. Channels last tensors are ordered in such a way that channels become the densest dimension (i.e. storing images pixel-per-pixel).

Since not all operators currently support the channels last format, it may result in worse performance, so it’s best to try it and see if it works for your model.

")) + self.use_lastchannels.setText(_translate("memory_preferences", "Channels last memory")) + self.label_3.setText(_translate("memory_preferences", "May slow inference on some models, speed up on others")) + self.use_enable_sequential_cpu_offload.setToolTip(_translate("memory_preferences", "

Use with attention slicing for lower memory consumption.

Offloads the weights to the CPU and only loads them to the GPU when performing the forward pass, for memory savings.

")) + self.use_enable_sequential_cpu_offload.setText(_translate("memory_preferences", "Sequential CPU offload")) + self.label.setText(_translate("memory_preferences", "Less VRAM usage, slower inference")) + self.enable_model_cpu_offload.setToolTip(_translate("memory_preferences", "

Use with attention slicing for lower memory consumption.

Moves whole models to the GPU, instead of handling each model’s constituent modules. This results in a negligible impact on inference time (compared with moving the pipeline to cuda), while still providing some memory savings.

")) + self.enable_model_cpu_offload.setText(_translate("memory_preferences", "Model CPU offload")) + self.label_2.setText(_translate("memory_preferences", "Less VRAM usage, slight inference impact")) + self.use_tf32.setToolTip(_translate("memory_preferences", "

On Ampere and later CUDA devices matrix multiplications and convolutions can use the TensorFloat32 (TF32) mode for faster but slightly less accurate computations.

")) + self.use_tf32.setText(_translate("memory_preferences", "TF32")) + self.label_5.setText(_translate("memory_preferences", "faster matrix multiplications on ampere achitecture")) + self.use_enable_vae_slicing.setToolTip(_translate("memory_preferences", "

Use with Attention Slicing or Xformers

Decode large batches of images with limited VRAM, or enable batches of 32 images or more.

")) + self.use_enable_vae_slicing.setText(_translate("memory_preferences", "Vae Slicing")) + self.label_6.setText(_translate("memory_preferences", "Work with large batches")) + self.use_tiled_vae.setToolTip(_translate("memory_preferences", "

Use with Attention Slicing or Xformers

Makes it possible to work with large images on limited VRAM. Splits the image into overlapping tiles, decodes the tiles, and blends the outputs to make the final image.

")) + self.use_tiled_vae.setText(_translate("memory_preferences", "Tile vae")) + self.label_7.setText(_translate("memory_preferences", "Work with large images")) diff --git a/src/airunner/windows/settings/airunner_settings.py b/src/airunner/windows/settings/airunner_settings.py index 61aac464f..631e0b851 100644 --- a/src/airunner/windows/settings/airunner_settings.py +++ b/src/airunner/windows/settings/airunner_settings.py @@ -5,6 +5,7 @@ from airunner.widgets.grid_preferences.grid_preferences_widget import GridPreferencesWidget from airunner.widgets.keyboard_shortcuts.keyboard_shortcuts_widget import KeyboardShortcutsWidget +from airunner.widgets.memory_preferences.memory_preferences_widget import MemoryPreferencesWidget from airunner.widgets.paths.paths_widget import PathsWidget from airunner.windows.settings.templates.airunner_settings_ui import Ui_airunner_settings from airunner.windows.base_window import BaseWindow @@ -236,11 +237,11 @@ def show_content(self, section, display_name, name, description): "keyboard_shortcuts": KeyboardShortcutsWidget, "export_preferences": ExportPreferencesWidget, "grid": GridPreferencesWidget, - "memory": MemoryWidget, + "memory": MemoryPreferencesWidget, # "hf_api_key": HFAPIKeyWidget, } if name in widgets: - if name in ["paths", "export_preferences", "grid"]: + if name in ["paths", "export_preferences", "grid", "memory"]: widget_object = widgets[name]() else: widget_object = widgets[name](