From 625754bdf994754a0e27ec0506c37920285fce08 Mon Sep 17 00:00:00 2001
From: Sourcery AI <>
Date: Wed, 25 Oct 2023 12:41:35 +0000
Subject: [PATCH] 'Refactored by Sourcery'

---
 nxdrive/client/uploader/__init__.py | 20 ++++++----------
 nxdrive/dao/engine.py               | 38 +++++++++--------------------
 nxdrive/engine/activity.py          | 10 +++-----
 nxdrive/gui/view.py                 | 36 ++++++++-------------------
 tests/unit/test_client_uploader.py  |  3 +--
 tests/unit/test_engine_dao.py       |  2 +-
 6 files changed, 35 insertions(+), 74 deletions(-)

diff --git a/nxdrive/client/uploader/__init__.py b/nxdrive/client/uploader/__init__.py
index 50472802e9..b2a6639d99 100644
--- a/nxdrive/client/uploader/__init__.py
+++ b/nxdrive/client/uploader/__init__.py
@@ -357,11 +357,9 @@ def upload_chunks(self, transfer: Upload, blob: FileBlob, chunked: bool, /) -> N
                     self.dao.update_upload(transfer)
                     transfer.is_dirty = False
 
-                # Handle status changes every time a chunk is sent
-                _transfer = self.get_upload(
+                if _transfer := self.get_upload(
                     doc_pair=transfer.doc_pair, path=transfer.path
-                )
-                if _transfer:
+                ):
                     self._handle_transfer_status(_transfer)
         else:
             uploader.upload()
@@ -451,12 +449,11 @@ def link_blob_to_doc(
             kwargs["headers"] = headers
         try:
             doc_type = kwargs.get("doc_type", "")
-            if transfer.is_direct_transfer and doc_type and doc_type != "":
-                res = self._transfer_docType_file(transfer, headers, doc_type)
-            else:
-                res = self._transfer_autoType_file(command, blob, kwargs)
-
-            return res
+            return (
+                self._transfer_docType_file(transfer, headers, doc_type)
+                if transfer.is_direct_transfer and doc_type and doc_type != ""
+                else self._transfer_autoType_file(command, blob, kwargs)
+            )
         except Exception as exc:
             err = f"Error while linking blob to doc: {exc!r}"
             log.warning(err)
@@ -503,11 +500,10 @@ def _transfer_docType_file(
                 data=content,
                 ssl_verify=self.verification_needed,
             )
-            res = self.remote.fetch(
+            return self.remote.fetch(
                 f"{self.remote.client.api_path}/path{transfer.remote_parent_path}",
                 headers=headers,
             )
-            return res
 
     @staticmethod
     def _complete_upload(transfer: Upload, blob: FileBlob, /) -> None:
diff --git a/nxdrive/dao/engine.py b/nxdrive/dao/engine.py
index 9f52a18d90..db1ff9c0a3 100644
--- a/nxdrive/dao/engine.py
+++ b/nxdrive/dao/engine.py
@@ -690,7 +690,7 @@ def release_processor(self, processor_id: int, /) -> bool:
                 "UPDATE States SET processor = 0 WHERE processor = ?", (processor_id,)
             )
             log.debug(f"Released processor {processor_id}")
-            return bool(c.rowcount > 0)
+            return c.rowcount > 0
 
     def acquire_processor(self, thread_id: int, row_id: int, /) -> bool:
         with self.lock:
@@ -702,7 +702,7 @@ def acquire_processor(self, thread_id: int, row_id: int, /) -> bool:
                 " AND processor IN (0, ?)",
                 (thread_id, row_id, thread_id),
             )
-            return bool(c.rowcount == 1)
+            return c.rowcount == 1
 
     def _reinit_states(self, cursor: Cursor, /) -> None:
         cursor.execute("DROP TABLE States")
@@ -748,7 +748,7 @@ def delete_remote_state(self, doc_pair: DocPair, /) -> None:
             c.execute(f"{update} WHERE id = ?", ("remotely_deleted", doc_pair.id))
             if doc_pair.folderish:
                 c.execute(
-                    update + " " + self._get_recursive_remote_condition(doc_pair),
+                    f"{update} {self._get_recursive_remote_condition(doc_pair)}",
                     ("parent_remotely_deleted",),
                 )
             # Only queue parent
@@ -781,16 +781,7 @@ def insert_local_state(
     ) -> int:
         digest = None
         if not info.folderish:
-            if is_large_file(info.size):
-                # We can't compute the digest of big files now as it will
-                # be done later when the entire file is fully copied.
-                # For instance, on my machine (32GB RAM, 8 cores, Intel NUC)
-                # it takes 23 minutes for 100 GB and 7 minute for 50 GB.
-                # This is way too much effort to compute it several times.
-                digest = UNACCESSIBLE_HASH
-            else:
-                digest = info.get_digest()
-
+            digest = UNACCESSIBLE_HASH if is_large_file(info.size) else info.get_digest()
         with self.lock:
             c = self._get_write_connection().cursor()
             pair_state = PAIR_STATES[("created", "unknown")]
@@ -1406,7 +1397,7 @@ def remove_state(
                 condition = self._get_recursive_remote_condition(doc_pair)
             else:
                 condition = self._get_recursive_condition(doc_pair)
-            c.execute("DELETE FROM States " + condition)
+            c.execute(f"DELETE FROM States {condition}")
 
     def remove_state_children(
         self, doc_pair: DocPair, /, *, remote_recursion: bool = False
@@ -1417,7 +1408,7 @@ def remove_state_children(
                 condition = self._get_recursive_remote_condition(doc_pair)
             else:
                 condition = self._get_recursive_condition(doc_pair)
-            c.execute("DELETE FROM States " + condition)
+            c.execute(f"DELETE FROM States {condition}")
 
     def get_state_from_local(self, path: Path, /) -> Optional[DocPair]:
         c = self._get_read_connection().cursor()
@@ -1485,15 +1476,10 @@ def insert_remote_state(
     def queue_children(self, row: DocPair, /) -> None:
         with self.lock:
             c = self._get_write_connection().cursor()
-            children: List[DocPair] = c.execute(
-                "SELECT *"
-                " FROM States"
-                " WHERE remote_parent_ref = ?"
-                " OR local_parent_path = ?"
-                " AND " + self._get_to_sync_condition(),
+            if children := c.execute(
+                f"SELECT * FROM States WHERE remote_parent_ref = ? OR local_parent_path = ? AND {self._get_to_sync_condition()}",
                 (row.remote_ref, row.local_path),
-            ).fetchall()
-            if children:
+            ).fetchall():
                 log.info(f"Queuing {len(children)} children of {row}")
                 for child in children:
                     self._queue_pair_state(child.id, child.folderish, child.pair_state)
@@ -1663,7 +1649,7 @@ def synchronize_state(
                     version,
                 ),
            )
-            result = bool(c.rowcount == 1)
+            result = c.rowcount == 1
 
         # Retry without version for folder
         if not result and row.folderish:
@@ -1694,7 +1680,7 @@ def synchronize_state(
                    row.remote_parent_ref,
                ),
            )
-            result = bool(c.rowcount == 1)
+            result = c.rowcount == 1
 
         if not result:
             log.debug(f"Was not able to synchronize state: {row!r}")
@@ -1869,7 +1855,7 @@ def is_path_scanned(self, path: str, /) -> bool:
             row = c.execute(
                 "SELECT COUNT(path) FROM RemoteScan WHERE path = ? LIMIT 1", (path,)
             ).fetchone()
-            return bool(row[0] > 0)
+            return row[0] > 0
 
     def is_filter(self, path: str, /) -> bool:
         path = self._clean_filter_path(path)
diff --git a/nxdrive/engine/activity.py b/nxdrive/engine/activity.py
index c4436469ff..68a45d4653 100644
--- a/nxdrive/engine/activity.py
+++ b/nxdrive/engine/activity.py
@@ -54,8 +54,7 @@ def get_current_action(*, thread_id: int = None) -> Optional["Action"]:
 
     @staticmethod
     def finish_action() -> None:
-        action = Action.actions.pop(current_thread_id(), None)
-        if action:
+        if action := Action.actions.pop(current_thread_id(), None):
            action.finish()
 
     def finish(self) -> None:
@@ -69,9 +68,7 @@ def export(self) -> Dict[str, Any]:
         }
 
     def __repr__(self) -> str:
-        if not self.progress:
-            return str(self.type)
-        return f"{self.type}({self.progress}%)"
+        return f"{self.type}({self.progress}%)" if self.progress else str(self.type)
 
 
 class IdleAction(Action):
@@ -131,8 +128,7 @@ def _connect_reporter(self, reporter: Optional[QApplication], /) -> None:
             return
 
         for evt in ("started", "progressing", "done"):
-            signal = getattr(reporter, f"action_{evt}", None)
-            if signal:
+            if signal := getattr(reporter, f"action_{evt}", None):
                 getattr(self, evt).connect(signal)
 
     @property
diff --git a/nxdrive/gui/view.py b/nxdrive/gui/view.py
index b0fdbceec9..2df6c95036 100755
--- a/nxdrive/gui/view.py
+++ b/nxdrive/gui/view.py
@@ -84,10 +84,7 @@ def data(self, index: QModelIndex, role: int, /) -> str:
 
         uid = self.engines_uid[index]
         engine = self.application.manager.engines.get(uid)
-        if not engine:
-            return ""
-
-        return getattr(engine, self.names[role].decode())
+        return "" if not engine else getattr(engine, self.names[role].decode())
 
     @pyqtSlot(int, str, result=str)
     def get(self, index: int, role: str = "uid", /) -> str:
@@ -96,10 +93,7 @@ def get(self, index: int, role: str = "uid", /) -> str:
 
         uid = self.engines_uid[index]
         engine = self.application.manager.engines.get(uid)
-        if not engine:
-            return ""
-
-        return getattr(engine, role)
+        return "" if not engine else getattr(engine, role)
 
     def removeRows(
         self, row: int, count: int, /, *, parent: QModelIndex = QModelIndex()
@@ -496,23 +490,18 @@ def data(self, index: QModelIndex, role: int, /) -> Any:
         elif role == self.CREATED_ON:
             label = "STARTED"
             args = []
-            datetime = get_date_from_sqlite(row["created_on"])
-            if datetime:
+            if datetime := get_date_from_sqlite(row["created_on"]):
                 label += "_ON"
-                # As date_time is in UTC
-                offset = tzlocal().utcoffset(datetime)
-                if offset:
+                if offset := tzlocal().utcoffset(datetime):
                     datetime += offset
                 args.append(Translator.format_datetime(datetime))
             return self.tr(label, values=args)
         elif role == self.COMPLETED_ON:
             label = "COMPLETED" if row["status"].name == "DONE" else "CANCELLED"
             args = []
-            datetime = get_date_from_sqlite(row["completed_on"])
-            if datetime:
+            if datetime := get_date_from_sqlite(row["completed_on"]):
                 label += "_ON"
-                offset = tzlocal().utcoffset(datetime)
-                if offset:
+                if offset := tzlocal().utcoffset(datetime):
                     datetime += offset
                 args.append(Translator.format_datetime(datetime))
             return self.tr(label, values=args)
@@ -626,23 +615,18 @@ def data(self, index: QModelIndex, role: int, /) -> Any:
         elif role == self.CREATED_ON:
             label = "STARTED"
             args = []
-            datetime = get_date_from_sqlite(row["created_on"])
-            if datetime:
+            if datetime := get_date_from_sqlite(row["created_on"]):
                 label += "_ON"
-                # As date_time is in UTC
-                offset = tzlocal().utcoffset(datetime)
-                if offset:
+                if offset := tzlocal().utcoffset(datetime):
                     datetime += offset
                 args.append(Translator.format_datetime(datetime))
             return self.tr(label, values=args)
         elif role == self.COMPLETED_ON:
             label = "COMPLETED" if row["status"].name == "DONE" else "CANCELLED"
             args = []
-            datetime = get_date_from_sqlite(row["completed_on"])
-            if datetime:
+            if datetime := get_date_from_sqlite(row["completed_on"]):
                 label += "_ON"
-                offset = tzlocal().utcoffset(datetime)
-                if offset:
+                if offset := tzlocal().utcoffset(datetime):
                     datetime += offset
                 args.append(Translator.format_datetime(datetime))
             return self.tr(label, values=args)
diff --git a/tests/unit/test_client_uploader.py b/tests/unit/test_client_uploader.py
index e8ef8ce23a..f1a2dff404 100644
--- a/tests/unit/test_client_uploader.py
+++ b/tests/unit/test_client_uploader.py
@@ -13,8 +13,7 @@
 def baseuploader():
     remote = Remote
     remote.dao = Mock()
-    baseuploader = BaseUploader(remote)
-    return baseuploader
+    return BaseUploader(remote)
 
 
 def test_link_blob_to_doc(baseuploader, upload, tmp_path, monkeypatch):
diff --git a/tests/unit/test_engine_dao.py b/tests/unit/test_engine_dao.py
index 1c7f62caa6..1b2a07c2eb 100644
--- a/tests/unit/test_engine_dao.py
+++ b/tests/unit/test_engine_dao.py
@@ -409,7 +409,7 @@
 def test_migration_db_v10(engine_dao):
     """Verify Downloads after migration from v9 to v10."""
     with engine_dao("engine_migration_10.db") as dao:
         downloads = list(dao.get_downloads())
-        assert len(downloads) == 0
+        assert not downloads
         states = list(dao.get_states_from_partial_local(Path()))
         assert len(states) == 4
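
Note: the two rewrites Sourcery applies throughout this patch are the
assignment expression ("walrus" operator, PEP 572, Python 3.8+), which
collapses an assign-then-test pair into one expression, and the
conditional expression, which collapses an if/else assignment. Below is
a minimal standalone sketch of both patterns; find_upload() is a
hypothetical stand-in for the DAO lookups above, not part of Nuxeo Drive.

    from typing import Optional

    def find_upload(uid: int) -> Optional[str]:
        # Hypothetical lookup standing in for the dao.get_upload()-style calls.
        return {1: "report.bin"}.get(uid)

    # Before: assign, then test.
    upload = find_upload(1)
    if upload:
        print(upload.upper())

    # After: bind and test in a single expression.
    if upload := find_upload(1):
        print(upload.upper())

    # Before: a four-line if/else assignment.
    if find_upload(2):
        status = "found"
    else:
        status = "missing"

    # After: a conditional expression.
    status = "found" if find_upload(2) else "missing"

    # Also applied in dao/engine.py: comparisons already return a bool,
    # so bool(c.rowcount == 1) is equivalent to c.rowcount == 1.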