From 35a726e3c4cca941bb5223d8f45a743041715320 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> Date: Sun, 24 Nov 2024 11:44:22 +0200 Subject: [PATCH 1/3] macos-14 now has Python 3.9 --- .github/workflows/test.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9561cdee..5be73af6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,10 +13,6 @@ jobs: python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] numpy: [0, 1] os: [ubuntu-latest, macos-latest, windows-latest, macos-14] - # Skip 3.9 on macos-14 - it only has 3.10+ - exclude: - - python-version: "3.9" - os: macos-14 steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} From 15f15933b1c3b97b3a4e68ba9c09484b9548dafa Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> Date: Sun, 24 Nov 2024 11:48:16 +0200 Subject: [PATCH 2/3] Upgrade syntax with pyupgrade --py39-plus --- docs/conf.py | 1 - setup.py | 2 +- sqlite_utils/cli.py | 48 ++--- sqlite_utils/db.py | 335 +++++++++++++++++----------------- sqlite_utils/utils.py | 19 +- tests/test_cli.py | 26 ++- tests/test_cli_convert.py | 4 +- tests/test_cli_insert.py | 8 +- tests/test_cli_memory.py | 10 +- tests/test_column_affinity.py | 2 +- tests/test_create.py | 10 +- tests/test_default_value.py | 2 +- tests/test_docs.py | 2 +- tests/test_enable_counts.py | 4 +- tests/test_extract.py | 8 +- tests/test_extracts.py | 2 +- tests/test_fts.py | 16 +- tests/test_gis.py | 2 +- tests/test_insert_files.py | 4 +- tests/test_introspect.py | 4 +- tests/test_m2m.py | 3 +- tests/test_utils.py | 2 +- 22 files changed, 255 insertions(+), 259 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 859c6b90..2d014a8b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- from subprocess import Popen, PIPE from beanbag_docutils.sphinx.ext.github import github_linkcode_resolve diff --git a/setup.py b/setup.py index 1bd5fe1e..64cee7cb 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ def get_long_description(): - with io.open( + with open( os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"), encoding="utf8", ) as fp: diff --git a/sqlite_utils/cli.py b/sqlite_utils/cli.py index cc2d86eb..09eab47d 100644 --- a/sqlite_utils/cli.py +++ b/sqlite_utils/cli.py @@ -785,7 +785,7 @@ def enable_counts(path, tables, load_extension): # Check all tables exist bad_tables = [table for table in tables if not db[table].exists()] if bad_tables: - raise click.ClickException("Invalid tables: {}".format(bad_tables)) + raise click.ClickException(f"Invalid tables: {bad_tables}") for table in tables: db[table].enable_counts() @@ -1005,7 +1005,7 @@ def insert_upsert_implementation( reader = csv_std.reader(decoded, **csv_reader_args) first_row = next(reader) if no_headers: - headers = ["untitled_{}".format(i + 1) for i in range(len(first_row))] + headers = [f"untitled_{i + 1}" for i in range(len(first_row))] reader = itertools.chain([first_row], reader) else: headers = first_row @@ -1568,7 +1568,7 @@ def create_table( ctype = columns.pop(0) if ctype.upper() not in VALID_COLUMN_TYPES: raise click.ClickException( - "column types must be one of {}".format(VALID_COLUMN_TYPES) + f"column types must be one of {VALID_COLUMN_TYPES}" ) coltypes[name] = ctype.upper() # Does table already exist? 
@@ -1612,7 +1612,7 @@ def duplicate(path, table, new_table, ignore, load_extension): db[table].duplicate(new_table) except NoTable: if not ignore: - raise click.ClickException('Table "{}" does not exist'.format(table)) + raise click.ClickException(f'Table "{table}" does not exist') @cli.command(name="rename-table") @@ -1636,7 +1636,7 @@ def rename_table(path, table, new_name, ignore, load_extension): except sqlite3.OperationalError as ex: if not ignore: raise click.ClickException( - 'Table "{}" could not be renamed. {}'.format(table, str(ex)) + f'Table "{table}" could not be renamed. {str(ex)}' ) @@ -1662,7 +1662,7 @@ def drop_table(path, table, ignore, load_extension): try: db[table].drop(ignore=ignore) except OperationalError: - raise click.ClickException('Table "{}" does not exist'.format(table)) + raise click.ClickException(f'Table "{table}" does not exist') @cli.command(name="create-view") @@ -1732,7 +1732,7 @@ def drop_view(path, view, ignore, load_extension): try: db[view].drop(ignore=ignore) except OperationalError: - raise click.ClickException('View "{}" does not exist'.format(view)) + raise click.ClickException(f'View "{view}" does not exist') @cli.command() @@ -1944,7 +1944,7 @@ def memory( file_path = pathlib.Path(path) stem = file_path.stem if stem_counts.get(stem): - file_table = "{}_{}".format(stem, stem_counts[stem]) + file_table = f"{stem}_{stem_counts[stem]}" else: file_table = stem stem_counts[stem] = stem_counts.get(stem, 1) + 1 @@ -1961,12 +1961,12 @@ def memory( if tracker is not None: db[file_table].transform(types=tracker.types) # Add convenient t / t1 / t2 views - view_names = ["t{}".format(i + 1)] + view_names = [f"t{i + 1}"] if i == 0: view_names.append("t") for view_name in view_names: if not db[view_name].exists(): - db.create_view(view_name, "select * from [{}]".format(file_table)) + db.create_view(view_name, f"select * from [{file_table}]") if fp: fp.close() @@ -2127,10 +2127,10 @@ def search( # Check table exists table_obj = db[dbtable] if not table_obj.exists(): - raise click.ClickException("Table '{}' does not exist".format(dbtable)) + raise click.ClickException(f"Table '{dbtable}' does not exist") if not table_obj.detect_fts(): raise click.ClickException( - "Table '{}' is not configured for full-text search".format(dbtable) + f"Table '{dbtable}' is not configured for full-text search" ) if column: # Check they all exist @@ -2138,7 +2138,7 @@ def search( for c in column: if c not in table_columns: raise click.ClickException( - "Table '{}' has no column '{}".format(dbtable, c) + f"Table '{dbtable}' has no column '{c}" ) sql = table_obj.search_sql(columns=column, order_by=order, limit=limit) if show_sql: @@ -2165,7 +2165,7 @@ def search( except click.ClickException as e: if "malformed MATCH expression" in str(e) or "unterminated string" in str(e): raise click.ClickException( - "{}\n\nTry running this again with the --quote option".format(str(e)) + f"{str(e)}\n\nTry running this again with the --quote option" ) else: raise @@ -2230,16 +2230,16 @@ def rows( """ columns = "*" if column: - columns = ", ".join("[{}]".format(c) for c in column) - sql = "select {} from [{}]".format(columns, dbtable) + columns = ", ".join(f"[{c}]" for c in column) + sql = f"select {columns} from [{dbtable}]" if where: sql += " where " + where if order: sql += " order by " + order if limit: - sql += " limit {}".format(limit) + sql += f" limit {limit}" if offset: - sql += " offset {}".format(offset) + sql += f" offset {offset}" ctx.invoke( query, path=path, @@ -2494,7 +2494,7 @@ def 
transform( for column, ctype in type: if ctype.upper() not in VALID_COLUMN_TYPES: raise click.ClickException( - "column types must be one of {}".format(VALID_COLUMN_TYPES) + f"column types must be one of {VALID_COLUMN_TYPES}" ) types[column] = ctype.upper() @@ -2728,7 +2728,7 @@ def _content_text(p): except UnicodeDecodeErrorForPath as e: raise click.ClickException( UNICODE_ERROR.format( - "Could not read file '{}' as text\n\n{}".format(e.path, e.exception) + f"Could not read file '{e.path}' as text\n\n{e.exception}" ) ) @@ -3010,7 +3010,7 @@ def preview(v): """.format( column=columns[0], table=table, - where=" where {}".format(where) if where is not None else "", + where=f" where {where}" if where is not None else "", ) for row in db.conn.execute(sql, where_args).fetchall(): click.echo(str(row[0])) @@ -3181,7 +3181,7 @@ def _render_common(title, values): return "" lines = [title] for value, count in values: - lines.append(" {}: {}".format(count, value)) + lines.append(f" {count}: {value}") return "\n".join(lines) @@ -3261,7 +3261,7 @@ def json_binary(value): def verify_is_dict(doc): if not isinstance(doc, dict): raise click.ClickException( - "Rows must all be dictionaries, got: {}".format(repr(doc)[:1000]) + f"Rows must all be dictionaries, got: {repr(doc)[:1000]}" ) return doc @@ -3286,7 +3286,7 @@ def _register_functions(db, functions): try: exec(functions, globals) except SyntaxError as ex: - raise click.ClickException("Error in functions definition: {}".format(ex)) + raise click.ClickException(f"Error in functions definition: {ex}") # Register all callables in the locals dict: for name, value in globals.items(): if callable(value) and not name.startswith("_"): diff --git a/sqlite_utils/db.py b/sqlite_utils/db.py index ef8c40e6..981d4930 100644 --- a/sqlite_utils/db.py +++ b/sqlite_utils/db.py @@ -29,13 +29,12 @@ Any, Callable, Dict, - Generator, - Iterable, Union, Optional, List, Tuple, ) +from collections.abc import Generator, Iterable import uuid from sqlite_utils.plugins import pm @@ -163,12 +162,12 @@ class TransformError(Exception): ForeignKeyIndicator = Union[ str, ForeignKey, - Tuple[str, str], - Tuple[str, str, str], - Tuple[str, str, str, str], + tuple[str, str], + tuple[str, str, str], + tuple[str, str, str, str], ] -ForeignKeysType = Union[Iterable[ForeignKeyIndicator], List[ForeignKeyIndicator]] +ForeignKeysType = Union[Iterable[ForeignKeyIndicator], list[ForeignKeyIndicator]] class Default: @@ -335,7 +334,7 @@ def __init__( filename_or_conn is None and (memory or memory_name) ), "Either specify a filename_or_conn or pass memory=True" if memory_name: - uri = "file:{}?mode=memory&cache=shared".format(memory_name) + uri = f"file:{memory_name}?mode=memory&cache=shared" self.conn = sqlite3.connect( uri, uri=True, @@ -425,7 +424,7 @@ def __getitem__(self, table_name: str) -> Union["Table", "View"]: return self.table(table_name) def __repr__(self) -> str: - return "".format(self.conn) + return f"" def register_function( self, @@ -592,7 +591,7 @@ def quote_fts(self, query: str) -> str: bits = _quote_fts_re.split(query) bits = [b for b in bits if b and b != '""'] return " ".join( - '"{}"'.format(bit) if not bit.startswith('"') else bit for bit in bits + f'"{bit}"' if not bit.startswith('"') else bit for bit in bits ) def quote_default_value(self, value: str) -> str: @@ -609,11 +608,11 @@ def quote_default_value(self, value: str) -> str: if str(value).endswith(")"): # Expr - return "({})".format(value) + return f"({value})" return self.quote(value) - def table_names(self, fts4: 
bool = False, fts5: bool = False) -> List[str]: + def table_names(self, fts4: bool = False, fts5: bool = False) -> list[str]: """ List of string table names in this database. @@ -628,7 +627,7 @@ def table_names(self, fts4: bool = False, fts5: bool = False) -> List[str]: sql = "select name from sqlite_master where {}".format(" AND ".join(where)) return [r[0] for r in self.execute(sql).fetchall()] - def view_names(self) -> List[str]: + def view_names(self) -> list[str]: "List of string view names in this database." return [ r[0] @@ -638,17 +637,17 @@ def view_names(self) -> List[str]: ] @property - def tables(self) -> List["Table"]: + def tables(self) -> list["Table"]: "List of Table objects in this database." - return cast(List["Table"], [self[name] for name in self.table_names()]) + return cast(list["Table"], [self[name] for name in self.table_names()]) @property - def views(self) -> List["View"]: + def views(self) -> list["View"]: "List of View objects in this database." - return cast(List["View"], [self[name] for name in self.view_names()]) + return cast(list["View"], [self[name] for name in self.view_names()]) @property - def triggers(self) -> List[Trigger]: + def triggers(self) -> list[Trigger]: "List of ``(name, table_name, sql)`` tuples representing triggers in this database." return [ Trigger(*r) @@ -658,7 +657,7 @@ def triggers(self) -> List[Trigger]: ] @property - def triggers_dict(self) -> Dict[str, str]: + def triggers_dict(self) -> dict[str, str]: "A ``{trigger_name: sql}`` dictionary of triggers in this database." return {trigger.name: trigger.sql for trigger in self.triggers} @@ -679,18 +678,18 @@ def schema(self) -> str: def supports_strict(self) -> bool: "Does this database support STRICT mode?" try: - table_name = "t{}".format(secrets.token_hex(16)) + table_name = f"t{secrets.token_hex(16)}" with self.conn: self.conn.execute( - "create table {} (name text) strict".format(table_name) + f"create table {table_name} (name text) strict" ) - self.conn.execute("drop table {}".format(table_name)) + self.conn.execute(f"drop table {table_name}") return True except Exception: return False @property - def sqlite_version(self) -> Tuple[int, ...]: + def sqlite_version(self) -> tuple[int, ...]: "Version of SQLite, as a tuple of integers for example ``(3, 36, 0)``." row = self.execute("select sqlite_version()").fetchall()[0] return tuple(map(int, row[0].split("."))) @@ -736,14 +735,14 @@ def enable_counts(self): table.enable_counts() self.use_counts_table = True - def cached_counts(self, tables: Optional[Iterable[str]] = None) -> Dict[str, int]: + def cached_counts(self, tables: Optional[Iterable[str]] = None) -> dict[str, int]: """ Return ``{table_name: count}`` dictionary of cached counts for specified tables, or all tables if ``tables`` not provided. :param tables: Subset list of tables to return counts for. """ - sql = "select [table], count from {}".format(self._counts_table_name) + sql = f"select [table], count from {self._counts_table_name}" if tables: sql += " where [table] in ({})".format(", ".join("?" for table in tables)) try: @@ -765,12 +764,12 @@ def reset_counts(self): def execute_returning_dicts( self, sql: str, params: Optional[Union[Iterable, dict]] = None - ) -> List[dict]: + ) -> list[dict]: return list(self.query(sql, params)) def resolve_foreign_keys( self, name: str, foreign_keys: ForeignKeysType - ) -> List[ForeignKey]: + ) -> list[ForeignKey]: """ Given a list of differing foreign_keys definitions, return a list of fully resolved ForeignKey() named tuples. 
@@ -783,7 +782,7 @@ def resolve_foreign_keys( """ table = cast(Table, self[name]) if all(isinstance(fk, ForeignKey) for fk in foreign_keys): - return cast(List[ForeignKey], foreign_keys) + return cast(list[ForeignKey], foreign_keys) if all(isinstance(fk, str) for fk in foreign_keys): # It's a list of columns fks = [] @@ -801,7 +800,7 @@ def resolve_foreign_keys( if len(tuple_or_list) == 4: assert ( tuple_or_list[0] == name - ), "First item in {} should have been {}".format(tuple_or_list, name) + ), f"First item in {tuple_or_list} should have been {name}" assert len(tuple_or_list) in ( 2, 3, @@ -809,9 +808,9 @@ def resolve_foreign_keys( ), "foreign_keys= should be a list of tuple pairs or triples" if len(tuple_or_list) in (3, 4): if len(tuple_or_list) == 4: - tuple_or_list = cast(Tuple[str, str, str], tuple_or_list[1:]) + tuple_or_list = cast(tuple[str, str, str], tuple_or_list[1:]) else: - tuple_or_list = cast(Tuple[str, str, str], tuple_or_list) + tuple_or_list = cast(tuple[str, str, str], tuple_or_list) fks.append( ForeignKey( name, tuple_or_list[0], tuple_or_list[1], tuple_or_list[2] @@ -832,15 +831,15 @@ def resolve_foreign_keys( def create_table_sql( self, name: str, - columns: Dict[str, Any], + columns: dict[str, Any], pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, - extracts: Optional[Union[Dict[str, str], List[str]]] = None, + extracts: Optional[Union[dict[str, str], list[str]]] = None, if_not_exists: bool = False, strict: bool = False, ) -> str: @@ -909,7 +908,7 @@ def sort_key(p): c for c in self[fk.other_table].columns if c.name == fk.other_column ): raise AlterError( - "No such column: {}.{}".format(fk.other_table, fk.other_column) + f"No such column: {fk.other_table}.{fk.other_column}" ) column_defs = [] @@ -929,7 +928,7 @@ def sort_key(p): column_extras.append("NOT NULL") if column_name in defaults and defaults[column_name] is not None: column_extras.append( - "DEFAULT {}".format(self.quote_default_value(defaults[column_name])) + f"DEFAULT {self.quote_default_value(defaults[column_name])}" ) if column_name in foreign_keys_by_column: column_extras.append( @@ -955,7 +954,7 @@ def sort_key(p): extra_pk = "" if single_pk is None and pk and len(pk) > 1: extra_pk = ",\n PRIMARY KEY ({pks})".format( - pks=", ".join(["[{}]".format(p) for p in pk]) + pks=", ".join([f"[{p}]" for p in pk]) ) columns_sql = ",\n".join(column_defs) sql = """CREATE TABLE {if_not_exists}[{table}] ( @@ -973,15 +972,15 @@ def sort_key(p): def create_table( self, name: str, - columns: Dict[str, Any], + columns: dict[str, Any], pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, - extracts: Optional[Union[Dict[str, str], List[str]]] = None, + extracts: Optional[Union[dict[str, str], list[str]]] = None, if_not_exists: bool = False, replace: bool = False, ignore: bool = False, @@ -1020,11 +1019,11 @@ def create_table( should_transform = False # First add missing columns and 
figure out columns to drop existing_columns = table.columns_dict - missing_columns = dict( - (col_name, col_type) + missing_columns = { + col_name: col_type for col_name, col_type in columns.items() if col_name not in existing_columns - ) + } columns_to_drop = [ column for column in existing_columns if column not in columns ] @@ -1121,7 +1120,7 @@ def create_view( assert not ( ignore and replace ), "Use one or the other of ignore/replace, not both" - create_sql = "CREATE VIEW {name} AS {sql}".format(name=name, sql=sql) + create_sql = f"CREATE VIEW {name} AS {sql}" if ignore or replace: # Does view exist already? if name in self.view_names(): @@ -1135,7 +1134,7 @@ def create_view( self.execute(create_sql) return self - def m2m_table_candidates(self, table: str, other_table: str) -> List[str]: + def m2m_table_candidates(self, table: str, other_table: str) -> list[str]: """ Given two table names returns the name of tables that could define a many-to-many relationship between those two tables, based on having @@ -1153,7 +1152,7 @@ def m2m_table_candidates(self, table: str, other_table: str) -> List[str]: candidates.append(table_obj.name) return candidates - def add_foreign_keys(self, foreign_keys: Iterable[Tuple[str, str, str, str]]): + def add_foreign_keys(self, foreign_keys: Iterable[tuple[str, str, str, str]]): """ See :ref:`python_api_add_foreign_keys`. @@ -1170,21 +1169,21 @@ def add_foreign_keys(self, foreign_keys: Iterable[Tuple[str, str, str, str]]): # Verify that all tables and columns exist for table, column, other_table, other_column in foreign_keys: if not self[table].exists(): - raise AlterError("No such table: {}".format(table)) + raise AlterError(f"No such table: {table}") table_obj = self[table] if not isinstance(table_obj, Table): - raise AlterError("Must be a table, not a view: {}".format(table)) + raise AlterError(f"Must be a table, not a view: {table}") table_obj = cast(Table, table_obj) if column not in table_obj.columns_dict: - raise AlterError("No such column: {} in {}".format(column, table)) + raise AlterError(f"No such column: {column} in {table}") if not self[other_table].exists(): - raise AlterError("No such other_table: {}".format(other_table)) + raise AlterError(f"No such other_table: {other_table}") if ( other_column != "rowid" and other_column not in self[other_table].columns_dict ): raise AlterError( - "No such other_column: {} in {}".format(other_column, other_table) + f"No such other_column: {other_column} in {other_table}" ) # We will silently skip foreign keys that exist already if not any( @@ -1199,7 +1198,7 @@ def add_foreign_keys(self, foreign_keys: Iterable[Tuple[str, str, str, str]]): ) # Group them by table - by_table: Dict[str, List] = {} + by_table: dict[str, list] = {} for fk in foreign_keys_to_create: by_table.setdefault(fk[0], []).append(fk) @@ -1231,7 +1230,7 @@ def analyze(self, name=None): """ sql = "ANALYZE" if name is not None: - sql += " [{}]".format(name) + sql += f" [{name}]" self.execute(sql) def iterdump(self) -> Generator[str, None, None]: @@ -1309,7 +1308,7 @@ def count_where( :param where_args: Parameters to use with that fragment - an iterable for ``id > ?`` parameters, or a dictionary for ``id > :id`` """ - sql = "select count(*) from [{}]".format(self.name) + sql = f"select count(*) from [{self.name}]" if where is not None: sql += " where " + where return self.db.execute(sql, where_args or []).fetchone()[0] @@ -1352,15 +1351,15 @@ def rows_where( """ if not self.exists(): return - sql = "select {} from [{}]".format(select, 
self.name) + sql = f"select {select} from [{self.name}]" if where is not None: sql += " where " + where if order_by is not None: sql += " order by " + order_by if limit is not None: - sql += " limit {}".format(limit) + sql += f" limit {limit}" if offset is not None: - sql += " offset {}".format(offset) + sql += f" offset {offset}" cursor = self.db.execute(sql, where_args or []) columns = [c[0] for c in cursor.description] for row in cursor: @@ -1373,7 +1372,7 @@ def pks_and_rows_where( order_by: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, - ) -> Generator[Tuple[Any, Dict], None, None]: + ) -> Generator[tuple[Any, dict], None, None]: """ Like ``.rows_where()`` but returns ``(pk, row)`` pairs - ``pk`` can be a single value or tuple. @@ -1390,7 +1389,7 @@ def pks_and_rows_where( if not pks: column_names.insert(0, "rowid") pks = ["rowid"] - select = ",".join("[{}]".format(column_name) for column_name in column_names) + select = ",".join(f"[{column_name}]" for column_name in column_names) for row in self.rows_where( select=select, where=where, @@ -1405,15 +1404,15 @@ def pks_and_rows_where( yield row_pk, row @property - def columns(self) -> List["Column"]: + def columns(self) -> list["Column"]: "List of :ref:`Columns ` representing the columns in this table or view." if not self.exists(): return [] - rows = self.db.execute("PRAGMA table_info([{}])".format(self.name)).fetchall() + rows = self.db.execute(f"PRAGMA table_info([{self.name}])").fetchall() return [Column(*row) for row in rows] @property - def columns_dict(self) -> Dict[str, Any]: + def columns_dict(self) -> dict[str, Any]: "``{column_name: python-type}`` dictionary representing columns in this table or view." return {column.name: column_affinity(column.type) for column in self.columns} @@ -1462,18 +1461,18 @@ def __init__( name: str, pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, batch_size: int = 100, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, alter: bool = False, ignore: bool = False, replace: bool = False, - extracts: Optional[Union[Dict[str, str], List[str]]] = None, + extracts: Optional[Union[dict[str, str], list[str]]] = None, conversions: Optional[dict] = None, - columns: Optional[Dict[str, Any]] = None, + columns: Optional[dict[str, Any]] = None, strict: bool = False, ): super().__init__(db, name) @@ -1518,7 +1517,7 @@ def exists(self) -> bool: return self.name in self.db.table_names() @property - def pks(self) -> List[str]: + def pks(self) -> list[str]: "Primary key columns for this table." names = [column.name for column in self.columns if column.is_pk] if not names: @@ -1549,7 +1548,7 @@ def get(self, pk_values: Union[list, tuple, str, int]) -> dict: ) ) - wheres = ["[{}] = ?".format(pk_name) for pk_name in pks] + wheres = [f"[{pk_name}] = ?" for pk_name in pks] rows = self.rows_where(" and ".join(wheres), pk_values) try: row = list(rows)[0] @@ -1559,11 +1558,11 @@ def get(self, pk_values: Union[list, tuple, str, int]) -> dict: raise NotFoundError @property - def foreign_keys(self) -> List["ForeignKey"]: + def foreign_keys(self) -> list["ForeignKey"]: "List of foreign keys defined on this table." 
fks = [] for row in self.db.execute( - "PRAGMA foreign_key_list([{}])".format(self.name) + f"PRAGMA foreign_key_list([{self.name}])" ).fetchall(): if row is not None: id, seq, table_name, from_, to_, on_update, on_delete, match = row @@ -1586,18 +1585,18 @@ def virtual_table_using(self) -> Optional[str]: return match.groupdict()["using"].upper() @property - def indexes(self) -> List[Index]: + def indexes(self) -> list[Index]: "List of indexes defined on this table." - sql = 'PRAGMA index_list("{}")'.format(self.name) + sql = f'PRAGMA index_list("{self.name}")' indexes = [] for row in self.db.execute_returning_dicts(sql): index_name = row["name"] index_name_quoted = ( - '"{}"'.format(index_name) + f'"{index_name}"' if not index_name.startswith('"') else index_name ) - column_sql = "PRAGMA index_info({})".format(index_name_quoted) + column_sql = f"PRAGMA index_info({index_name_quoted})" columns = [] for seqno, cid, name in self.db.execute(column_sql).fetchall(): columns.append(name) @@ -1610,18 +1609,18 @@ def indexes(self) -> List[Index]: return indexes @property - def xindexes(self) -> List[XIndex]: + def xindexes(self) -> list[XIndex]: "List of indexes defined on this table using the more detailed ``XIndex`` format." - sql = 'PRAGMA index_list("{}")'.format(self.name) + sql = f'PRAGMA index_list("{self.name}")' indexes = [] for row in self.db.execute_returning_dicts(sql): index_name = row["name"] index_name_quoted = ( - '"{}"'.format(index_name) + f'"{index_name}"' if not index_name.startswith('"') else index_name ) - column_sql = "PRAGMA index_xinfo({})".format(index_name_quoted) + column_sql = f"PRAGMA index_xinfo({index_name_quoted})" index_columns = [] for info in self.db.execute(column_sql).fetchall(): index_columns.append(XIndexColumn(*info)) @@ -1629,7 +1628,7 @@ def xindexes(self) -> List[XIndex]: return indexes @property - def triggers(self) -> List[Trigger]: + def triggers(self) -> list[Trigger]: "List of triggers defined on this table." return [ Trigger(*r) @@ -1641,12 +1640,12 @@ def triggers(self) -> List[Trigger]: ] @property - def triggers_dict(self) -> Dict[str, str]: + def triggers_dict(self) -> dict[str, str]: "``{trigger_name: sql}`` dictionary of triggers defined on this table." return {trigger.name: trigger.sql for trigger in self.triggers} @property - def default_values(self) -> Dict[str, Any]: + def default_values(self) -> dict[str, Any]: "``{column_name: default_value}`` dictionary of default values for columns in this table." 
return { column.name: _decode_default_value(column.default_value) @@ -1663,15 +1662,15 @@ def strict(self) -> bool: def create( self, - columns: Dict[str, Any], + columns: dict[str, Any], pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, - extracts: Optional[Union[Dict[str, str], List[str]]] = None, + extracts: Optional[Union[dict[str, str], list[str]]] = None, if_not_exists: bool = False, replace: bool = False, ignore: bool = False, @@ -1743,11 +1742,11 @@ def transform( drop: Optional[Iterable] = None, pk: Optional[Any] = DEFAULT, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, drop_foreign_keys: Optional[Iterable[str]] = None, add_foreign_keys: Optional[ForeignKeysType] = None, foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, keep_table: Optional[str] = None, ) -> "Table": """ @@ -1809,14 +1808,14 @@ def transform_sql( drop: Optional[Iterable] = None, pk: Optional[Any] = DEFAULT, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, drop_foreign_keys: Optional[Iterable] = None, add_foreign_keys: Optional[ForeignKeysType] = None, foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, tmp_suffix: Optional[str] = None, keep_table: Optional[str] = None, - ) -> List[str]: + ) -> list[str]: """ Return a list of SQL statements that should be executed in order to apply this transformation. @@ -1839,7 +1838,7 @@ def transform_sql( rename = rename or {} drop = drop or set() - create_table_foreign_keys: List[ForeignKeyIndicator] = [] + create_table_foreign_keys: list[ForeignKeyIndicator] = [] if foreign_keys is not None: if add_foreign_keys is not None: @@ -1966,20 +1965,20 @@ def transform_sql( copy_sql = "INSERT INTO [{new_table}] ({new_cols})\n SELECT {old_cols} FROM [{old_table}];".format( new_table=new_table_name, old_table=self.name, - old_cols=", ".join("[{}]".format(col) for col in old_cols), - new_cols=", ".join("[{}]".format(col) for col in new_cols), + old_cols=", ".join(f"[{col}]" for col in old_cols), + new_cols=", ".join(f"[{col}]" for col in new_cols), ) sqls.append(copy_sql) # Drop (or keep) the old table if keep_table: sqls.append( - "ALTER TABLE [{}] RENAME TO [{}];".format(self.name, keep_table) + f"ALTER TABLE [{self.name}] RENAME TO [{keep_table}];" ) else: - sqls.append("DROP TABLE [{}];".format(self.name)) + sqls.append(f"DROP TABLE [{self.name}];") # Rename the new one sqls.append( - "ALTER TABLE [{}] RENAME TO [{}];".format(new_table_name, self.name) + f"ALTER TABLE [{new_table_name}] RENAME TO [{self.name}];" ) # Re-add existing indexes for index in self.indexes: @@ -2012,7 +2011,7 @@ def extract( columns: Union[str, Iterable[str]], table: Optional[str] = None, fk_column: Optional[str] = None, - rename: Optional[Dict[str, str]] = None, + rename: Optional[dict[str, str]] = None, ) -> "Table": """ Extract specified columns into a separate table. 
@@ -2035,8 +2034,8 @@ def extract( ) table = table or "_".join(columns) lookup_table = self.db[table] - fk_column = fk_column or "{}_id".format(table) - magic_lookup_column = "{}_{}".format(fk_column, os.urandom(6).hex()) + fk_column = fk_column or f"{table}_id" + magic_lookup_column = f"{fk_column}_{os.urandom(6).hex()}" # Populate the lookup table with all of the extracted unique values lookup_columns_definition = { @@ -2068,8 +2067,8 @@ def extract( self.db.execute( "INSERT OR IGNORE INTO [{lookup_table}] ({lookup_columns}) SELECT DISTINCT {table_cols} FROM [{table}]".format( lookup_table=table, - lookup_columns=", ".join("[{}]".format(c) for c in lookup_columns), - table_cols=", ".join("[{}]".format(c) for c in columns), + lookup_columns=", ".join(f"[{c}]" for c in lookup_columns), + table_cols=", ".join(f"[{c}]" for c in columns), table=self.name, ) ) @@ -2154,7 +2153,7 @@ def create_index( created_index_name = None while True: created_index_name = ( - "{}_{}".format(index_name, suffix) if suffix else index_name + f"{index_name}_{suffix}" if suffix else index_name ) sql = ( textwrap.dedent( @@ -2215,11 +2214,11 @@ def add_column( if fk is not None: # fk must be a valid table if fk not in self.db.table_names(): - raise AlterError("table '{}' does not exist".format(fk)) + raise AlterError(f"table '{fk}' does not exist") # if fk_col specified, must be a valid column if fk_col is not None: if fk_col not in self.db[fk].columns_dict: - raise AlterError("table '{}' has no column {}".format(fk, fk_col)) + raise AlterError(f"table '{fk}' has no column {fk_col}") else: # automatically set fk_col to first primary_key of fk table pks = [c for c in self.db[fk].columns if c.is_pk] @@ -2254,7 +2253,7 @@ def drop(self, ignore: bool = False): :param ignore: Set to ``True`` to ignore the error if the table does not exist """ try: - self.db.execute("DROP TABLE [{}]".format(self.name)) + self.db.execute(f"DROP TABLE [{self.name}]") except sqlite3.OperationalError: if not ignore: raise @@ -2295,7 +2294,7 @@ def guess_foreign_column(self, other_table: str): pks = [c for c in self.db[other_table].columns if c.is_pk] if len(pks) != 1: raise BadPrimaryKey( - "Could not detect single primary key for table '{}'".format(other_table) + f"Could not detect single primary key for table '{other_table}'" ) else: return pks[0].name @@ -2317,7 +2316,7 @@ def add_foreign_key( """ # Ensure column exists if column not in self.columns_dict: - raise AlterError("No such column: {}".format(column)) + raise AlterError(f"No such column: {column}") # If other_table is not specified, attempt to guess it from the column if other_table is None: other_table = self.guess_foreign_table(column) @@ -2330,7 +2329,7 @@ def add_foreign_key( not [c for c in self.db[other_table].columns if c.name == other_column] and other_column != "rowid" ): - raise AlterError("No such column: {}.{}".format(other_table, other_column)) + raise AlterError(f"No such column: {other_table}.{other_column}") # Check we do not already have an existing foreign key if any( fk @@ -2441,15 +2440,15 @@ def enable_fts( .strip() .format( table=self.name, - columns=", ".join("[{}]".format(c) for c in columns), + columns=", ".join(f"[{c}]" for c in columns), fts_version=fts_version, - tokenize="\n tokenize='{}',".format(tokenize) if tokenize else "", + tokenize=f"\n tokenize='{tokenize}'," if tokenize else "", ) ) should_recreate = False - if replace and self.db["{}_fts".format(self.name)].exists(): + if replace and self.db[f"{self.name}_fts"].exists(): # Does the table 
need to be recreated? - fts_schema = self.db["{}_fts".format(self.name)].schema + fts_schema = self.db[f"{self.name}_fts"].schema if fts_schema != create_fts_sql: should_recreate = True expected_triggers = {self.name + suffix for suffix in ("_ai", "_ad", "_au")} @@ -2468,8 +2467,8 @@ def enable_fts( self.populate_fts(columns) if create_triggers: - old_cols = ", ".join("old.[{}]".format(c) for c in columns) - new_cols = ", ".join("new.[{}]".format(c) for c in columns) + old_cols = ", ".join(f"old.[{c}]" for c in columns) + new_cols = ", ".join(f"new.[{c}]" for c in columns) triggers = ( textwrap.dedent( """ @@ -2488,7 +2487,7 @@ def enable_fts( .strip() .format( table=self.name, - columns=", ".join("[{}]".format(c) for c in columns), + columns=", ".join(f"[{c}]" for c in columns), old_cols=old_cols, new_cols=new_cols, ) @@ -2512,7 +2511,7 @@ def populate_fts(self, columns: Iterable[str]) -> "Table": ) .strip() .format( - table=self.name, columns=", ".join("[{}]".format(c) for c in columns) + table=self.name, columns=", ".join(f"[{c}]" for c in columns) ) ) self.db.executescript(sql) @@ -2540,7 +2539,7 @@ def disable_fts(self) -> "Table": trigger_names.append(row[0]) with self.db.conn: for trigger_name in trigger_names: - self.db.execute("DROP TRIGGER IF EXISTS [{}]".format(trigger_name)) + self.db.execute(f"DROP TRIGGER IF EXISTS [{trigger_name}]") return self def rebuild_fts(self): @@ -2573,8 +2572,8 @@ def detect_fts(self) -> Optional[str]: """ ).strip() args = { - "like": "%VIRTUAL TABLE%USING FTS%content=[{}]%".format(self.name), - "like2": '%VIRTUAL TABLE%USING FTS%content="{}"%'.format(self.name), + "like": f"%VIRTUAL TABLE%USING FTS%content=[{self.name}]%", + "like2": f'%VIRTUAL TABLE%USING FTS%content="{self.name}"%', "table": self.name, } rows = self.db.execute(sql, args).fetchall() @@ -2618,11 +2617,11 @@ def search_sql( # Pick names for table and rank column that don't clash original = "original_" if self.name == "original" else "original" columns_sql = "*" - columns_with_prefix_sql = "[{}].*".format(original) + columns_with_prefix_sql = f"[{original}].*" if columns: - columns_sql = ",\n ".join("[{}]".format(c) for c in columns) + columns_sql = ",\n ".join(f"[{c}]" for c in columns) columns_with_prefix_sql = ",\n ".join( - "[{}].[{}]".format(original, c) for c in columns + f"[{original}].[{c}]" for c in columns ) fts_table = self.detect_fts() assert fts_table, "Full-text search is not configured for table '{}'".format( @@ -2650,7 +2649,7 @@ def search_sql( """ ).strip() if virtual_table_using == "FTS5": - rank_implementation = "[{}].rank".format(fts_table) + rank_implementation = f"[{fts_table}].rank" else: self.db.register_fts4_bm25() rank_implementation = "rank_bm25(matchinfo([{}], 'pcnalx'))".format( @@ -2660,12 +2659,12 @@ def search_sql( columns_with_prefix_sql += ",\n " + rank_implementation + " rank" limit_offset = "" if limit is not None: - limit_offset += " limit {}".format(limit) + limit_offset += f" limit {limit}" if offset is not None: - limit_offset += " offset {}".format(offset) + limit_offset += f" offset {offset}" return sql.format( dbtable=self.name, - where_clause="\n where {}".format(where) if where else "", + where_clause=f"\n where {where}" if where else "", original=original, columns=columns_sql, columns_with_prefix=columns_with_prefix_sql, @@ -2737,7 +2736,7 @@ def delete(self, pk_values: Union[list, tuple, str, int, float]) -> "Table": if not isinstance(pk_values, (list, tuple)): pk_values = [pk_values] self.get(pk_values) - wheres = ["[{}] = 
?".format(pk_name) for pk_name in self.pks] + wheres = [f"[{pk_name}] = ?" for pk_name in self.pks] sql = "delete from [{table}] where {wheres}".format( table=self.name, wheres=" and ".join(wheres) ) @@ -2763,7 +2762,7 @@ def delete_where( """ if not self.exists(): return self - sql = "delete from [{}]".format(self.name) + sql = f"delete from [{self.name}]" if where is not None: sql += " where " + where self.db.execute(sql, where_args or []) @@ -2806,7 +2805,7 @@ def update( for key, value in updates.items(): sets.append("[{}] = {}".format(key, conversions.get(key, "?"))) args.append(jsonify_if_needed(value)) - wheres = ["[{}] = ?".format(pk_name) for pk_name in pks] + wheres = [f"[{pk_name}] = ?" for pk_name in pks] args.extend(pk_values) sql = "update [{table}] set {sets} where {wheres}".format( table=self.name, sets=", ".join(sets), wheres=" and ".join(wheres) @@ -2829,7 +2828,7 @@ def update( def convert( self, - columns: Union[str, List[str]], + columns: Union[str, list[str]], fn: Callable, output: Optional[str] = None, output_type: Optional[Any] = None, @@ -2901,7 +2900,7 @@ def convert_value(v): for column in columns ] ), - where=" where {}".format(where) if where is not None else "", + where=f" where {where}" if where is not None else "", ) with self.db.conn: self.db.execute(sql, where_args or []) @@ -2924,7 +2923,7 @@ def _convert_multi( ) as bar: for row in self.rows_where( select=", ".join( - "[{}]".format(column_name) for column_name in (pks + [column]) + f"[{column_name}]" for column_name in (pks + [column]) ), where=where, where_args=where_args, @@ -3015,7 +3014,7 @@ def build_insert_queries_and_params( placeholders.extend(not_null) sql = "INSERT OR IGNORE INTO [{table}]({cols}) VALUES({placeholders});".format( table=self.name, - cols=", ".join(["[{}]".format(p) for p in placeholders]), + cols=", ".join([f"[{p}]" for p in placeholders]), placeholders=", ".join(["?" for p in placeholders]), ) queries_and_params.append( @@ -3030,7 +3029,7 @@ def build_insert_queries_and_params( "[{}] = {}".format(col, conversions.get(col, "?")) for col in set_cols ), - wheres=" AND ".join("[{}] = ?".format(pk) for pk in pks), + wheres=" AND ".join(f"[{pk}] = ?" 
for pk in pks), ) queries_and_params.append( ( @@ -3056,7 +3055,7 @@ def build_insert_queries_and_params( """.strip().format( or_what=or_what, table=self.name, - columns=", ".join("[{}]".format(c) for c in all_columns), + columns=", ".join(f"[{c}]" for c in all_columns), rows=", ".join( "({placeholders})".format( placeholders=", ".join( @@ -3167,20 +3166,20 @@ def insert_chunk( def insert( self, - record: Dict[str, Any], + record: dict[str, Any], pk=DEFAULT, foreign_keys=DEFAULT, - column_order: Optional[Union[List[str], Default]] = DEFAULT, + column_order: Optional[Union[list[str], Default]] = DEFAULT, not_null: Optional[Union[Iterable[str], Default]] = DEFAULT, - defaults: Optional[Union[Dict[str, Any], Default]] = DEFAULT, + defaults: Optional[Union[dict[str, Any], Default]] = DEFAULT, hash_id: Optional[Union[str, Default]] = DEFAULT, hash_id_columns: Optional[Union[Iterable[str], Default]] = DEFAULT, alter: Optional[Union[bool, Default]] = DEFAULT, ignore: Optional[Union[bool, Default]] = DEFAULT, replace: Optional[Union[bool, Default]] = DEFAULT, - extracts: Optional[Union[Dict[str, str], List[str], Default]] = DEFAULT, - conversions: Optional[Union[Dict[str, str], Default]] = DEFAULT, - columns: Optional[Union[Dict[str, Any], Default]] = DEFAULT, + extracts: Optional[Union[dict[str, str], list[str], Default]] = DEFAULT, + conversions: Optional[Union[dict[str, str], Default]] = DEFAULT, + columns: Optional[Union[dict[str, Any], Default]] = DEFAULT, strict: Optional[Union[bool, Default]] = DEFAULT, ) -> "Table": """ @@ -3308,12 +3307,12 @@ def insert_all( num_columns = len(first_record.keys()) assert ( num_columns <= SQLITE_MAX_VARS - ), "Rows can have a maximum of {} columns".format(SQLITE_MAX_VARS) + ), f"Rows can have a maximum of {SQLITE_MAX_VARS} columns" batch_size = max(1, min(batch_size, SQLITE_MAX_VARS // num_columns)) self.last_rowid = None self.last_pk = None if truncate and self.exists(): - self.db.execute("DELETE FROM [{}];".format(self.name)) + self.db.execute(f"DELETE FROM [{self.name}];") for chunk in chunks(itertools.chain([first_record], records), batch_size): chunk = list(chunk) num_records_processed += len(chunk) @@ -3447,7 +3446,7 @@ def upsert_all( strict=strict, ) - def add_missing_columns(self, records: Iterable[Dict[str, Any]]) -> "Table": + def add_missing_columns(self, records: Iterable[dict[str, Any]]) -> "Table": needed_columns = suggest_column_types(records) current_columns = {c.lower() for c in self.columns_dict} for col_name, col_type in needed_columns.items(): @@ -3457,16 +3456,16 @@ def add_missing_columns(self, records: Iterable[Dict[str, Any]]) -> "Table": def lookup( self, - lookup_values: Dict[str, Any], - extra_values: Optional[Dict[str, Any]] = None, + lookup_values: dict[str, Any], + extra_values: Optional[dict[str, Any]] = None, pk: Optional[str] = "id", foreign_keys: Optional[ForeignKeysType] = None, - column_order: Optional[List[str]] = None, + column_order: Optional[list[str]] = None, not_null: Optional[Iterable[str]] = None, - defaults: Optional[Dict[str, Any]] = None, - extracts: Optional[Union[Dict[str, str], List[str]]] = None, - conversions: Optional[Dict[str, str]] = None, - columns: Optional[Dict[str, Any]] = None, + defaults: Optional[dict[str, Any]] = None, + extracts: Optional[Union[dict[str, str], list[str]]] = None, + conversions: Optional[dict[str, str]] = None, + columns: Optional[dict[str, Any]] = None, strict: Optional[bool] = False, ): """ @@ -3503,7 +3502,7 @@ def lookup( unique_column_sets = [set(i.columns) for i in 
self.indexes] if set(lookup_values.keys()) not in unique_column_sets: self.create_index(lookup_values.keys(), unique=True) - wheres = ["[{}] = ?".format(column) for column in lookup_values] + wheres = [f"[{column}] = ?" for column in lookup_values] rows = list( self.rows_where( " and ".join(wheres), [value for _, value in lookup_values.items()] @@ -3544,10 +3543,10 @@ def m2m( self, other_table: Union[str, "Table"], record_or_iterable: Optional[ - Union[Iterable[Dict[str, Any]], Dict[str, Any]] + Union[Iterable[dict[str, Any]], dict[str, Any]] ] = None, pk: Optional[Union[Any, Default]] = DEFAULT, - lookup: Optional[Dict[str, Any]] = None, + lookup: Optional[dict[str, Any]] = None, m2m_table: Optional[str] = None, alter: bool = False, ): @@ -3581,7 +3580,7 @@ def m2m( else: assert record_or_iterable is not None, "Provide lookup= or record, not both" tables = list(sorted([self.name, other_table.name])) - columns = ["{}_id".format(t) for t in tables] + columns = [f"{t}_id" for t in tables] if m2m_table is not None: m2m_table_name = m2m_table else: @@ -3604,7 +3603,7 @@ def m2m( if isinstance(record_or_iterable, Mapping): records = [record_or_iterable] else: - records = cast(List, record_or_iterable) + records = cast(list, record_or_iterable) # Ensure each record exists in other table for record in records: id = other_table.insert( @@ -3612,8 +3611,8 @@ def m2m( ).last_pk m2m_table_obj.insert( { - "{}_id".format(other_table.name): id, - "{}_id".format(self.name): our_id, + f"{other_table.name}_id": id, + f"{self.name}_id": our_id, }, replace=True, ) @@ -3621,8 +3620,8 @@ def m2m( id = other_table.lookup(lookup) m2m_table_obj.insert( { - "{}_id".format(other_table.name): id, - "{}_id".format(self.name): our_id, + f"{other_table.name}_id": id, + f"{self.name}_id": our_id, }, replace=True, ) @@ -3667,19 +3666,19 @@ def truncate(value): return value num_null = db.execute( - "select count(*) from [{}] where [{}] is null".format(table, column) + f"select count(*) from [{table}] where [{column}] is null" ).fetchone()[0] num_blank = db.execute( - "select count(*) from [{}] where [{}] = ''".format(table, column) + f"select count(*) from [{table}] where [{column}] = ''" ).fetchone()[0] num_distinct = db.execute( - "select count(distinct [{}]) from [{}]".format(column, table) + f"select count(distinct [{column}]) from [{table}]" ).fetchone()[0] most_common_results = None least_common_results = None if num_distinct == 1: value = db.execute( - "select [{}] from [{}] limit 1".format(column, table) + f"select [{column}] from [{table}] limit 1" ).fetchone()[0] most_common_results = [(truncate(value), total_rows)] elif num_distinct != total_rows: @@ -3824,7 +3823,7 @@ def drop(self, ignore=False): """ try: - self.db.execute("DROP VIEW [{}]".format(self.name)) + self.db.execute(f"DROP VIEW [{self.name}]") except sqlite3.OperationalError: if not ignore: raise @@ -3852,7 +3851,7 @@ def jsonify_if_needed(value): def resolve_extracts( - extracts: Optional[Union[Dict[str, str], List[str], Tuple[str]]] + extracts: Optional[Union[dict[str, str], list[str], tuple[str]]] ) -> dict: if extracts is None: extracts = {} @@ -3869,7 +3868,7 @@ def validate_column_names(columns): ), "'[' and ']' cannot be used in column names" -def fix_square_braces(records: Iterable[Dict[str, Any]]): +def fix_square_braces(records: Iterable[dict[str, Any]]): for record in records: if any("[" in key or "]" in key for key in record.keys()): yield { diff --git a/sqlite_utils/utils.py b/sqlite_utils/utils.py index 9e9882a9..2f2aaaa4 100644 --- 
a/sqlite_utils/utils.py +++ b/sqlite_utils/utils.py @@ -9,7 +9,8 @@ import os import sys from . import recipes -from typing import Dict, cast, BinaryIO, Iterable, Optional, Tuple, Type +from typing import Dict, cast, BinaryIO, Optional, Tuple, Type +from collections.abc import Iterable import click @@ -226,7 +227,7 @@ def _extra_key_strategy( elif not extras_key: extras = row.pop(None) # type: ignore raise RowError( - "Row {} contained these extra values: {}".format(row, extras) + f"Row {row} contained these extra values: {extras}" ) else: row[extras_key] = row.pop(None) # type: ignore @@ -236,11 +237,11 @@ def _extra_key_strategy( def rows_from_file( fp: BinaryIO, format: Optional[Format] = None, - dialect: Optional[Type[csv.Dialect]] = None, + dialect: Optional[type[csv.Dialect]] = None, encoding: Optional[str] = None, ignore_extras: Optional[bool] = False, extras_key: Optional[str] = None, -) -> Tuple[Iterable[dict], Format]: +) -> tuple[Iterable[dict], Format]: """ Load a sequence of dictionaries from a file-like object containing one of four different formats. @@ -370,7 +371,7 @@ def wrap(self, iterator: Iterable[dict]) -> Iterable[dict]: yield row @property - def types(self) -> Dict[str, str]: + def types(self) -> dict[str, str]: """ A dictionary mapping column names to their detected types. This can be passed to the ``db[table_name].transform(types=tracker.types)`` method. @@ -461,13 +462,13 @@ def _compile_code(code, imports, variable="value"): body_variants = [code] # If single line and no 'return', try adding the return if "\n" not in code and not code.strip().startswith("return "): - body_variants.insert(0, "return {}".format(code)) + body_variants.insert(0, f"return {code}") code_o = None for variant in body_variants: - new_code = ["def fn({}):".format(variable)] + new_code = [f"def fn({variable}):"] for line in variant.split("\n"): - new_code.append(" {}".format(line)) + new_code.append(f" {line}") try: code_o = compile("\n".join(new_code), "", "exec") break @@ -496,7 +497,7 @@ def chunks(sequence: Iterable, size: int) -> Iterable[Iterable]: yield itertools.chain([item], itertools.islice(iterator, size - 1)) -def hash_record(record: Dict, keys: Optional[Iterable[str]] = None): +def hash_record(record: dict, keys: Optional[Iterable[str]] = None): """ ``record`` should be a Python dictionary. Returns a sha1 hash of the keys and values in that record. 
diff --git a/tests/test_cli.py b/tests/test_cli.py index 4e564f13..7b548524 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -182,9 +182,9 @@ def test_output_table(db_path, options, expected): db["rows"].insert_all( [ { - "c1": "verb{}".format(i), - "c2": "noun{}".format(i), - "c3": "adjective{}".format(i), + "c1": f"verb{i}", + "c2": f"noun{i}", + "c3": f"adjective{i}", } for i in range(4) ] @@ -614,9 +614,9 @@ def test_optimize(db_path, tables): db[table].insert_all( [ { - "c1": "verb{}".format(i), - "c2": "noun{}".format(i), - "c3": "adjective{}".format(i), + "c1": f"verb{i}", + "c2": f"noun{i}", + "c3": f"adjective{i}", } for i in range(10000) ] @@ -640,9 +640,9 @@ def test_rebuild_fts_fixes_docsize_error(db_path): db = Database(db_path, recursive_triggers=False) records = [ { - "c1": "verb{}".format(i), - "c2": "noun{}".format(i), - "c3": "adjective{}".format(i), + "c1": f"verb{i}", + "c2": f"noun{i}", + "c3": f"adjective{i}", } for i in range(10000) ] @@ -845,7 +845,6 @@ def test_query_json_binary(db_path): "data": { "$base64": True, "encoded": ( - ( "eJzt0c1xAyEMBeC7q1ABHleR3HxNAQrIjmb4M0gelx+RTY7p4N2WBYT0vmufUknH" "8kq5lz5pqRFXsTOl3pYkE/NJnHXoStruJEVjc0mOCyTqq/ZMJnXEZW1Js2ZvRm5U+" "DPKk9hRWqjyvTFx0YfzhT6MpGmN2lR1fzxjyfVMD9dFrS+bnkleMpMam/ZGXgrX1I" @@ -854,7 +853,6 @@ def test_query_json_binary(db_path): "iCnG7Jql7RR3UvFo8jJ4z039dtOkTFmWzL1be9lt8A5II471m6vXy+l0BR/4wAc+8" "IEPfOADH/jABz7wgQ984AMf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984A" "Mf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984PuP7xubBoN9" - ) ), }, } @@ -2093,7 +2091,7 @@ def test_long_csv_column_value(tmpdir): with open(csv_path, "w") as csv_file: long_string = "a" * 131073 csv_file.write("id,text\n") - csv_file.write("1,{}\n".format(long_string)) + csv_file.write(f"1,{long_string}\n") result = CliRunner().invoke( cli.cli, ["insert", db_path, "bigtable", csv_path, "--csv"], @@ -2396,14 +2394,14 @@ def test_load_extension(entrypoint, should_pass, should_fail): for func in should_pass: result = CliRunner().invoke( cli.cli, - ["memory", "select {}()".format(func), "--load-extension", ext], + ["memory", f"select {func}()", "--load-extension", ext], catch_exceptions=False, ) assert result.exit_code == 0 for func in should_fail: result = CliRunner().invoke( cli.cli, - ["memory", "select {}()".format(func), "--load-extension", ext], + ["memory", f"select {func}()", "--load-extension", ext], catch_exceptions=False, ) assert result.exit_code == 1 diff --git a/tests/test_cli_convert.py b/tests/test_cli_convert.py index 53c33bef..3fb2e763 100644 --- a/tests/test_cli_convert.py +++ b/tests/test_cli_convert.py @@ -425,7 +425,7 @@ def test_recipe_jsonsplit(tmpdir, delimiter): ) code = "r.jsonsplit(value)" if delimiter: - code = 'recipes.jsonsplit(value, delimiter="{}")'.format(delimiter) + code = f'recipes.jsonsplit(value, delimiter="{delimiter}")' args = ["convert", db_path, "example", "tags", code] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output @@ -453,7 +453,7 @@ def test_recipe_jsonsplit_type(fresh_db_and_path, type, expected_array): ) code = "r.jsonsplit(value)" if type: - code = "recipes.jsonsplit(value, type={})".format(type) + code = f"recipes.jsonsplit(value, type={type})" args = ["convert", db_path, "example", "records", code] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output diff --git a/tests/test_cli_insert.py b/tests/test_cli_insert.py index bf2fa741..95461139 100644 --- a/tests/test_cli_insert.py +++ b/tests/test_cli_insert.py @@ -99,7 +99,7 
@@ def test_insert_with_primary_keys(db_path, tmpdir, args, expected_pks): def test_insert_multiple_with_primary_key(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") - dogs = [{"id": i, "name": "Cleo {}".format(i), "age": i + 3} for i in range(1, 21)] + dogs = [{"id": i, "name": f"Cleo {i}", "age": i + 3} for i in range(1, 21)] with open(json_path, "w") as fp: fp.write(json.dumps(dogs)) result = CliRunner().invoke( @@ -114,7 +114,7 @@ def test_insert_multiple_with_primary_key(db_path, tmpdir): def test_insert_multiple_with_compound_primary_key(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") dogs = [ - {"breed": "mixed", "id": i, "name": "Cleo {}".format(i), "age": i + 3} + {"breed": "mixed", "id": i, "name": f"Cleo {i}", "age": i + 3} for i in range(1, 21) ] with open(json_path, "w") as fp: @@ -140,7 +140,7 @@ def test_insert_multiple_with_compound_primary_key(db_path, tmpdir): def test_insert_not_null_default(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") dogs = [ - {"id": i, "name": "Cleo {}".format(i), "age": i + 3, "score": 10} + {"id": i, "name": f"Cleo {i}", "age": i + 3, "score": 10} for i in range(1, 21) ] with open(json_path, "w") as fp: @@ -587,7 +587,7 @@ def try_until(expected): return tries += 1 if tries > 10: - assert False, "Expected {}, got {}".format(expected, rows) + assert False, f"Expected {expected}, got {rows}" time.sleep(tries * 0.1) try_until([{"name": "Azi"}]) diff --git a/tests/test_cli_memory.py b/tests/test_cli_memory.py index ac0a177d..9e3f8ac8 100644 --- a/tests/test_cli_memory.py +++ b/tests/test_cli_memory.py @@ -28,7 +28,7 @@ def test_memory_csv(tmpdir, sql_from, use_stdin): fp.write(content) result = CliRunner().invoke( cli.cli, - ["memory", csv_path, "select * from {}".format(sql_from), "--nl"], + ["memory", csv_path, f"select * from {sql_from}", "--nl"], input=input, ) assert result.exit_code == 0 @@ -53,7 +53,7 @@ def test_memory_tsv(tmpdir, use_stdin): sql_from = "chickens" result = CliRunner().invoke( cli.cli, - ["memory", path, "select * from {}".format(sql_from)], + ["memory", path, f"select * from {sql_from}"], input=input, ) assert result.exit_code == 0, result.output @@ -79,7 +79,7 @@ def test_memory_json(tmpdir, use_stdin): sql_from = "chickens" result = CliRunner().invoke( cli.cli, - ["memory", path, "select * from {}".format(sql_from)], + ["memory", path, f"select * from {sql_from}"], input=input, ) assert result.exit_code == 0, result.output @@ -105,7 +105,7 @@ def test_memory_json_nl(tmpdir, use_stdin): sql_from = "chickens" result = CliRunner().invoke( cli.cli, - ["memory", path, "select * from {}".format(sql_from)], + ["memory", path, f"select * from {sql_from}"], input=input, ) assert result.exit_code == 0, result.output @@ -135,7 +135,7 @@ def test_memory_csv_encoding(tmpdir, use_stdin): CliRunner() .invoke( cli.cli, - ["memory", csv_path, "select * from {}".format(sql_from), "--nl"], + ["memory", csv_path, f"select * from {sql_from}", "--nl"], input=input, ) .exit_code diff --git a/tests/test_column_affinity.py b/tests/test_column_affinity.py index fb8f3407..85872e5a 100644 --- a/tests/test_column_affinity.py +++ b/tests/test_column_affinity.py @@ -41,5 +41,5 @@ def test_column_affinity(column_def, expected_type): @pytest.mark.parametrize("column_def,expected_type", EXAMPLES) def test_columns_dict(fresh_db, column_def, expected_type): - fresh_db.execute("create table foo (col {})".format(column_def)) + fresh_db.execute(f"create table foo (col {column_def})") assert {"col": expected_type} == 
fresh_db["foo"].columns_dict diff --git a/tests/test_create.py b/tests/test_create.py index 7825198a..a64a4e45 100644 --- a/tests/test_create.py +++ b/tests/test_create.py @@ -691,7 +691,7 @@ def test_bulk_insert_more_than_999_values(fresh_db): "num_columns,should_error", ((900, False), (999, False), (1000, True)) ) def test_error_if_more_than_999_columns(fresh_db, num_columns, should_error): - record = dict([("c{}".format(i), i) for i in range(num_columns)]) + record = {f"c{i}": i for i in range(num_columns)} if should_error: with pytest.raises(AssertionError): fresh_db["big"].insert(record) @@ -711,7 +711,7 @@ def test_columns_not_in_first_record_should_not_cause_batch_to_be_too_large(fres {"c0": "first record"}, # one column in first record -> batch size = 999 # fill out the batch with 99 records with enough columns to exceed THRESHOLD *[ - dict([("c{}".format(i), j) for i in range(extra_columns)]) + {f"c{i}": j for i in range(extra_columns)} for j in range(batch_size - 1) ], ] @@ -890,7 +890,7 @@ def test_insert_memoryview(fresh_db): def test_insert_thousands_using_generator(fresh_db): fresh_db["test"].insert_all( - {"i": i, "word": "word_{}".format(i)} for i in range(10000) + {"i": i, "word": f"word_{i}"} for i in range(10000) ) assert [{"name": "i", "type": "INTEGER"}, {"name": "word", "type": "TEXT"}] == [ {"name": col.name, "type": col.type} for col in fresh_db["test"].columns @@ -902,7 +902,7 @@ def test_insert_thousands_raises_exception_with_extra_columns_after_first_100(fr # https://github.com/simonw/sqlite-utils/issues/139 with pytest.raises(Exception, match="table test has no column named extra"): fresh_db["test"].insert_all( - [{"i": i, "word": "word_{}".format(i)} for i in range(100)] + [{"i": i, "word": f"word_{i}"} for i in range(100)] + [{"i": 101, "extra": "This extra column should cause an exception"}], ) @@ -910,7 +910,7 @@ def test_insert_thousands_raises_exception_with_extra_columns_after_first_100(fr def test_insert_thousands_adds_extra_columns_after_first_100_with_alter(fresh_db): # https://github.com/simonw/sqlite-utils/issues/139 fresh_db["test"].insert_all( - [{"i": i, "word": "word_{}".format(i)} for i in range(100)] + [{"i": i, "word": f"word_{i}"} for i in range(100)] + [{"i": 101, "extra": "Should trigger ALTER"}], alter=True, ) diff --git a/tests/test_default_value.py b/tests/test_default_value.py index 9ffdb144..0c946c29 100644 --- a/tests/test_default_value.py +++ b/tests/test_default_value.py @@ -27,7 +27,7 @@ @pytest.mark.parametrize("column_def,initial_value,expected_value", EXAMPLES) def test_quote_default_value(fresh_db, column_def, initial_value, expected_value): - fresh_db.execute("create table foo (col {})".format(column_def)) + fresh_db.execute(f"create table foo (col {column_def})") assert initial_value == fresh_db["foo"].columns[0].default_value assert expected_value == fresh_db.quote_default_value( fresh_db["foo"].columns[0].default_value diff --git a/tests/test_docs.py b/tests/test_docs.py index a36b0532..659f025e 100644 --- a/tests/test_docs.py +++ b/tests/test_docs.py @@ -34,7 +34,7 @@ def test_commands_are_documented(documented_commands, command): @pytest.mark.parametrize("command", cli.cli.commands.values()) def test_commands_have_help(command): - assert command.help, "{} is missing its help".format(command) + assert command.help, f"{command} is missing its help" def test_convert_help(): diff --git a/tests/test_enable_counts.py b/tests/test_enable_counts.py index d724e80d..29e37480 100644 --- a/tests/test_enable_counts.py +++ 
b/tests/test_enable_counts.py @@ -8,7 +8,7 @@ def test_enable_counts_specific_table(fresh_db): foo = fresh_db["foo"] assert fresh_db.table_names() == [] for i in range(10): - foo.insert({"name": "item {}".format(i)}) + foo.insert({"name": f"item {i}"}) assert fresh_db.table_names() == ["foo"] assert foo.count == 10 # Now enable counts @@ -44,7 +44,7 @@ def test_enable_counts_specific_table(fresh_db): assert list(fresh_db["_counts"].rows) == [{"count": 10, "table": "foo"}] # Add some items to test the triggers for i in range(5): - foo.insert({"name": "item {}".format(10 + i)}) + foo.insert({"name": f"item {10 + i}"}) assert foo.count == 15 assert list(fresh_db["_counts"].rows) == [{"count": 15, "table": "foo"}] # Delete some items diff --git a/tests/test_extract.py b/tests/test_extract.py index 7a663c56..fb80e87f 100644 --- a/tests/test_extract.py +++ b/tests/test_extract.py @@ -7,13 +7,13 @@ @pytest.mark.parametrize("fk_column", [None, "species"]) def test_extract_single_column(fresh_db, table, fk_column): expected_table = table or "species" - expected_fk = fk_column or "{}_id".format(expected_table) + expected_fk = fk_column or f"{expected_table}_id" iter_species = itertools.cycle(["Palm", "Spruce", "Mangrove", "Oak"]) fresh_db["tree"].insert_all( ( { "id": i, - "name": "Tree {}".format(i), + "name": f"Tree {i}", "species": next(iter_species), "end": 1, } @@ -31,7 +31,7 @@ def test_extract_single_column(fresh_db, table, fk_column): + ")" ) assert fresh_db[expected_table].schema == ( - "CREATE TABLE [{}] (\n".format(expected_table) + f"CREATE TABLE [{expected_table}] (\n" + " [id] INTEGER PRIMARY KEY,\n" " [species] TEXT\n" ")" @@ -57,7 +57,7 @@ def test_extract_multiple_columns_with_rename(fresh_db): ( { "id": i, - "name": "Tree {}".format(i), + "name": f"Tree {i}", "common_name": next(iter_common), "latin_name": next(iter_latin), } diff --git a/tests/test_extracts.py b/tests/test_extracts.py index cca16ba5..f36f6e27 100644 --- a/tests/test_extracts.py +++ b/tests/test_extracts.py @@ -51,7 +51,7 @@ def test_extracts(fresh_db, kwargs, expected_table, use_table_factory): assert [ Index( seq=0, - name="idx_{}_value".format(expected_table), + name=f"idx_{expected_table}_value", unique=1, origin="c", partial=0, diff --git a/tests/test_fts.py b/tests/test_fts.py index a35b9eef..9369ba58 100644 --- a/tests/test_fts.py +++ b/tests/test_fts.py @@ -209,20 +209,20 @@ def test_populate_fts_escape_table_names(fresh_db): @pytest.mark.parametrize("fts_version", ("4", "5")) def test_fts_tokenize(fresh_db, fts_version): - table_name = "searchable_{}".format(fts_version) + table_name = f"searchable_{fts_version}" table = fresh_db[table_name] table.insert_all(search_records) # Test without porter stemming table.enable_fts( ["text", "country"], - fts_version="FTS{}".format(fts_version), + fts_version=f"FTS{fts_version}", ) assert [] == list(table.search("bite")) # Test WITH stemming table.disable_fts() table.enable_fts( ["text", "country"], - fts_version="FTS{}".format(fts_version), + fts_version=f"FTS{fts_version}", tokenize="porter", ) rows = list(table.search("bite", order_by="rowid")) @@ -237,10 +237,10 @@ def test_fts_tokenize(fresh_db, fts_version): def test_optimize_fts(fresh_db): for fts_version in ("4", "5"): - table_name = "searchable_{}".format(fts_version) + table_name = f"searchable_{fts_version}" table = fresh_db[table_name] table.insert_all(search_records) - table.enable_fts(["text", "country"], fts_version="FTS{}".format(fts_version)) + table.enable_fts(["text", "country"], 
fts_version=f"FTS{fts_version}") # You can call optimize successfully against the tables OR their _fts equivalents: for table_name in ( "searchable_4", @@ -296,12 +296,12 @@ def test_disable_fts(fresh_db, create_triggers): expected_triggers = {"searchable_ai", "searchable_ad", "searchable_au"} else: expected_triggers = set() - assert expected_triggers == set( + assert expected_triggers == { r[0] for r in fresh_db.execute( "select name from sqlite_master where type = 'trigger'" ).fetchall() - ) + } # Now run .disable_fts() and confirm it worked table.disable_fts() assert ( @@ -392,7 +392,7 @@ def test_enable_fts_replace(kwargs): db["books"].enable_fts(**kwargs, replace=True) # Check that the new configuration is correct if should_have_changed_columns: - assert db["books_fts"].columns_dict.keys() == set(["title"]) + assert db["books_fts"].columns_dict.keys() == {"title"} if "create_triggers" in kwargs: assert db["books"].triggers if "fts_version" in kwargs: diff --git a/tests/test_gis.py b/tests/test_gis.py index a4ee75ec..afe414f8 100644 --- a/tests/test_gis.py +++ b/tests/test_gis.py @@ -113,7 +113,7 @@ def test_query_load_extension(use_spatialite_shortcut): [ ":memory:", "select spatialite_version()", - "--load-extension={}".format(load_extension), + f"--load-extension={load_extension}", ], ) assert result.exit_code == 0, result.stdout diff --git a/tests/test_insert_files.py b/tests/test_insert_files.py index 88e49a87..030f80ff 100644 --- a/tests/test_insert_files.py +++ b/tests/test_insert_files.py @@ -44,7 +44,7 @@ def test_insert_files(silent, pk_args, expected_pks): ) cols = [] for coltype in coltypes: - cols += ["-c", "{}:{}".format(coltype, coltype)] + cols += ["-c", f"{coltype}:{coltype}"] result = runner.invoke( cli.cli, ["insert-files", db_path, "files", str(tmpdir)] @@ -167,5 +167,5 @@ def test_insert_files_bad_text_encoding_error(): ) assert result.exit_code == 1, result.output assert result.output.strip().startswith( - "Error: Could not read file '{}' as text".format(str(latin.resolve())) + f"Error: Could not read file '{str(latin.resolve())}' as text" ) diff --git a/tests/test_introspect.py b/tests/test_introspect.py index f6683444..7ac7e548 100644 --- a/tests/test_introspect.py +++ b/tests/test_introspect.py @@ -49,8 +49,8 @@ def test_detect_fts_similar_tables(fresh_db, reverse_order): fresh_db[table2].insert({"title": "Hello"}).enable_fts( ["title"], fts_version="FTS4" ) - assert fresh_db[table1].detect_fts() == "{}_fts".format(table1) - assert fresh_db[table2].detect_fts() == "{}_fts".format(table2) + assert fresh_db[table1].detect_fts() == f"{table1}_fts" + assert fresh_db[table2].detect_fts() == f"{table2}_fts" def test_tables(existing_db): diff --git a/tests/test_m2m.py b/tests/test_m2m.py index 2cb2ca35..4c9c763b 100644 --- a/tests/test_m2m.py +++ b/tests/test_m2m.py @@ -65,8 +65,7 @@ def test_insert_m2m_iterable(fresh_db): iterable_records = ({"id": 1, "name": "Phineas"}, {"id": 2, "name": "Ferb"}) def iterable(): - for record in iterable_records: - yield record + yield from iterable_records platypuses = fresh_db["platypuses"] platypuses.insert({"id": 1, "name": "Perry"}, pk="id").m2m( diff --git a/tests/test_utils.py b/tests/test_utils.py index f728bcd3..a35df058 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -57,7 +57,7 @@ def test_maximize_csv_field_size_limit(): # Reset to default in case other tests have changed it csv.field_size_limit(utils.ORIGINAL_CSV_FIELD_SIZE_LIMIT) long_value = "a" * 131073 - long_csv = "id,text\n1,{}".format(long_value) + 
long_csv = f"id,text\n1,{long_value}" fp = io.BytesIO(long_csv.encode("utf-8")) # Using rows_from_file should error with pytest.raises(csv.Error): From 9c9fdc37901aa244d7f8fc7484cb11e1a44cd182 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> Date: Sun, 24 Nov 2024 11:53:00 +0200 Subject: [PATCH 3/3] Format with Black, remove unused imports, fix implicitly concatenated string --- docs/tutorial.ipynb | 102 ++++++++++++++++++++++----------------- setup.py | 1 - sqlite_utils/cli.py | 4 +- sqlite_utils/db.py | 45 +++++------------ sqlite_utils/utils.py | 6 +-- tests/test_cli.py | 24 ++++----- tests/test_cli_insert.py | 3 +- tests/test_cli_memory.py | 2 +- tests/test_create.py | 15 ++---- tests/test_extract.py | 3 +- tests/test_extracts.py | 2 +- 11 files changed, 95 insertions(+), 112 deletions(-) diff --git a/docs/tutorial.ipynb b/docs/tutorial.ipynb index 179f0104..2260b1d1 100644 --- a/docs/tutorial.ipynb +++ b/docs/tutorial.ipynb @@ -127,19 +127,21 @@ } ], "source": [ - "db[\"creatures\"].insert_all([{\n", - " \"name\": \"Cleo\",\n", - " \"species\": \"dog\",\n", - " \"age\": 6\n", - "}, {\n", - " \"name\": \"Lila\",\n", - " \"species\": \"chicken\",\n", - " \"age\": 0.8,\n", - "}, {\n", - " \"name\": \"Bants\",\n", - " \"species\": \"chicken\",\n", - " \"age\": 0.8,\n", - "}])" + "db[\"creatures\"].insert_all(\n", + " [\n", + " {\"name\": \"Cleo\", \"species\": \"dog\", \"age\": 6},\n", + " {\n", + " \"name\": \"Lila\",\n", + " \"species\": \"chicken\",\n", + " \"age\": 0.8,\n", + " },\n", + " {\n", + " \"name\": \"Bants\",\n", + " \"species\": \"chicken\",\n", + " \"age\": 0.8,\n", + " },\n", + " ]\n", + ")" ] }, { @@ -341,7 +343,9 @@ } ], "source": [ - "list(db.query(\"select * from creatures where species = :species\", {\"species\": \"chicken\"}))" + "list(\n", + " db.query(\"select * from creatures where species = :species\", {\"species\": \"chicken\"})\n", + ")" ] }, { @@ -506,22 +510,24 @@ } ], "source": [ - "db[\"creatures\"].insert_all([{\n", - " \"id\": 1,\n", - " \"name\": \"Cleo\",\n", - " \"species\": \"dog\",\n", - " \"age\": 6\n", - "}, {\n", - " \"id\": 2,\n", - " \"name\": \"Lila\",\n", - " \"species\": \"chicken\",\n", - " \"age\": 0.8,\n", - "}, {\n", - " \"id\": 3,\n", - " \"name\": \"Bants\",\n", - " \"species\": \"chicken\",\n", - " \"age\": 0.8,\n", - "}], pk=\"id\")" + "db[\"creatures\"].insert_all(\n", + " [\n", + " {\"id\": 1, \"name\": \"Cleo\", \"species\": \"dog\", \"age\": 6},\n", + " {\n", + " \"id\": 2,\n", + " \"name\": \"Lila\",\n", + " \"species\": \"chicken\",\n", + " \"age\": 0.8,\n", + " },\n", + " {\n", + " \"id\": 3,\n", + " \"name\": \"Bants\",\n", + " \"species\": \"chicken\",\n", + " \"age\": 0.8,\n", + " },\n", + " ],\n", + " pk=\"id\",\n", + ")" ] }, { @@ -575,17 +581,23 @@ } ], "source": [ - "table.insert_all([{\n", - " \"id\": 4,\n", - " \"name\": \"Azi\",\n", - " \"species\": \"chicken\",\n", - " \"age\": 0.8,\n", - "}, {\n", - " \"id\": 5,\n", - " \"name\": \"Snowy\",\n", - " \"species\": \"chicken\",\n", - " \"age\": 0.9,\n", - "}], pk=\"id\")" + "table.insert_all(\n", + " [\n", + " {\n", + " \"id\": 4,\n", + " \"name\": \"Azi\",\n", + " \"species\": \"chicken\",\n", + " \"age\": 0.8,\n", + " },\n", + " {\n", + " \"id\": 5,\n", + " \"name\": \"Snowy\",\n", + " \"species\": \"chicken\",\n", + " \"age\": 0.9,\n", + " },\n", + " ],\n", + " pk=\"id\",\n", + ")" ] }, { @@ -1006,7 +1018,9 @@ } ], "source": [ - "list(db.query(\"\"\"\n", + "list(\n", + " db.query(\n", + " \"\"\"\n", " select\n", " 
creatures.id,\n", " creatures.name,\n", @@ -1015,7 +1029,9 @@ " species.species\n", " from creatures\n", " join species on creatures.species_id = species.id\n", - "\"\"\"))" + "\"\"\"\n", + " )\n", + ")" ] }, { diff --git a/setup.py b/setup.py index 64cee7cb..1ad37396 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,4 @@ from setuptools import setup, find_packages -import io import os VERSION = "3.38" diff --git a/sqlite_utils/cli.py b/sqlite_utils/cli.py index 09eab47d..9eac6260 100644 --- a/sqlite_utils/cli.py +++ b/sqlite_utils/cli.py @@ -2137,9 +2137,7 @@ def search( table_columns = table_obj.columns_dict for c in column: if c not in table_columns: - raise click.ClickException( - f"Table '{dbtable}' has no column '{c}" - ) + raise click.ClickException(f"Table '{dbtable}' has no column '{c}") sql = table_obj.search_sql(columns=column, order_by=order, limit=limit) if show_sql: click.echo(sql) diff --git a/sqlite_utils/db.py b/sqlite_utils/db.py index 981d4930..2e102077 100644 --- a/sqlite_utils/db.py +++ b/sqlite_utils/db.py @@ -28,11 +28,8 @@ cast, Any, Callable, - Dict, Union, Optional, - List, - Tuple, ) from collections.abc import Generator, Iterable import uuid @@ -590,9 +587,7 @@ def quote_fts(self, query: str) -> str: query += '"' bits = _quote_fts_re.split(query) bits = [b for b in bits if b and b != '""'] - return " ".join( - f'"{bit}"' if not bit.startswith('"') else bit for bit in bits - ) + return " ".join(f'"{bit}"' if not bit.startswith('"') else bit for bit in bits) def quote_default_value(self, value: str) -> str: if any( @@ -680,9 +675,7 @@ def supports_strict(self) -> bool: try: table_name = f"t{secrets.token_hex(16)}" with self.conn: - self.conn.execute( - f"create table {table_name} (name text) strict" - ) + self.conn.execute(f"create table {table_name} (name text) strict") self.conn.execute(f"drop table {table_name}") return True except Exception: @@ -907,9 +900,7 @@ def sort_key(p): if fk.other_column != "rowid" and not any( c for c in self[fk.other_table].columns if c.name == fk.other_column ): - raise AlterError( - f"No such column: {fk.other_table}.{fk.other_column}" - ) + raise AlterError(f"No such column: {fk.other_table}.{fk.other_column}") column_defs = [] # ensure pk is a tuple @@ -1592,9 +1583,7 @@ def indexes(self) -> list[Index]: for row in self.db.execute_returning_dicts(sql): index_name = row["name"] index_name_quoted = ( - f'"{index_name}"' - if not index_name.startswith('"') - else index_name + f'"{index_name}"' if not index_name.startswith('"') else index_name ) column_sql = f"PRAGMA index_info({index_name_quoted})" columns = [] @@ -1616,9 +1605,7 @@ def xindexes(self) -> list[XIndex]: for row in self.db.execute_returning_dicts(sql): index_name = row["name"] index_name_quoted = ( - f'"{index_name}"' - if not index_name.startswith('"') - else index_name + f'"{index_name}"' if not index_name.startswith('"') else index_name ) column_sql = f"PRAGMA index_xinfo({index_name_quoted})" index_columns = [] @@ -1971,15 +1958,11 @@ def transform_sql( sqls.append(copy_sql) # Drop (or keep) the old table if keep_table: - sqls.append( - f"ALTER TABLE [{self.name}] RENAME TO [{keep_table}];" - ) + sqls.append(f"ALTER TABLE [{self.name}] RENAME TO [{keep_table}];") else: sqls.append(f"DROP TABLE [{self.name}];") # Rename the new one - sqls.append( - f"ALTER TABLE [{new_table_name}] RENAME TO [{self.name}];" - ) + sqls.append(f"ALTER TABLE [{new_table_name}] RENAME TO [{self.name}];") # Re-add existing indexes for index in self.indexes: if index.origin != "pk": @@ 
-2152,9 +2135,7 @@ def create_index( suffix = None created_index_name = None while True: - created_index_name = ( - f"{index_name}_{suffix}" if suffix else index_name - ) + created_index_name = f"{index_name}_{suffix}" if suffix else index_name sql = ( textwrap.dedent( """ @@ -2510,9 +2491,7 @@ def populate_fts(self, columns: Iterable[str]) -> "Table": """ ) .strip() - .format( - table=self.name, columns=", ".join(f"[{c}]" for c in columns) - ) + .format(table=self.name, columns=", ".join(f"[{c}]" for c in columns)) ) self.db.executescript(sql) return self @@ -3677,9 +3656,9 @@ def truncate(value): most_common_results = None least_common_results = None if num_distinct == 1: - value = db.execute( - f"select [{column}] from [{table}] limit 1" - ).fetchone()[0] + value = db.execute(f"select [{column}] from [{table}] limit 1").fetchone()[ + 0 + ] most_common_results = [(truncate(value), total_rows)] elif num_distinct != total_rows: if most_common: diff --git a/sqlite_utils/utils.py b/sqlite_utils/utils.py index 2f2aaaa4..106b5ae1 100644 --- a/sqlite_utils/utils.py +++ b/sqlite_utils/utils.py @@ -9,7 +9,7 @@ import os import sys from . import recipes -from typing import Dict, cast, BinaryIO, Optional, Tuple, Type +from typing import cast, BinaryIO, Optional from collections.abc import Iterable import click @@ -226,9 +226,7 @@ def _extra_key_strategy( yield row elif not extras_key: extras = row.pop(None) # type: ignore - raise RowError( - f"Row {row} contained these extra values: {extras}" - ) + raise RowError(f"Row {row} contained these extra values: {extras}") else: row[extras_key] = row.pop(None) # type: ignore yield row diff --git a/tests/test_cli.py b/tests/test_cli.py index 7b548524..ace75c37 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -845,14 +845,14 @@ def test_query_json_binary(db_path): "data": { "$base64": True, "encoded": ( - "eJzt0c1xAyEMBeC7q1ABHleR3HxNAQrIjmb4M0gelx+RTY7p4N2WBYT0vmufUknH" - "8kq5lz5pqRFXsTOl3pYkE/NJnHXoStruJEVjc0mOCyTqq/ZMJnXEZW1Js2ZvRm5U+" - "DPKk9hRWqjyvTFx0YfzhT6MpGmN2lR1fzxjyfVMD9dFrS+bnkleMpMam/ZGXgrX1I" - "/K+5Au3S/9lNQRh0k4Gq/RUz8GiKfsQm+7JLsJ6fTo5JhVG00ZU76kZZkxePx49uI" - "jnpNoJyYlWUsoaSl/CcVATje/Kxu13RANnrHweaH3V5Jh4jvGyKCnxJLiXPKhmW3f" - "iCnG7Jql7RR3UvFo8jJ4z039dtOkTFmWzL1be9lt8A5II471m6vXy+l0BR/4wAc+8" - "IEPfOADH/jABz7wgQ984AMf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984A" - "Mf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984PuP7xubBoN9" + "eJzt0c1xAyEMBeC7q1ABHleR3HxNAQrIjmb4M0gelx+RTY7p4N2WBYT0vmufUknH" + "8kq5lz5pqRFXsTOl3pYkE/NJnHXoStruJEVjc0mOCyTqq/ZMJnXEZW1Js2ZvRm5U+" + "DPKk9hRWqjyvTFx0YfzhT6MpGmN2lR1fzxjyfVMD9dFrS+bnkleMpMam/ZGXgrX1I" + "/K+5Au3S/9lNQRh0k4Gq/RUz8GiKfsQm+7JLsJ6fTo5JhVG00ZU76kZZkxePx49uI" + "jnpNoJyYlWUsoaSl/CcVATje/Kxu13RANnrHweaH3V5Jh4jvGyKCnxJLiXPKhmW3f" + "iCnG7Jql7RR3UvFo8jJ4z039dtOkTFmWzL1be9lt8A5II471m6vXy+l0BR/4wAc+8" + "IEPfOADH/jABz7wgQ984AMf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984A" + "Mf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984PuP7xubBoN9" ), }, } @@ -1171,12 +1171,12 @@ def test_upsert_alter(db_path, tmpdir): # Not null: ( ["name", "text", "--not-null", "name"], - ("CREATE TABLE [t] (\n" " [name] TEXT NOT NULL\n" ")"), + ("CREATE TABLE [t] (\n [name] TEXT NOT NULL\n)"), ), # Default: ( ["age", "integer", "--default", "age", "3"], - ("CREATE TABLE [t] (\n" " [age] INTEGER DEFAULT '3'\n" ")"), + ("CREATE TABLE [t] (\n [age] INTEGER DEFAULT '3'\n)"), ), # Compound primary key ( @@ -2052,7 +2052,7 @@ def test_triggers(tmpdir, extra_args, expected): ), ( ["dogs"], - ("CREATE TABLE [dogs] (\n" " [id] INTEGER,\n" " 
[name] TEXT\n" ")\n"), + ("CREATE TABLE [dogs] (\n [id] INTEGER,\n [name] TEXT\n)\n"), ), ( ["chickens", "dogs"], @@ -2328,7 +2328,7 @@ def test_rename_table(tmpdir): ) assert result_error.exit_code == 1 assert result_error.output == ( - 'Error: Table "missing" could not be renamed. ' "no such table: missing\n" + 'Error: Table "missing" could not be renamed. no such table: missing\n' ) # And check --ignore works result_error2 = CliRunner().invoke( diff --git a/tests/test_cli_insert.py b/tests/test_cli_insert.py index 95461139..0700a086 100644 --- a/tests/test_cli_insert.py +++ b/tests/test_cli_insert.py @@ -140,8 +140,7 @@ def test_insert_multiple_with_compound_primary_key(db_path, tmpdir): def test_insert_not_null_default(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") dogs = [ - {"id": i, "name": f"Cleo {i}", "age": i + 3, "score": 10} - for i in range(1, 21) + {"id": i, "name": f"Cleo {i}", "age": i + 3, "score": 10} for i in range(1, 21) ] with open(json_path, "w") as fp: fp.write(json.dumps(dogs)) diff --git a/tests/test_cli_memory.py b/tests/test_cli_memory.py index 9e3f8ac8..3cffda52 100644 --- a/tests/test_cli_memory.py +++ b/tests/test_cli_memory.py @@ -118,7 +118,7 @@ def test_memory_json_nl(tmpdir, use_stdin): @pytest.mark.parametrize("use_stdin", (True, False)) def test_memory_csv_encoding(tmpdir, use_stdin): latin1_csv = ( - b"date,name,latitude,longitude\n" b"2020-03-04,S\xe3o Paulo,-23.561,-46.645\n" + b"date,name,latitude,longitude\n2020-03-04,S\xe3o Paulo,-23.561,-46.645\n" ) input = None if use_stdin: diff --git a/tests/test_create.py b/tests/test_create.py index a64a4e45..5e173ae1 100644 --- a/tests/test_create.py +++ b/tests/test_create.py @@ -710,10 +710,7 @@ def test_columns_not_in_first_record_should_not_cause_batch_to_be_too_large(fres records = [ {"c0": "first record"}, # one column in first record -> batch size = 999 # fill out the batch with 99 records with enough columns to exceed THRESHOLD - *[ - {f"c{i}": j for i in range(extra_columns)} - for j in range(batch_size - 1) - ], + *[{f"c{i}": j for i in range(extra_columns)} for j in range(batch_size - 1)], ] try: fresh_db["too_many_columns"].insert_all( @@ -810,7 +807,7 @@ def test_create_index_desc(fresh_db): "select sql from sqlite_master where name='idx_dogs_age_name'" ).fetchone()[0] assert sql == ( - "CREATE INDEX [idx_dogs_age_name]\n" " ON [dogs] ([age] desc, [name])" + "CREATE INDEX [idx_dogs_age_name]\n ON [dogs] ([age] desc, [name])" ) @@ -889,9 +886,7 @@ def test_insert_memoryview(fresh_db): def test_insert_thousands_using_generator(fresh_db): - fresh_db["test"].insert_all( - {"i": i, "word": f"word_{i}"} for i in range(10000) - ) + fresh_db["test"].insert_all({"i": i, "word": f"word_{i}"} for i in range(10000)) assert [{"name": "i", "type": "INTEGER"}, {"name": "word", "type": "TEXT"}] == [ {"name": col.name, "type": col.type} for col in fresh_db["test"].columns ] @@ -1228,7 +1223,7 @@ def test_create_replace(fresh_db): fresh_db["t"].create({"id": int}) # This should not fresh_db["t"].create({"name": str}, replace=True) - assert fresh_db["t"].schema == ("CREATE TABLE [t] (\n" " [name] TEXT\n" ")") + assert fresh_db["t"].schema == ("CREATE TABLE [t] (\n [name] TEXT\n)") @pytest.mark.parametrize( @@ -1352,7 +1347,7 @@ def test_insert_upsert_strict(fresh_db, method_name, strict): def test_create_table_strict(fresh_db, strict): table = fresh_db.create_table("t", {"id": int, "f": float}, strict=strict) assert table.strict == strict or not fresh_db.supports_strict - expected_schema = "CREATE TABLE 
[t] (\n" " [id] INTEGER,\n" " [f] FLOAT\n" ")" + expected_schema = "CREATE TABLE [t] (\n [id] INTEGER,\n [f] FLOAT\n)" if strict and not fresh_db.supports_strict: return if strict: diff --git a/tests/test_extract.py b/tests/test_extract.py index fb80e87f..adbb0386 100644 --- a/tests/test_extract.py +++ b/tests/test_extract.py @@ -31,8 +31,7 @@ def test_extract_single_column(fresh_db, table, fk_column): + ")" ) assert fresh_db[expected_table].schema == ( - f"CREATE TABLE [{expected_table}] (\n" - + " [id] INTEGER PRIMARY KEY,\n" + f"CREATE TABLE [{expected_table}] (\n [id] INTEGER PRIMARY KEY,\n" " [species] TEXT\n" ")" ) diff --git a/tests/test_extracts.py b/tests/test_extracts.py index f36f6e27..e30b0d66 100644 --- a/tests/test_extracts.py +++ b/tests/test_extracts.py @@ -25,7 +25,7 @@ def test_extracts(fresh_db, kwargs, expected_table, use_table_factory): {"id": 2, "species_id": "Oak"}, {"id": 3, "species_id": "Palm"}, ], - **insert_kwargs + **insert_kwargs, ) # Should now have two tables: Trees and Species assert {expected_table, "Trees"} == set(fresh_db.table_names())