From 470abe5b9e46aa9a89249ea53b52f9ab73315a48 Mon Sep 17 00:00:00 2001 From: Brad Schoening Date: Tue, 24 Sep 2024 16:15:57 -0400 Subject: [PATCH 1/3] Fix spelling issues --- cassandra/__init__.py | 8 ++++---- cassandra/auth.py | 4 ++-- cassandra/cluster.py | 24 ++++++++++++------------ cassandra/connection.py | 12 ++++++------ cassandra/cqlengine/connection.py | 8 ++++---- cassandra/cqlengine/management.py | 2 +- cassandra/cqlengine/models.py | 6 +++--- cassandra/cqlengine/query.py | 10 +++++----- cassandra/cqlengine/statements.py | 4 ++-- cassandra/marshal.py | 8 ++++---- cassandra/metrics.py | 2 +- cassandra/policies.py | 14 +++++++------- cassandra/pool.py | 4 ++-- cassandra/query.py | 4 ++-- cassandra/util.py | 6 +++--- 15 files changed, 58 insertions(+), 58 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a9602a9f88..badefb29de 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -28,7 +28,7 @@ def emit(self, record): class ConsistencyLevel(object): """ - Spcifies how many replicas must respond for an operation to be considered + Specifies how many replicas must respond for an operation to be considered a success. By default, ``ONE`` is used for all operations. """ @@ -247,7 +247,7 @@ def has_checksumming_support(cls, version): class WriteType(object): """ - For usage with :class:`.RetryPolicy`, this describe a type + For usage with :class:`.RetryPolicy`, this describes a type of write operation. """ @@ -272,7 +272,7 @@ class WriteType(object): COUNTER = 3 """ A counter write (for one or multiple partition keys). Such writes should - not be replayed in order to avoid overcount. + not be replayed in order to avoid overcounting. """ BATCH_LOG = 4 """ @@ -283,7 +283,7 @@ class WriteType(object): CAS = 5 """ - A lighweight-transaction write, such as "DELETE ... IF EXISTS". + A lightweight-transaction write, such as "DELETE ... IF EXISTS". """ VIEW = 6 diff --git a/cassandra/auth.py b/cassandra/auth.py index 10200aa387..01c1ba444a 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -77,7 +77,7 @@ class Authenticator(object): 3) When the server indicates that authentication is successful, :meth:`~.on_authentication_success` will be called a token string that - that the server may optionally have sent. + the server may optionally have sent. The exact nature of the negotiation between the client and server is specific to the authentication mechanism configured server-side. @@ -90,7 +90,7 @@ class Authenticator(object): def initial_response(self): """ - Returns an message to send to the server to initiate the SASL handshake. + Returns a message to send to the server to initiate the SASL handshake. :const:`None` may be returned to send an empty message. """ return None diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d5f80290a9..02df46526d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -101,7 +101,7 @@ from cassandra.io.eventletreactor import EventletConnection # PYTHON-1364 # -# At the moment eventlet initialization is chucking AttributeErrors due to it's dependence on pyOpenSSL +# At the moment eventlet initialization is chucking AttributeErrors due to its dependence on pyOpenSSL # and some changes in Python 3.12 which have some knock-on effects there. 
except (ImportError, AttributeError): EventletConnection = None @@ -174,7 +174,7 @@ def _connection_reduce_fn(val,import_fn): DefaultConnection = conn_class # Forces load of utf8 encoding module to avoid deadlock that occurs -# if code that is being imported tries to import the module in a seperate +# if code that is being imported tries to import the module in a separate # thread. # See http://bugs.python.org/issue10923 "".encode('utf8') @@ -1024,7 +1024,7 @@ def default_retry_policy(self, policy): application_version = '' """ - A string identifiying this application's version to Insights + A string identifying this application's version to Insights """ cloud = None @@ -1154,7 +1154,7 @@ def __init__(self, column_encryption_policy=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as - extablishing connection pools or refreshing metadata. + establishing connection pools or refreshing metadata. Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ @@ -1461,7 +1461,7 @@ def register_user_type(self, keyspace, user_type, klass): for. `klass` should be a class with attributes whose names match the - fields of the user-defined type. The constructor must accepts kwargs + fields of the user-defined type. The constructor must accept kwargs for each of the fields in the UDT. This method should only be called after the type has been created @@ -3168,7 +3168,7 @@ def prepare_on_all_hosts(self, query, excluded_host, keyspace=None): continue if request_id is None: - # the error has already been logged by ResponsFuture + # the error has already been logged by ResponseFuture log.debug("Failed to prepare query for host %s: %r", host, future._errors.get(host)) continue @@ -3965,7 +3965,7 @@ def _handle_status_change(self, event): elif change_type == "DOWN": # Note that there is a slight risk we can receive the event late and thus # mark the host down even though we already had reconnected successfully. - # But it is unlikely, and don't have too much consequence since we'll try reconnecting + # This is unlikely, and will not have much consequence because we'll try reconnecting # right away, so we favor the detection to make the Host.is_up more accurate. if host is not None: # this will be run by the scheduler @@ -4447,7 +4447,7 @@ def _on_speculative_execute(self): # PYTHON-836, the speculative queries must be after # the query is sent from the main thread, otherwise the # query from the main thread may raise NoHostAvailable - # if the _query_plan has been exhausted by the specualtive queries. + # if the _query_plan has been exhausted by the speculative queries. # This also prevents a race condition accessing the iterator. # We reschedule this call until the main thread has succeeded # making a query @@ -4559,7 +4559,7 @@ def warnings(self): Ensure the future is complete before trying to access this property (call :meth:`.result()`, or after callback is invoked). - Otherwise it may throw if the response has not been received. + Otherwise, it may throw if the response has not been received. """ # TODO: When timers are introduced, just make this wait if not self._event.is_set(): @@ -4575,7 +4575,7 @@ def custom_payload(self): Ensure the future is complete before trying to access this property (call :meth:`.result()`, or after callback is invoked). - Otherwise it may throw if the response has not been received. + Otherwise, it may throw if the response has not been received. :return: :ref:`custom_payload`. 
""" @@ -5285,7 +5285,7 @@ def cancel_continuous_paging(self): try: self.response_future._continuous_paging_session.cancel() except AttributeError: - raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousdPagingOptions.") + raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousPagingOptions.") @property def was_applied(self): @@ -5296,7 +5296,7 @@ def was_applied(self): a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch succeeds or fails. - Only valid when one of the of the internal row factories is in use. + Only valid when one of the internal row factories is in use. """ if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,)) diff --git a/cassandra/connection.py b/cassandra/connection.py index bfe38fc702..4af6b0624a 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -825,7 +825,7 @@ def initialize_reactor(cls): @classmethod def handle_fork(cls): """ - Called after a forking. This should cleanup any remaining reactor state + Called after a forking. This should clean up any remaining reactor state from the parent process. """ pass @@ -862,7 +862,7 @@ def _build_ssl_context_from_options(self): ssl_context_opt_names = ['ssl_version', 'cert_reqs', 'check_hostname', 'keyfile', 'certfile', 'ca_certs', 'ciphers'] opts = {k:self.ssl_options.get(k, None) for k in ssl_context_opt_names if k in self.ssl_options} - # Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER so we'll get ahead of things by always + # Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER, so we'll get ahead of things by always # being explicit ssl_version = opts.get('ssl_version', None) or ssl.PROTOCOL_TLS_CLIENT cert_reqs = opts.get('cert_reqs', None) or ssl.CERT_REQUIRED @@ -891,7 +891,7 @@ def _wrap_socket_from_context(self): opts = {k:self.ssl_options.get(k, None) for k in wrap_socket_opt_names if k in self.ssl_options} # PYTHON-1186: set the server_hostname only if the SSLContext has - # check_hostname enabled and it is not already provided by the EndPoint ssl options + # check_hostname enabled, and it is not already provided by the EndPoint ssl options #opts['server_hostname'] = self.endpoint.address if (self.ssl_context.check_hostname and 'server_hostname' not in opts): server_hostname = self.endpoint.address @@ -1210,11 +1210,11 @@ def process_io_buffer(self): if not self._current_frame or pos < self._current_frame.end_pos: if self._is_checksumming_enabled and self._io_buffer.readable_io_bytes(): - # We have a multi-segments message and we need to read more + # We have a multi-segments message, and we need to read more # data to complete the current cql frame continue - # we don't have a complete header yet or we + # we don't have a complete header yet, or we # already saw a header, but we don't have a # complete message yet return @@ -1713,7 +1713,7 @@ def run(self): else: log.debug("Cannot send heartbeat message on connection (%s) to %s", id(connection), connection.endpoint) - # make sure the owner sees this defunt/closed connection + # make sure the owner sees this defunct/closed connection owner.return_connection(connection) self._raise_if_stopped() diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 
20ef497a73..60aee8d9e7 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -88,7 +88,7 @@ def from_session(cls, name, session): return instance def setup(self): - """Setup the connection""" + """Set up the connection""" global cluster, session if 'username' in self.cluster_options or 'password' in self.cluster_options: @@ -134,7 +134,7 @@ def setup_session(self): def handle_lazy_connect(self): - # if lazy_connect is False, it means the cluster is setup and ready + # if lazy_connect is False, it means the cluster is set up and ready # No need to acquire the lock if not self.lazy_connect: return @@ -280,7 +280,7 @@ def set_session(s): try: conn = get_connection() except CQLEngineException: - # no default connection set; initalize one + # no default connection set; initialize one register_connection('default', session=s, default=True) conn = get_connection() else: @@ -316,7 +316,7 @@ def setup( retry_connect=False, **kwargs): """ - Setup a the driver connection used by the mapper + Set up the driver connection used by the mapper :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 6790a117c7..7ca30f9e15 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -525,7 +525,7 @@ def _drop_table(model, connection=None): connection = connection or model._get_connection() - # don't try to delete non existant tables + # don't try to delete non-existent tables meta = get_cluster(connection).metadata ks_name = model._get_keyspace() diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index bc00001666..ff57d7c53e 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -183,7 +183,7 @@ def __call__(self, *args, **kwargs): class IfNotExistsDescriptor(object): """ - return a query set descriptor with a if_not_exists flag specified + return a query set descriptor with an if_not_exists flag specified """ def __get__(self, instance, model): if instance: @@ -201,7 +201,7 @@ def __call__(self, *args, **kwargs): class IfExistsDescriptor(object): """ - return a query set descriptor with a if_exists flag specified + return a query set descriptor with an if_exists flag specified """ def __get__(self, instance, model): if instance: @@ -398,7 +398,7 @@ def __init__(self, **values): self._values = {} for name, column in self._columns.items(): # Set default values on instantiation. Thanks to this, we don't have - # to wait anylonger for a call to validate() to have CQLengine set + # to wait any longer for a call to validate() to have CQLengine set # default columns values. column_default = column.get_default() if column.has_default else None value = values.get(name, column_default) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index afc7ceeef6..ee09e70d2d 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -938,7 +938,7 @@ def fetch_size(self, v): def allow_filtering(self): """ - Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key + Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key """ clone = copy.deepcopy(self) clone._allow_filtering = True @@ -1417,7 +1417,7 @@ def update(self): prior to calling this. 
""" if self.instance is None: - raise CQLEngineException("DML Query intance attribute is None") + raise CQLEngineException("DML Query instance attribute is None") assert type(self.instance) == self.model null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True static_changed_only = True @@ -1429,7 +1429,7 @@ def update(self): updated_columns = set() # get defined fields and their column names for name, col in self.model._columns.items(): - # if clustering key is null, don't include non static columns + # if clustering key is null, don't include non-static columns if null_clustering_key and not col.static and not col.partition_key: continue if not col.is_primary_key: @@ -1448,7 +1448,7 @@ def update(self): if statement.assignments: for name, col in self.model._primary_keys.items(): - # only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error + # only include clustering key if clustering key is not null, and non-static columns are changed to avoid cql error if (null_clustering_key or static_changed_only) and (not col.partition_key): continue statement.add_where(col, EqualsOperator(), getattr(self.instance, name)) @@ -1468,7 +1468,7 @@ def save(self): prior to calling this. """ if self.instance is None: - raise CQLEngineException("DML Query intance attribute is None") + raise CQLEngineException("DML Query instance attribute is None") assert type(self.instance) == self.model nulled_fields = set() diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index d92d0b2452..75e4426a5f 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -550,7 +550,7 @@ def get_context(self): def add_conditional_clause(self, clause): """ - Adds a iff clause to this statement + Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: ConditionalClause @@ -575,7 +575,7 @@ def update_context_id(self, i): @property def timestamp_normalized(self): """ - we're expecting self.timestamp to be either a long, int, a datetime, or a timedelta + We're expecting self.timestamp to be either a long, int, a datetime, or a timedelta :return: """ if not self.timestamp: diff --git a/cassandra/marshal.py b/cassandra/marshal.py index a527a9e1d7..c60944b6c9 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -122,8 +122,8 @@ def vints_pack(values): num_extra_bytes = 0 num_bits = v.bit_length() # We need to reserve (num_extra_bytes+1) bits in the first byte - # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved - # ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + # i.e. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # i.e. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved reserved_bits = num_extra_bytes + 1 while num_bits > (8-(reserved_bits)): num_extra_bytes += 1 @@ -167,8 +167,8 @@ def uvint_pack(val): num_extra_bytes = 0 num_bits = v.bit_length() # We need to reserve (num_extra_bytes+1) bits in the first byte - # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved - # ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + # i.e. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # i.e. 
with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved reserved_bits = num_extra_bytes + 1 while num_bits > (8-(reserved_bits)): num_extra_bytes += 1 diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 223b0c7c6e..d3fe4a6fc3 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -185,7 +185,7 @@ def get_stats(self): def set_stats_name(self, stats_name): """ Set the metrics stats name. - The stats_name is a string used to access the metris through scales: scales.getStats()[] + The stats_name is a string used to access the metrics through scales: scales.getStats()[] Default is 'cassandra-'. """ diff --git a/cassandra/policies.py b/cassandra/policies.py index c60e558465..2357639c48 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -27,7 +27,7 @@ # This is done this way because WriteType was originally # defined here and in order not to break the API. -# It may removed in the next mayor. +# It may be removed in the next major. WriteType = WT from cassandra import ConsistencyLevel, OperationTimedOut @@ -126,7 +126,7 @@ def populate(self, cluster, hosts): def make_query_plan(self, working_keyspace=None, query=None): """ - Given a :class:`~.query.Statement` instance, return a iterable + Given a :class:`~.query.Statement` instance, return an iterable of :class:`.Host` instances which should be queried in that order. A generator may work well for custom implementations of this method. @@ -809,8 +809,8 @@ def on_write_timeout(self, query, consistency, write_type, `retry_num` counts how many times the operation has been retried, so the first time this method is called, `retry_num` will be 0. - By default, failed write operations will retried at most once, and - they will only be retried if the `write_type` was + By default, a failed write operation will be retried at most once, and + will only be retried if the `write_type` was :attr:`~.WriteType.BATCH_LOG`. """ if retry_num != 0: @@ -907,7 +907,7 @@ class DowngradingConsistencyRetryPolicy(RetryPolicy): policy unless you have understood the cases where this can happen and are ok with that. It is also recommended to subclass this class so that queries that required a consistency level downgrade can be - recorded (so that repairs can be made later, etc). + recorded (so that repairs can be made later, etc.). This policy implements the same retries as :class:`.RetryPolicy`, but on top of that, it also retries in the following cases: @@ -1006,7 +1006,7 @@ class AddressTranslator(object): The driver discovers nodes using server metadata and topology change events. Normally, the endpoint defined by the server is the right way to connect to a node. In some environments, these addresses may not be reachable, or not preferred (public vs. private IPs in cloud environments, - suboptimal routing, etc). This interface allows for translating from server defined endpoints to + suboptimal routing, etc.). This interface allows for translating from server defined endpoints to preferred addresses for driver connections. *Note:* :attr:`~Cluster.contact_points` provided while creating the :class:`~.Cluster` instance are not @@ -1036,7 +1036,7 @@ def translate(self, addr): Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter. 
""" - # get family of this address so we translate to the same + # get family of this address, so we translate to the same family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0] host = socket.getfqdn(addr) for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM): diff --git a/cassandra/pool.py b/cassandra/pool.py index d61e81cd0d..2ffe66e7af 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -666,7 +666,7 @@ def borrow_connection(self, timeout): # wait_for_conn will increment in_flight on the conn least_busy, request_id = self._wait_for_conn(timeout) - # if we have too many requests on this connection but we still + # if we have too many requests on this connection, but we still # have space to open a new connection against this host, go ahead # and schedule the creation of a new connection if least_busy.in_flight >= max_reqs and len(self._connections) < max_conns: @@ -716,7 +716,7 @@ def _add_conn_if_under_max(self): with self._lock: new_connections = self._connections[:] + [conn] self._connections = new_connections - log.debug("Added new connection (%s) to pool for host %s, signaling availablility", + log.debug("Added new connection (%s) to pool for host %s, signaling availability", id(conn), self.host) self._signal_available_conn() return True diff --git a/cassandra/query.py b/cassandra/query.py index e29c2a3113..4a732df275 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -990,7 +990,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): This can be used to query events from partial sessions. `query_cl` specifies a consistency level to use for polling the trace tables, - if it should be different than the session default. + if different from the session default. """ attempt = 0 start = time.time() @@ -1014,7 +1014,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): if is_complete: log.debug("Fetched trace info for trace ID: %s", self.trace_id) else: - log.debug("Fetching parital trace info for trace ID: %s", self.trace_id) + log.debug("Fetching partial trace info for trace ID: %s", self.trace_id) self.request_type = session_row.request self.duration = timedelta(microseconds=session_row.duration) if is_complete else None diff --git a/cassandra/util.py b/cassandra/util.py index 9c07339759..f069c439f7 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -640,7 +640,7 @@ def _find_insertion(self, x): # could not compare a[mid] with x # start scanning to find insertion point while swallowing type errors lo = 0 - compared_one = False # flag is used to determine whether uncomparables are grouped at the front or back + compared_one = False # flag is used to determine whether un-comparables are grouped at the front or back while lo < hi: try: if a[lo] == x or a[lo] >= x: break @@ -1393,7 +1393,7 @@ def _round_to_precision(cls, ms, precision, default_dt): if precision_idx <= cls._to_int(DateRangePrecision.MINUTE): replace_kwargs['second'] = default_dt.second if precision_idx <= cls._to_int(DateRangePrecision.SECOND): - # truncate to nearest 1000 so we deal in ms, not us + # truncate to nearest 1000, so we deal in ms, not us replace_kwargs['microsecond'] = (default_dt.microsecond // 1000) * 1000 if precision_idx == cls._to_int(DateRangePrecision.MILLISECOND): replace_kwargs['microsecond'] = int(round(dt.microsecond, -3)) @@ -1402,7 +1402,7 @@ def _round_to_precision(cls, ms, precision, default_dt): @classmethod def round_up_to_precision(cls, ms, precision): # PYTHON-912: this is the only case 
in which we can't take as upper bound - # datetime.datetime.max because the month from ms may be February and we'd + # datetime.datetime.max because the month from ms may be February, and we'd # be setting 31 as the month day if precision == cls.MONTH: date_ms = utc_datetime_from_ms_timestamp(ms) From d39c04383ed3bf75cdb67ac39e535614399bc116 Mon Sep 17 00:00:00 2001 From: Brad Schoening Date: Wed, 25 Sep 2024 17:20:36 -0400 Subject: [PATCH 2/3] additional misspelling corrections in tests --- tests/integration/advanced/graph/test_graph_query.py | 2 +- tests/integration/advanced/test_spark.py | 2 +- .../cqlengine/connections/test_connection.py | 2 +- tests/integration/standard/test_metadata.py | 8 ++++---- tests/integration/standard/test_metrics.py | 8 ++++---- .../integration/standard/test_prepared_statements.py | 12 ++++++------ tests/unit/test_policies.py | 4 ++-- tests/unit/test_resultset.py | 4 ++-- 8 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 0c889938d8..9a4bfb5897 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -55,7 +55,7 @@ def test_consistency_passing(self): cl_attrs = ('graph_read_consistency_level', 'graph_write_consistency_level') # Iterates over the graph options and constructs an array containing - # The graph_options that correlate to graoh read and write consistency levels + # The graph_options that correlate to graph read and write consistency levels graph_params = [a[2] for a in _graph_options if a[0] in cl_attrs] s = self.session diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index a307913abb..c926e9f077 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -42,7 +42,7 @@ def test_spark_analytic_query(self): self.session.execute_graph(ClassicGraphFixtures.classic()) spark_master = find_spark_master(self.session) - # Run multipltle times to ensure we don't round robin + # Run multiple times to ensure we don't round robin for i in range(3): to_run = SimpleGraphStatement("g.V().count()") rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 92b6992573..5d6fb248d6 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -102,7 +102,7 @@ def tearDown(self): def test_connection_session_switch(self): """ Test to ensure that when the default keyspace is changed in a session and that session, - is set in the connection class, that the new defaul keyspace is honored. + is set in the connection class, that the new default keyspace is honored. @since 3.1 @jira_ticket PYTHON-486 diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 30db02fbd8..cd9b4bb39b 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -669,11 +669,11 @@ def test_refresh_table_metadata(self): """ test for synchronously refreshing table metadata - test_refresh_table_metatadata tests that table metadata is refreshed when calling test_refresh_table_metatadata(). 
+ test_refresh_table_metadata tests that table metadata is refreshed when calling test_refresh_table_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events. It then alters the table, adding a new column, using the first cluster object, and verifies that the table metadata has not changed in the second cluster object. Finally, it calls - test_refresh_table_metatadata() and verifies that the table metadata is updated in the second cluster object. + test_refresh_table_metadata() and verifies that the table metadata is updated in the second cluster object. @since 2.6.0 @jira_ticket PYTHON-291 @@ -703,10 +703,10 @@ def test_refresh_metadata_for_mv(self): test for synchronously refreshing materialized view metadata test_refresh_table_metadata_for_materialized_views tests that materialized view metadata is refreshed when calling - test_refresh_table_metatadata() with the materialized view name as the table. It creates a second cluster object + test_refresh_table_metadata() with the materialized view name as the table. It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events. It then creates a new materialized view , using the first cluster object, and verifies that the materialized view - metadata has not changed in the second cluster object. Finally, it calls test_refresh_table_metatadata() with the + metadata has not changed in the second cluster object. Finally, it calls test_refresh_table_metadata() with the materialized view name as the table name, and verifies that the materialized view metadata is updated in the second cluster object. diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index ddc1091dc6..12c13a76fa 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -276,7 +276,7 @@ def test_duplicate_metrics_per_cluster(self): session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True) session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True) - # Basic validation that naming metrics doesn't impact their segration or accuracy + # Basic validation that naming metrics doesn't impact their segregation or accuracy for i in range(10): query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) session2.execute(query) @@ -370,7 +370,7 @@ def test_metrics_per_cluster(self): check to ensure that on_success and on_error methods are invoked appropriately. 
@since 3.7.0 @jira_ticket PYTHON-284 - @expected_result in_error, and on_success should be invoked apropriately + @expected_result on_error and on_success should be invoked appropriately @test_category metrics """ @@ -381,7 +381,7 @@ def test_metrics_per_cluster(self): for _ in range(3): try: - self.session.execute("nonesense") + self.session.execute("nonsense") except SyntaxException: continue @@ -398,6 +398,6 @@ def test_metrics_per_cluster(self): RequestAnalyzer(self.session, throw_on_success=True) try: - self.session.execute("nonesense") + self.session.execute("nonsense") except SyntaxException: pass diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index a643b19c07..c25f8b90d2 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -241,7 +241,7 @@ def test_unset_values(self): @since 2.6.0 @jira_ticket PYTHON-317 - @expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leving unset values unaffected. + @expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leaving unset values unaffected. @test_category prepared_statements:binding """ @@ -464,7 +464,7 @@ def test_prepared_id_is_update(self): @since 3.12 @jira_ticket PYTHON-808 - The query id from the prepared statment must have changed + The query id from the prepared statement must have changed """ prepared_statement = self.session.prepare("SELECT * from {} WHERE a = ?".format(self.table_name)) id_before = prepared_statement.result_metadata_id @@ -552,7 +552,7 @@ def test_not_reprepare_invalid_statements(self): def test_id_is_not_updated_conditional_v4(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol V4 @since 3.13 @@ -567,7 +567,7 @@ def test_id_is_not_updated_conditional_v5(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol V5 @since 3.13 @jira_ticket PYTHON-847 @@ -581,7 +581,7 @@ def test_id_is_not_updated_conditional_dsev1(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol DSE V1 @since 3.13 @@ -596,7 +596,7 @@ def test_id_is_not_updated_conditional_dsev2(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol DSE V2 @since 3.13 diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index e1bd17a00c..9c60e58760 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -303,7 +303,7 @@ def test_modification_during_generation(self): policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3) policy.populate(Mock(), hosts) - # The general concept here is to change thee internal state of the + # The general concept here is to change the internal state of the # policy during plan generation. 
In this case we use a grey-box # approach that changes specific things during known phases of the # generator. @@ -1196,7 +1196,7 @@ def test_write_timeout(self): query=None, consistency=ONE, write_type=write_type, required_responses=1, received_responses=2, retry_num=0) self.assertEqual(retry, RetryPolicy.IGNORE) - # retrhow if we can't be sure we have a replica + # rethrow if we can't be sure we have a replica retry, consistency = policy.on_write_timeout( query=None, consistency=ONE, write_type=write_type, required_responses=1, received_responses=0, retry_num=0) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 7ff6352394..e3a79c3c0a 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -120,7 +120,7 @@ def test_index_list_mode(self): # index access before iteration causes list to be materialized self.assertEqual(rs[0], expected[0]) - # resusable iteration + # reusable iteration self.assertListEqual(list(rs), expected) self.assertListEqual(list(rs), expected) @@ -135,7 +135,7 @@ def test_index_list_mode(self): # index access before iteration causes list to be materialized self.assertEqual(rs[0], expected[0]) self.assertEqual(rs[9], expected[9]) - # resusable iteration + # reusable iteration self.assertListEqual(list(rs), expected) self.assertListEqual(list(rs), expected) From 3e3b2d326bfe6f02ef65d17a5e457b3264189fd6 Mon Sep 17 00:00:00 2001 From: Brad Schoening Date: Thu, 26 Sep 2024 09:27:56 -0400 Subject: [PATCH 3/3] fixed merge error --- tests/integration/advanced/test_spark.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index c926e9f077..3f0d04d892 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -42,7 +42,7 @@ def test_spark_analytic_query(self): self.session.execute_graph(ClassicGraphFixtures.classic()) spark_master = find_spark_master(self.session) - # Run multiple times to ensure we don't round robin + # Run multiple times to ensure we don't round-robin for i in range(3): to_run = SimpleGraphStatement("g.V().count()") rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT)