Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix spelling issues in python code comments and messages #1231

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions cassandra/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def emit(self, record):

class ConsistencyLevel(object):
"""
Spcifies how many replicas must respond for an operation to be considered
Specifies how many replicas must respond for an operation to be considered
a success. By default, ``ONE`` is used for all operations.
"""

Expand Down Expand Up @@ -247,7 +247,7 @@ def has_checksumming_support(cls, version):

class WriteType(object):
"""
For usage with :class:`.RetryPolicy`, this describe a type
For usage with :class:`.RetryPolicy`, this describes a type
of write operation.
"""

Expand All @@ -272,7 +272,7 @@ class WriteType(object):
COUNTER = 3
"""
A counter write (for one or multiple partition keys). Such writes should
not be replayed in order to avoid overcount.
not be replayed in order to avoid overcounting.
"""

BATCH_LOG = 4
Expand All @@ -283,7 +283,7 @@ class WriteType(object):

CAS = 5
"""
A lighweight-transaction write, such as "DELETE ... IF EXISTS".
A lightweight-transaction write, such as "DELETE ... IF EXISTS".
"""

VIEW = 6
Expand Down
4 changes: 2 additions & 2 deletions cassandra/auth.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ class Authenticator(object):

3) When the server indicates that authentication is successful,
:meth:`~.on_authentication_success` will be called a token string that
that the server may optionally have sent.
the server may optionally have sent.

The exact nature of the negotiation between the client and server is specific
to the authentication mechanism configured server-side.
Expand All @@ -90,7 +90,7 @@ class Authenticator(object):

def initial_response(self):
"""
Returns an message to send to the server to initiate the SASL handshake.
Returns a message to send to the server to initiate the SASL handshake.
:const:`None` may be returned to send an empty message.
"""
return None
Expand Down
24 changes: 12 additions & 12 deletions cassandra/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@
from cassandra.io.eventletreactor import EventletConnection
# PYTHON-1364
#
# At the moment eventlet initialization is chucking AttributeErrors due to it's dependence on pyOpenSSL
# At the moment eventlet initialization is chucking AttributeErrors due to its dependence on pyOpenSSL
# and some changes in Python 3.12 which have some knock-on effects there.
except (ImportError, AttributeError):
EventletConnection = None
Expand Down Expand Up @@ -174,7 +174,7 @@ def _connection_reduce_fn(val,import_fn):
DefaultConnection = conn_class

# Forces load of utf8 encoding module to avoid deadlock that occurs
# if code that is being imported tries to import the module in a seperate
# if code that is being imported tries to import the module in a separate
# thread.
# See http://bugs.python.org/issue10923
"".encode('utf8')
Expand Down Expand Up @@ -1024,7 +1024,7 @@ def default_retry_policy(self, policy):

application_version = ''
"""
A string identifiying this application's version to Insights
A string identifying this application's version to Insights
"""

cloud = None
Expand Down Expand Up @@ -1154,7 +1154,7 @@ def __init__(self,
column_encryption_policy=None):
"""
``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as
extablishing connection pools or refreshing metadata.
establishing connection pools or refreshing metadata.

Any of the mutable Cluster attributes may be set as keyword arguments to the constructor.
"""
Expand Down Expand Up @@ -1461,7 +1461,7 @@ def register_user_type(self, keyspace, user_type, klass):
for.

`klass` should be a class with attributes whose names match the
fields of the user-defined type. The constructor must accepts kwargs
fields of the user-defined type. The constructor must accept kwargs
for each of the fields in the UDT.

This method should only be called after the type has been created
Expand Down Expand Up @@ -3168,7 +3168,7 @@ def prepare_on_all_hosts(self, query, excluded_host, keyspace=None):
continue

if request_id is None:
# the error has already been logged by ResponsFuture
# the error has already been logged by ResponseFuture
log.debug("Failed to prepare query for host %s: %r",
host, future._errors.get(host))
continue
Expand Down Expand Up @@ -3965,7 +3965,7 @@ def _handle_status_change(self, event):
elif change_type == "DOWN":
# Note that there is a slight risk we can receive the event late and thus
# mark the host down even though we already had reconnected successfully.
# But it is unlikely, and don't have too much consequence since we'll try reconnecting
# This is unlikely, and will not have much consequence because we'll try reconnecting
# right away, so we favor the detection to make the Host.is_up more accurate.
if host is not None:
# this will be run by the scheduler
Expand Down Expand Up @@ -4447,7 +4447,7 @@ def _on_speculative_execute(self):
# PYTHON-836, the speculative queries must be after
# the query is sent from the main thread, otherwise the
# query from the main thread may raise NoHostAvailable
# if the _query_plan has been exhausted by the specualtive queries.
# if the _query_plan has been exhausted by the speculative queries.
# This also prevents a race condition accessing the iterator.
# We reschedule this call until the main thread has succeeded
# making a query
Expand Down Expand Up @@ -4559,7 +4559,7 @@ def warnings(self):

Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may throw if the response has not been received.
Otherwise, it may throw if the response has not been received.
"""
# TODO: When timers are introduced, just make this wait
if not self._event.is_set():
Expand All @@ -4575,7 +4575,7 @@ def custom_payload(self):

Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may throw if the response has not been received.
Otherwise, it may throw if the response has not been received.

:return: :ref:`custom_payload`.
"""
Expand Down Expand Up @@ -5285,7 +5285,7 @@ def cancel_continuous_paging(self):
try:
self.response_future._continuous_paging_session.cancel()
except AttributeError:
raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousdPagingOptions.")
raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousPagingOptions.")

@property
def was_applied(self):
Expand All @@ -5296,7 +5296,7 @@ def was_applied(self):
a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch
succeeds or fails.

Only valid when one of the of the internal row factories is in use.
Only valid when one of the internal row factories is in use.
"""
if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory):
raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,))
Expand Down
12 changes: 6 additions & 6 deletions cassandra/connection.py
Original file line number Diff line number Diff line change
Expand Up @@ -825,7 +825,7 @@ def initialize_reactor(cls):
@classmethod
def handle_fork(cls):
"""
Called after a forking. This should cleanup any remaining reactor state
Called after a forking. This should clean up any remaining reactor state
from the parent process.
"""
pass
Expand Down Expand Up @@ -862,7 +862,7 @@ def _build_ssl_context_from_options(self):
ssl_context_opt_names = ['ssl_version', 'cert_reqs', 'check_hostname', 'keyfile', 'certfile', 'ca_certs', 'ciphers']
opts = {k:self.ssl_options.get(k, None) for k in ssl_context_opt_names if k in self.ssl_options}

# Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER so we'll get ahead of things by always
# Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER, so we'll get ahead of things by always
# being explicit
ssl_version = opts.get('ssl_version', None) or ssl.PROTOCOL_TLS_CLIENT
cert_reqs = opts.get('cert_reqs', None) or ssl.CERT_REQUIRED
Expand Down Expand Up @@ -891,7 +891,7 @@ def _wrap_socket_from_context(self):
opts = {k:self.ssl_options.get(k, None) for k in wrap_socket_opt_names if k in self.ssl_options}

# PYTHON-1186: set the server_hostname only if the SSLContext has
# check_hostname enabled and it is not already provided by the EndPoint ssl options
# check_hostname enabled, and it is not already provided by the EndPoint ssl options
#opts['server_hostname'] = self.endpoint.address
if (self.ssl_context.check_hostname and 'server_hostname' not in opts):
server_hostname = self.endpoint.address
Expand Down Expand Up @@ -1210,11 +1210,11 @@ def process_io_buffer(self):

if not self._current_frame or pos < self._current_frame.end_pos:
if self._is_checksumming_enabled and self._io_buffer.readable_io_bytes():
# We have a multi-segments message and we need to read more
# We have a multi-segment message, and we need to read more
# data to complete the current cql frame
continue

# we don't have a complete header yet or we
# we don't have a complete header yet, or we
# already saw a header, but we don't have a
# complete message yet
return
Expand Down Expand Up @@ -1713,7 +1713,7 @@ def run(self):
else:
log.debug("Cannot send heartbeat message on connection (%s) to %s",
id(connection), connection.endpoint)
# make sure the owner sees this defunt/closed connection
# make sure the owner sees this defunct/closed connection
owner.return_connection(connection)
self._raise_if_stopped()

Expand Down
8 changes: 4 additions & 4 deletions cassandra/cqlengine/connection.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def from_session(cls, name, session):
return instance

def setup(self):
"""Setup the connection"""
"""Set up the connection"""
global cluster, session

if 'username' in self.cluster_options or 'password' in self.cluster_options:
Expand Down Expand Up @@ -134,7 +134,7 @@ def setup_session(self):

def handle_lazy_connect(self):

# if lazy_connect is False, it means the cluster is setup and ready
# if lazy_connect is False, it means the cluster is set up and ready
# No need to acquire the lock
if not self.lazy_connect:
return
Expand Down Expand Up @@ -280,7 +280,7 @@ def set_session(s):
try:
conn = get_connection()
except CQLEngineException:
# no default connection set; initalize one
# no default connection set; initialize one
register_connection('default', session=s, default=True)
conn = get_connection()
else:
Expand Down Expand Up @@ -316,7 +316,7 @@ def setup(
retry_connect=False,
**kwargs):
"""
Setup a the driver connection used by the mapper
Set up the driver connection used by the mapper

:param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`)
:param str default_keyspace: The default keyspace to use
Expand Down
2 changes: 1 addition & 1 deletion cassandra/cqlengine/management.py
Original file line number Diff line number Diff line change
Expand Up @@ -525,7 +525,7 @@ def _drop_table(model, connection=None):

connection = connection or model._get_connection()

# don't try to delete non existant tables
# don't try to delete non existent tables
meta = get_cluster(connection).metadata

ks_name = model._get_keyspace()
Expand Down
6 changes: 3 additions & 3 deletions cassandra/cqlengine/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ def __call__(self, *args, **kwargs):

class IfNotExistsDescriptor(object):
"""
return a query set descriptor with a if_not_exists flag specified
return a query set descriptor with an if_not_exists flag specified
"""
def __get__(self, instance, model):
if instance:
Expand All @@ -201,7 +201,7 @@ def __call__(self, *args, **kwargs):

class IfExistsDescriptor(object):
"""
return a query set descriptor with a if_exists flag specified
return a query set descriptor with an if_exists flag specified
"""
def __get__(self, instance, model):
if instance:
Expand Down Expand Up @@ -398,7 +398,7 @@ def __init__(self, **values):
self._values = {}
for name, column in self._columns.items():
# Set default values on instantiation. Thanks to this, we don't have
# to wait anylonger for a call to validate() to have CQLengine set
# to wait any longer for a call to validate() to have CQLengine set
# default columns values.
column_default = column.get_default() if column.has_default else None
value = values.get(name, column_default)
Expand Down
10 changes: 5 additions & 5 deletions cassandra/cqlengine/query.py
Original file line number Diff line number Diff line change
Expand Up @@ -938,7 +938,7 @@ def fetch_size(self, v):

def allow_filtering(self):
"""
Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key
Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key
"""
clone = copy.deepcopy(self)
clone._allow_filtering = True
Expand Down Expand Up @@ -1417,7 +1417,7 @@ def update(self):
prior to calling this.
"""
if self.instance is None:
raise CQLEngineException("DML Query intance attribute is None")
raise CQLEngineException("DML Query instance attribute is None")
assert type(self.instance) == self.model
null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True
static_changed_only = True
Expand All @@ -1429,7 +1429,7 @@ def update(self):
updated_columns = set()
# get defined fields and their column names
for name, col in self.model._columns.items():
# if clustering key is null, don't include non static columns
# if clustering key is null, don't include non-static columns
if null_clustering_key and not col.static and not col.partition_key:
continue
if not col.is_primary_key:
Expand All @@ -1448,7 +1448,7 @@ def update(self):

if statement.assignments:
for name, col in self.model._primary_keys.items():
# only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error
# only include clustering key if clustering key is not null, and non-static columns are changed to avoid cql error
if (null_clustering_key or static_changed_only) and (not col.partition_key):
continue
statement.add_where(col, EqualsOperator(), getattr(self.instance, name))
Expand All @@ -1468,7 +1468,7 @@ def save(self):
prior to calling this.
"""
if self.instance is None:
raise CQLEngineException("DML Query intance attribute is None")
raise CQLEngineException("DML Query instance attribute is None")
assert type(self.instance) == self.model

nulled_fields = set()
Expand Down
4 changes: 2 additions & 2 deletions cassandra/cqlengine/statements.py
Original file line number Diff line number Diff line change
Expand Up @@ -550,7 +550,7 @@ def get_context(self):

def add_conditional_clause(self, clause):
"""
Adds a iff clause to this statement
Adds an iff clause to this statement

:param clause: The clause that will be added to the iff statement
:type clause: ConditionalClause
Expand All @@ -575,7 +575,7 @@ def update_context_id(self, i):
@property
def timestamp_normalized(self):
"""
we're expecting self.timestamp to be either a long, int, a datetime, or a timedelta
We're expecting self.timestamp to be either a long, an int, a datetime, or a timedelta
:return:
"""
if not self.timestamp:
Expand Down
8 changes: 4 additions & 4 deletions cassandra/marshal.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,8 @@ def vints_pack(values):
num_extra_bytes = 0
num_bits = v.bit_length()
# We need to reserve (num_extra_bytes+1) bits in the first byte
# ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved
# ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved
# i.e. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved
# i.e. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved
reserved_bits = num_extra_bytes + 1
while num_bits > (8-(reserved_bits)):
num_extra_bytes += 1
Expand Down Expand Up @@ -167,8 +167,8 @@ def uvint_pack(val):
num_extra_bytes = 0
num_bits = v.bit_length()
# We need to reserve (num_extra_bytes+1) bits in the first byte
# ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved
# ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved
# i.e. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved
# i.e. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved
reserved_bits = num_extra_bytes + 1
while num_bits > (8-(reserved_bits)):
num_extra_bytes += 1
Expand Down
2 changes: 1 addition & 1 deletion cassandra/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ def get_stats(self):
def set_stats_name(self, stats_name):
"""
Set the metrics stats name.
The stats_name is a string used to access the metris through scales: scales.getStats()[<stats_name>]
The stats_name is a string used to access the metrics through scales: scales.getStats()[<stats_name>]
Default is 'cassandra-<num>'.
"""

Expand Down
Loading