From 719922ea4882b31262913523dd07d49bcd3b737a Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 30 Jul 2023 16:34:27 +0200 Subject: [PATCH 01/11] Pass namespace/cluster mode explicitly to the mocked k8s streams Otherwise, some tests may populate the cluster-scoped URLs while consuming the namespace-scoped URLs, and thus hit cryptic, hard-to-debug errors. Signed-off-by: Sergey Vasilyev --- tests/conftest.py | 6 +++--- tests/reactor/conftest.py | 2 +- tests/reactor/test_queueing.py | 16 +++++++++------- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 683abc10..1abe5d91 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,7 +7,7 @@ import re import sys import time -from typing import Set +from typing import Set, Optional import aiohttp.web import pytest @@ -310,7 +310,7 @@ def version_api(resp_mocker, aresponses, hostname, resource): def stream(fake_vault, resp_mocker, aresponses, hostname, resource, version_api): """ A mock for the stream of events as if returned by K8s client. """ - def feed(*args, namespace=None): + def feed(*args, namespace: Optional[str]): for arg in args: # Prepare the stream response pre-rendered (for simplicity, no actual streaming). @@ -335,7 +335,7 @@ def feed(*args, namespace=None): aresponses.add(hostname, list_url, 'get', list_resp, match_querystring=True) # TODO: One day, find a better way to terminate a ``while-true`` reconnection cycle. - def close(*, namespace=None): + def close(*, namespace: Optional[str]): """ A way to stop the stream from reconnecting: tell it that the resource version is gone (we know a priori that it stops on this condition, and escalates to `infinite_stream`). diff --git a/tests/reactor/conftest.py b/tests/reactor/conftest.py index 99adc261..450d3dde 100644 --- a/tests/reactor/conftest.py +++ b/tests/reactor/conftest.py @@ -48,7 +48,7 @@ async def do_nothing(*args, **kwargs): pass # Prevent any real streaming for the very beginning, before it even starts. - stream.feed([]) + stream.feed([], namespace=None) # Spawn a watcher in the background. coro = watcher( diff --git a/tests/reactor/test_queueing.py b/tests/reactor/test_queueing.py index bebed44d..22e3ea55 100644 --- a/tests/reactor/test_queueing.py +++ b/tests/reactor/test_queueing.py @@ -56,8 +56,8 @@ async def test_watchevent_demultiplexing(worker_mock, timer, resource, processor settings.batching.batch_window = 100 # should not be involved, fail if it is # Inject the events of unique objects - to produce a few streams/workers. - stream.feed(events) - stream.close() + stream.feed(events, namespace=None) + stream.close(namespace=None) # Run the watcher (near-instantly and test-blocking). with timer: @@ -132,8 +132,8 @@ async def test_watchevent_batching(settings, resource, processor, timer, settings.batching.batch_window = 0.3 # the time period being tested (make bigger than overhead) # Inject the events of unique objects - to produce a few streams/workers. - stream.feed(events) - stream.close() + stream.feed(events, namespace=None) + stream.close(namespace=None) # Run the watcher (near-instantly and test-blocking). 
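With this change, every test must name the scope whose URLs it populates: ``namespace=None`` means the cluster-scoped endpoints, a string means the namespaced ones. A minimal usage sketch against the fixtures above (a hypothetical test; only the fixture names and the feed/close signatures are taken from this patch)::

    async def test_stream_scope(stream, worker_mock):
        # Feed one batch of raw watch-events into the cluster-scoped URLs,
        # then stop the mocked stream's while-true reconnection cycle.
        stream.feed([{'type': 'ADDED', 'object': {'metadata': {'name': 'obj1'}}}],
                    namespace=None)
        stream.close(namespace=None)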
with timer: @@ -179,7 +179,9 @@ async def test_watchevent_batching(settings, resource, processor, timer, ]) @pytest.mark.usefixtures('watcher_in_background') -async def test_garbage_collection_of_streams(settings, stream, events, unique, worker_spy): +async def test_garbage_collection_of_streams( + settings, stream, events, unique, worker_spy, namespace +): # Override the default timeouts to make the tests faster. settings.batching.exit_timeout = 100 # should exit instantly, fail if it didn't @@ -188,8 +190,8 @@ async def test_garbage_collection_of_streams(settings, stream, events, unique, w settings.watching.reconnect_backoff = 1.0 # to prevent src depletion # Inject the events of unique objects - to produce a few streams/workers. - stream.feed(events) - stream.close() + stream.feed(events, namespace=None) + stream.close(namespace=None) # Give it a moment to populate the streams and spawn all the workers. # Intercept and remember _any_ seen dict of streams for further checks. From e52175788f9dac74bd4bf85560909121155e4390 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 13 Nov 2022 16:56:08 +0100 Subject: [PATCH 02/11] Log the retries limit errors the same as the time limit errors Signed-off-by: Sergey Vasilyev --- kopf/_core/actions/execution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kopf/_core/actions/execution.py b/kopf/_core/actions/execution.py index 37573f69..654b32c9 100644 --- a/kopf/_core/actions/execution.py +++ b/kopf/_core/actions/execution.py @@ -301,7 +301,7 @@ async def execute_handler_once( return Outcome(final=False, exception=e, delay=e.delay, subrefs=subrefs) # Same as permanent errors below, but with better logging for our internal cases. - except HandlerTimeoutError as e: + except (HandlerTimeoutError, HandlerRetriesError) as e: logger.error(f"{str(e) or repr(e)}") # already formatted return Outcome(final=True, exception=e, subrefs=subrefs) # TODO: report the handling failure somehow (beside logs/events). persistent status? From d30079806a51eac1ce7b80a28295db49e8ceb953 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 13 Nov 2022 16:56:19 +0100 Subject: [PATCH 03/11] Prevent handler retries if they are doomed to fail on the next run Signed-off-by: Sergey Vasilyev --- kopf/_core/actions/execution.py | 64 ++++++++++++++++++-- tests/handling/daemons/test_daemon_errors.py | 10 ++- tests/handling/daemons/test_timer_errors.py | 14 ++--- tests/handling/test_error_handling.py | 2 +- tests/handling/test_event_handling.py | 2 +- 5 files changed, 70 insertions(+), 22 deletions(-) diff --git a/kopf/_core/actions/execution.py b/kopf/_core/actions/execution.py index 654b32c9..c8619292 100644 --- a/kopf/_core/actions/execution.py +++ b/kopf/_core/actions/execution.py @@ -267,9 +267,10 @@ async def execute_handler_once( try: logger.debug(f"{handler} is invoked.") + # Strict checks — contrary to the look-ahead checks below, which are approximate. + # The unforeseen extra time could be added by e.g. operator or cluster downtime. if handler.timeout is not None and state.runtime.total_seconds() >= handler.timeout: raise HandlerTimeoutError(f"{handler} has timed out after {state.runtime}.") - if handler.retries is not None and state.retries >= handler.retries: raise HandlerRetriesError(f"{handler} has exceeded {state.retries} retries.") @@ -297,8 +298,27 @@ async def execute_handler_once( # Definitely a temporary error, regardless of the error strictness. 
except TemporaryError as e: - logger.error(f"{handler} failed temporarily: {str(e) or repr(e)}") - return Outcome(final=False, exception=e, delay=e.delay, subrefs=subrefs) + # Maybe false-negative but never false-positive checks to save extra cycles & time wasted. + lookahead_runtime = state.runtime.total_seconds() + (e.delay or 0) + lookahead_timeout = handler.timeout is not None and lookahead_runtime >= handler.timeout + lookahead_retries = handler.retries is not None and state.retries + 1 >= handler.retries + if lookahead_timeout: + msg = ( + f"{handler} failed temporarily but will time out after {handler.timeout} seconds: " + f"{str(e) or repr(e)}" + ) + logger.error(msg) + return Outcome(final=True, exception=HandlerTimeoutError(msg), subrefs=subrefs) + elif lookahead_retries: + msg = ( + f"{handler} failed temporarily but will exceed {handler.retries} retries: " + f"{str(e) or repr(e)}" + ) + logger.error(msg) + return Outcome(final=True, exception=HandlerRetriesError(msg), subrefs=subrefs) + else: + logger.error(f"{handler} failed temporarily: {str(e) or repr(e)}") + return Outcome(final=False, exception=e, delay=e.delay, subrefs=subrefs) # Same as permanent errors below, but with better logging for our internal cases. except (HandlerTimeoutError, HandlerRetriesError) as e: @@ -314,14 +334,46 @@ async def execute_handler_once( # Regular errors behave as either temporary or permanent depending on the error strictness. except Exception as e: + # Maybe false-negative but never false-positive checks to save extra cycles & time wasted. + lookahead_runtime = state.runtime.total_seconds() + backoff + lookahead_timeout = handler.timeout is not None and lookahead_runtime >= handler.timeout + lookahead_retries = handler.retries is not None and state.retries + 1 >= handler.retries if errors_mode == ErrorsMode.IGNORED: - logger.exception(f"{handler} failed with an exception. Will ignore.") + msg = ( + f"{handler} failed with an exception and will ignore it: " + f"{str(e) or repr(e)}" + ) + logger.exception(msg) return Outcome(final=True, subrefs=subrefs) + elif errors_mode == ErrorsMode.TEMPORARY and lookahead_timeout: + msg = ( + f"{handler} failed with an exception and will stop now " + f"(it would time out in {handler.timeout} seconds on the next attempt): " + f"{str(e) or repr(e)}" + ) + logger.exception(msg) + return Outcome(final=True, exception=HandlerTimeoutError(msg), subrefs=subrefs) + elif errors_mode == ErrorsMode.TEMPORARY and lookahead_retries: + msg = ( + f"{handler} failed with an exception and will stop now " + f"(it would exceed {handler.retries} retries on the next attempt): " + f"{str(e) or repr(e)}" + ) + logger.exception(msg) + return Outcome(final=True, exception=HandlerRetriesError(msg), subrefs=subrefs) elif errors_mode == ErrorsMode.TEMPORARY: - logger.exception(f"{handler} failed with an exception. Will retry.") + msg = ( + f"{handler} failed with an exception and will try again in {backoff} seconds: " + f"{str(e) or repr(e)}" + ) + logger.exception(msg) return Outcome(final=False, exception=e, delay=backoff, subrefs=subrefs) elif errors_mode == ErrorsMode.PERMANENT: - logger.exception(f"{handler} failed with an exception. Will stop.") + msg = ( + f"{handler} failed with an exception and will stop now: " + f"{str(e) or repr(e)}" + ) + logger.exception(msg) return Outcome(final=True, exception=e, subrefs=subrefs) # TODO: report the handling failure somehow (beside logs/events). persistent status? 
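The look-ahead conditions above compress into two one-sided inequalities; the asymmetry is intentional: a false negative merely costs one extra doomed attempt, while a false positive would wrongly finalize a handler that still had a chance. A condensed restatement (a hypothetical standalone helper, not the kopf internals)::

    from typing import Optional, Tuple

    def lookahead(runtime: float, delay: Optional[float], retries: int,
                  timeout: Optional[float], max_retries: Optional[int]) -> Tuple[bool, bool]:
        # Never false-positive: only predict failure when the *next* attempt
        # is certain to hit the strict checks at the top of the function.
        out_of_time = timeout is not None and runtime + (delay or 0) >= timeout
        out_of_tries = max_retries is not None and retries + 1 >= max_retries
        return out_of_time, out_of_tries

For example, with ``timeout=60``, ``runtime=55``, and ``delay=10``, the next attempt would start past the deadline, so the handler is finalized with ``HandlerTimeoutError`` now instead of sleeping first.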
else: diff --git a/tests/handling/daemons/test_daemon_errors.py b/tests/handling/daemons/test_daemon_errors.py index ac48bd46..1bdd7bf1 100644 --- a/tests/handling/daemons/test_daemon_errors.py +++ b/tests/handling/daemons/test_daemon_errors.py @@ -56,7 +56,7 @@ async def fn(**kwargs): assert k8s_mocked.sleep.call_count == 0 assert_logs([ - "Daemon 'fn' failed with an exception. Will stop.", + "Daemon 'fn' failed with an exception and will stop now: boo!", "Daemon 'fn' has exited on its own", ], prohibited=[ "Daemon 'fn' succeeded.", @@ -122,7 +122,7 @@ async def fn(retry, **kwargs): assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] assert_logs([ - "Daemon 'fn' failed with an exception. Will retry.", + "Daemon 'fn' failed with an exception and will try again in 1.0 seconds: boo!", "Daemon 'fn' succeeded.", "Daemon 'fn' has exited on its own", ]) @@ -142,10 +142,9 @@ async def fn(**kwargs): await dummy.steps['called'].wait() await dummy.wait_for_daemon_done() - assert k8s_mocked.sleep.call_count == 3 # one for each retry + assert k8s_mocked.sleep.call_count == 2 # one between each retry (3 attempts - 2 sleeps) assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # [call#][args/kwargs][arg#] - assert k8s_mocked.sleep.call_args_list[2][0][0] == 1.0 # [call#][args/kwargs][arg#] async def test_daemon_retried_until_timeout( @@ -162,7 +161,6 @@ async def fn(**kwargs): await dummy.steps['called'].wait() await dummy.wait_for_daemon_done() - assert k8s_mocked.sleep.call_count == 3 # one for each retry + assert k8s_mocked.sleep.call_count == 2 # one between each retry (3 attempts - 2 sleeps) assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # [call#][args/kwargs][arg#] - assert k8s_mocked.sleep.call_args_list[2][0][0] == 1.0 # [call#][args/kwargs][arg#] diff --git a/tests/handling/daemons/test_timer_errors.py b/tests/handling/daemons/test_timer_errors.py index 62ca8e0e..934ff4fe 100644 --- a/tests/handling/daemons/test_timer_errors.py +++ b/tests/handling/daemons/test_timer_errors.py @@ -56,7 +56,7 @@ async def fn(**kwargs): assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 assert_logs([ - "Timer 'fn' failed with an exception. Will stop.", + "Timer 'fn' failed with an exception and will stop now: boo!", ], prohibited=[ "Timer 'fn' succeeded.", ]) @@ -122,7 +122,7 @@ async def fn(retry, **kwargs): assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # interval assert_logs([ - "Timer 'fn' failed with an exception. 
Will retry.", + "Timer 'fn' failed with an exception and will try again in 1.0 seconds: boo!", "Timer 'fn' succeeded.", ]) @@ -144,11 +144,10 @@ async def fn(**kwargs): await dummy.steps['called'].wait() await dummy.wait_for_daemon_done() - assert k8s_mocked.sleep.call_count >= 4 # one for each retry + assert k8s_mocked.sleep.call_count >= 3 # one between each retry (3 attempts - 2 sleeps) assert k8s_mocked.sleep.call_args_list[0][0][0] == [1.0] # delays assert k8s_mocked.sleep.call_args_list[1][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[2][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[3][0][0] == 1.0 # interval + assert k8s_mocked.sleep.call_args_list[2][0][0] == 1.0 # interval async def test_timer_retried_until_timeout( @@ -168,8 +167,7 @@ async def fn(**kwargs): await dummy.steps['called'].wait() await dummy.wait_for_daemon_done() - assert k8s_mocked.sleep.call_count >= 4 # one for each retry + assert k8s_mocked.sleep.call_count >= 3 # one between each retry (3 attempts - 2 sleeps) assert k8s_mocked.sleep.call_args_list[0][0][0] == [1.0] # delays assert k8s_mocked.sleep.call_args_list[1][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[2][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[3][0][0] == 1.0 # interval + assert k8s_mocked.sleep.call_args_list[2][0][0] == 1.0 # interval diff --git a/tests/handling/test_error_handling.py b/tests/handling/test_error_handling.py index 0a3f412b..c987d18f 100644 --- a/tests/handling/test_error_handling.py +++ b/tests/handling/test_error_handling.py @@ -145,5 +145,5 @@ async def test_arbitrary_error_delays_handler( assert patch['status']['kopf']['progress'][name1]['delayed'] assert_logs([ - "Handler .+ failed with an exception. Will retry.", + "Handler .+ failed with an exception and will try again in 60 seconds: oops", ]) diff --git a/tests/handling/test_event_handling.py b/tests/handling/test_event_handling.py index d230349c..0ddf05f6 100644 --- a/tests/handling/test_event_handling.py +++ b/tests/handling/test_event_handling.py @@ -71,7 +71,7 @@ async def test_errors_are_ignored( assert_logs([ "Handler 'event_fn' is invoked.", - "Handler 'event_fn' failed with an exception. 
Will ignore.", + "Handler 'event_fn' failed with an exception and will ignore it: oops", "Handler 'event_fn2' is invoked.", "Handler 'event_fn2' succeeded.", ]) From 433c76e9fd9357315793825ff66e2d3ed45c62b5 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sat, 10 Dec 2022 20:09:48 +0100 Subject: [PATCH 04/11] Use strict checks for None in several places Signed-off-by: Sergey Vasilyev --- kopf/_core/actions/progression.py | 4 ++-- kopf/_core/engines/daemons.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kopf/_core/actions/progression.py b/kopf/_core/actions/progression.py index 65367ee5..8d4724ca 100644 --- a/kopf/_core/actions/progression.py +++ b/kopf/_core/actions/progression.py @@ -109,8 +109,8 @@ def with_outcome( return cls( active=self.active, purpose=self.purpose, - started=self.started if self.started else now, - stopped=self.stopped if self.stopped else now if outcome.final else None, + started=self.started if self.started is not None else now, + stopped=self.stopped if self.stopped is not None else now if outcome.final else None, delayed=now + datetime.timedelta(seconds=outcome.delay) if outcome.delay is not None else None, success=bool(outcome.final and outcome.exception is None), failure=bool(outcome.final and outcome.exception is not None), diff --git a/kopf/_core/engines/daemons.py b/kopf/_core/engines/daemons.py index 6d1de9ad..fff72938 100644 --- a/kopf/_core/engines/daemons.py +++ b/kopf/_core/engines/daemons.py @@ -187,7 +187,7 @@ async def stop_daemons( for daemon in list(daemons.values()): logger = daemon.logger stopper = daemon.stopper - age = (now - (stopper.when or now)) + age = (now - (stopper.when if stopper.when is not None else now)) handler = daemon.handler if isinstance(handler, handlers_.DaemonHandler): From 5a4ac1d51e2a7999309a3166eece2bb60de141d9 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 11 Dec 2022 11:00:10 +0100 Subject: [PATCH 05/11] Narrow the handler state interface and shift the storage-specific fields to the implementation Signed-off-by: Sergey Vasilyev --- kopf/_cogs/configs/progress.py | 2 +- kopf/_core/actions/execution.py | 36 ++++--------------- kopf/_core/actions/progression.py | 18 ++++++++++ .../handling/indexing/test_index_exclusion.py | 19 +++++++--- 4 files changed, 40 insertions(+), 35 deletions(-) diff --git a/kopf/_cogs/configs/progress.py b/kopf/_cogs/configs/progress.py index ed945428..3f811812 100644 --- a/kopf/_cogs/configs/progress.py +++ b/kopf/_cogs/configs/progress.py @@ -49,7 +49,7 @@ from kopf._cogs.structs import bodies, dicts, ids, patches -class ProgressRecord(TypedDict, total=True): +class ProgressRecord(TypedDict, total=False): """ A single record stored for persistence of a single handler. """ started: Optional[str] stopped: Optional[str] diff --git a/kopf/_core/actions/execution.py b/kopf/_core/actions/execution.py index c8619292..86f17243 100644 --- a/kopf/_core/actions/execution.py +++ b/kopf/_core/actions/execution.py @@ -86,44 +86,22 @@ class Outcome: subrefs: Collection[ids.HandlerId] = () -@dataclasses.dataclass(frozen=True) -class HandlerState: +class HandlerState(Protocol): """ - A persisted state of a single handler, as stored on the resource's status. - - Note the difference: `Outcome` is for in-memory results of handlers, - which is then additionally converted before being storing as a state. + A minimal necessary protocol (interface) of a handler's runtime state. 
- Active handler states are those used in .done/.delays for the current - handling cycle & the current cause. Passive handler states are those - carried over for logging of counts/extras, and for final state purging, - but not participating in the current handling cycle. + The implementation and detailed fields are in `progression.HandlerState`. """ - started: Optional[datetime.datetime] = None # None means this information was lost. - stopped: Optional[datetime.datetime] = None # None means it is still running (e.g. delayed). - delayed: Optional[datetime.datetime] = None # None means it is finished (succeeded/failed). - retries: int = 0 - success: bool = False - failure: bool = False - - @property - def finished(self) -> bool: - return bool(self.success or self.failure) - - @property - def sleeping(self) -> bool: - ts = self.delayed - now = datetime.datetime.utcnow() - return not self.finished and ts is not None and ts > now + started: Optional[datetime.datetime] + retries: int @property def awakened(self) -> bool: - return bool(not self.finished and not self.sleeping) + raise NotImplementedError @property def runtime(self) -> datetime.timedelta: - now = datetime.datetime.utcnow() - return now - (self.started if self.started else now) + raise NotImplementedError class State(Mapping[ids.HandlerId, HandlerState]): diff --git a/kopf/_core/actions/progression.py b/kopf/_core/actions/progression.py index 8d4724ca..8ada094c 100644 --- a/kopf/_core/actions/progression.py +++ b/kopf/_core/actions/progression.py @@ -50,6 +50,24 @@ class HandlerState(execution.HandlerState): subrefs: Collection[ids.HandlerId] = () # ids of actual sub-handlers of all levels deep. _origin: Optional[progress.ProgressRecord] = None # to check later if it has actually changed. + @property + def finished(self) -> bool: + return bool(self.success or self.failure) + + @property + def sleeping(self) -> bool: + ts = self.delayed + now = datetime.datetime.utcnow() + return not self.finished and ts is not None and ts > now + + @property + def awakened(self) -> bool: + return bool(not self.finished and not self.sleeping) + + @property + def runtime(self) -> datetime.timedelta: + return datetime.datetime.utcnow() - self.started + @classmethod def from_scratch(cls, *, purpose: Optional[str] = None) -> "HandlerState": return cls( diff --git a/tests/handling/indexing/test_index_exclusion.py b/tests/handling/indexing/test_index_exclusion.py index f015613e..0298b57d 100644 --- a/tests/handling/indexing/test_index_exclusion.py +++ b/tests/handling/indexing/test_index_exclusion.py @@ -6,7 +6,9 @@ import pytest from kopf._cogs.aiokits.aiotoggles import Toggle +from kopf._cogs.configs.progress import ProgressRecord from kopf._cogs.structs.ephemera import Memo +from kopf._cogs.structs.ids import HandlerId from kopf._core.actions.execution import PermanentError, TemporaryError from kopf._core.actions.lifecycles import all_at_once from kopf._core.actions.progression import HandlerState, State @@ -29,8 +31,10 @@ async def test_successes_are_removed_from_the_indexing_state( resource, namespace, settings, registry, memories, indexers, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) body = {'metadata': {'namespace': namespace, 'name': 'name1'}} + record = ProgressRecord(success=True) + state = State({HandlerId('unrelated'): HandlerState.from_storage(record)}) memory = await memories.recall(raw_body=body) - memory.indexing_memory.indexing_state = State({'unrelated': HandlerState(success=True)}) + 
memory.indexing_memory.indexing_state = state handlers.index_mock.side_effect = 123 await process_resource_event( lifecycle=all_at_once, @@ -53,8 +57,10 @@ async def test_temporary_failures_with_no_delays_are_reindexed( resource, namespace, settings, registry, memories, indexers, index, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) body = {'metadata': {'namespace': namespace, 'name': 'name1'}} + record = ProgressRecord(delayed=None) + state = State({HandlerId('index_fn'): HandlerState.from_storage(record)}) memory = await memories.recall(raw_body=body) - memory.indexing_memory.indexing_state = State({'index_fn': HandlerState(delayed=None)}) + memory.indexing_memory.indexing_state = state await process_resource_event( lifecycle=all_at_once, registry=registry, @@ -76,9 +82,10 @@ async def test_temporary_failures_with_expired_delays_are_reindexed( resource, namespace, settings, registry, memories, indexers, index, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) body = {'metadata': {'namespace': namespace, 'name': 'name1'}} - delayed = datetime.datetime(2020, 12, 31, 23, 59, 59, 0) + record = ProgressRecord(delayed='2020-12-31T23:59:59.000000') + state = State({HandlerId('index_fn'): HandlerState.from_storage(record)}) memory = await memories.recall(raw_body=body) - memory.indexing_memory.indexing_state = State({'index_fn': HandlerState(delayed=delayed)}) + memory.indexing_memory.indexing_state = state await process_resource_event( lifecycle=all_at_once, registry=registry, @@ -99,8 +106,10 @@ async def test_permanent_failures_are_not_reindexed( resource, namespace, settings, registry, memories, indexers, index, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) body = {'metadata': {'namespace': namespace, 'name': 'name1'}} + record = ProgressRecord(failure=True) + state = State({HandlerId('index_fn'): HandlerState.from_storage(record)}) memory = await memories.recall(raw_body=body) - memory.indexing_memory.indexing_state = State({'index_fn': HandlerState(failure=True)}) + memory.indexing_memory.indexing_state = state await process_resource_event( lifecycle=all_at_once, registry=registry, From 45746ecf27d2fcf8a91977e2b0855251beb9020b Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 11 Dec 2022 10:40:49 +0100 Subject: [PATCH 06/11] Restrict the handler state's "started"/"active" from being None, which has no meaning Signed-off-by: Sergey Vasilyev --- kopf/_core/actions/execution.py | 4 ++-- kopf/_core/actions/progression.py | 8 +++----- tests/handling/test_delays.py | 13 +++++++------ 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/kopf/_core/actions/execution.py b/kopf/_core/actions/execution.py index 86f17243..40cc39de 100644 --- a/kopf/_core/actions/execution.py +++ b/kopf/_core/actions/execution.py @@ -92,7 +92,7 @@ class HandlerState(Protocol): The implementation and detailed fields are in `progression.HandlerState`. """ - started: Optional[datetime.datetime] + started: datetime.datetime retries: int @property @@ -256,7 +256,7 @@ async def execute_handler_once( handler=handler, cause=cause, retry=state.retries, - started=state.started or datetime.datetime.utcnow(), # "or" is for type-checking. + started=state.started, runtime=state.runtime, settings=settings, lifecycle=lifecycle, # just a default for the sub-handlers, not used directly. 
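Patches 05-06 together turn the execution module's view of a handler state into a narrow read-only interface, while the storable dataclass with all its extra fields lives in ``progression``. The underlying typing pattern, as a self-contained sketch (generic names, not the actual kopf classes)::

    import dataclasses
    import datetime
    from typing import Protocol

    class StateView(Protocol):
        # Only what the consumer reads; no storage-specific fields.
        started: datetime.datetime
        retries: int

    @dataclasses.dataclass(frozen=True)
    class StoredState:
        # The provider carries extra fields; consumers never see them.
        started: datetime.datetime
        retries: int = 0
        success: bool = False

    def describe(state: StateView) -> str:
        return f"retry #{state.retries}, started at {state.started}"

    print(describe(StoredState(started=datetime.datetime(2023, 1, 1))))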
diff --git a/kopf/_core/actions/progression.py b/kopf/_core/actions/progression.py index 8ada094c..b85113bd 100644 --- a/kopf/_core/actions/progression.py +++ b/kopf/_core/actions/progression.py @@ -36,10 +36,8 @@ class HandlerState(execution.HandlerState): carried over for logging of counts/extras, and for final state purging, but not participating in the current handling cycle. """ - - # Some fields may overlap the base class's fields, but this is fine (the types are the same). - active: Optional[bool] = None # is it used in done/delays [T]? or only in counters/purges [F]? - started: Optional[datetime.datetime] = None # None means this information was lost. + active: bool # whether it is used in done/delays [T] or only in counters/purges [F]. + started: datetime.datetime stopped: Optional[datetime.datetime] = None # None means it is still running (e.g. delayed). delayed: Optional[datetime.datetime] = None # None means it is finished (succeeded/failed). purpose: Optional[str] = None # None is a catch-all marker for upgrades/rollbacks. @@ -127,7 +125,7 @@ def with_outcome( return cls( active=self.active, purpose=self.purpose, - started=self.started if self.started is not None else now, + started=self.started, stopped=self.stopped if self.stopped is not None else now if outcome.final else None, delayed=now + datetime.timedelta(seconds=outcome.delay) if outcome.delay is not None else None, success=bool(outcome.final and outcome.exception is None), diff --git a/tests/handling/test_delays.py b/tests/handling/test_delays.py index 3b00dd74..7d2ab145 100644 --- a/tests/handling/test_delays.py +++ b/tests/handling/test_delays.py @@ -6,6 +6,7 @@ import pytest import kopf +from kopf._cogs.configs.progress import ProgressRecord from kopf._cogs.structs.ephemera import Memo from kopf._core.actions.application import WAITING_KEEPALIVE_INTERVAL from kopf._core.actions.execution import TemporaryError @@ -76,16 +77,16 @@ async def test_delayed_handlers_sleep( # Simulate the original persisted state of the resource. # Make sure the finalizer is added since there are mandatory deletion handlers. - started_dt = datetime.datetime.fromisoformat('2000-01-01T00:00:00') # long time ago is fine. 
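Since ``started`` can no longer be ``None``, the tests below stop constructing ``HandlerState`` directly and instead go through the storage format, which fills in the required fields. The round-trip, sketched against the APIs as they stand at this point of the series (PATCH 09 later adds a ``basetime`` argument)::

    from kopf._cogs.configs.progress import ProgressRecord
    from kopf._core.actions.progression import HandlerState

    record = ProgressRecord(started='2000-01-01T00:00:00',
                            delayed='2020-12-31T23:59:59')
    state = HandlerState.from_storage(record)   # required fields get filled
    state_dict = state.as_in_storage()          # back to the storable dict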
- delayed_dt = datetime.datetime.fromisoformat(delayed_iso) + record = ProgressRecord(started='2000-01-01T00:00:00', delayed=delayed_iso) # a long time ago + state_dict = HandlerState.from_storage(record).as_in_storage() event_type = None if cause_reason == Reason.RESUME else 'irrelevant' event_body = { 'metadata': {'finalizers': [settings.persistence.finalizer]}, 'status': {'kopf': {'progress': { - 'create_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), - 'update_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), - 'delete_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), - 'resume_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), + 'create_fn': state_dict, + 'update_fn': state_dict, + 'delete_fn': state_dict, + 'resume_fn': state_dict, }}} } cause_mock.reason = cause_reason From 12f9e5e360754133e358d3103d250cb52536e2f7 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 2 Jan 2022 23:54:05 +0100 Subject: [PATCH 07/11] Use the loop time instead of system/process time for internal time intervals MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The routines that depend only on the internal intervals and durations should use the internal loop time only — which can be later faked by e.g. `looptime`. If the system or process time is used, these routines fall out of time-faking and might take many more cycles to reach the desired real-world timestamp (just by letting the real time flow naturally). This does NOT cover the cases with intervals and durations based on the global time (i.e. UTC clock) — e.g. handlers' timeouts, retries, etc. Signed-off-by: Sergey Vasilyev --- kopf/_cogs/aiokits/aioenums.py | 2 +- kopf/_core/actions/throttlers.py | 8 ++++---- kopf/_core/engines/daemons.py | 18 +++++++++++------- kopf/_core/reactor/processing.py | 3 +-- tests/basic-structs/test_memories.py | 2 +- tests/handling/daemons/conftest.py | 7 ++++--- tests/timing/test_throttling.py | 4 ++-- 7 files changed, 24 insertions(+), 20 deletions(-) diff --git a/kopf/_cogs/aiokits/aioenums.py b/kopf/_cogs/aiokits/aioenums.py index a36d2809..608b01bd 100644 --- a/kopf/_cogs/aiokits/aioenums.py +++ b/kopf/_cogs/aiokits/aioenums.py @@ -53,7 +53,7 @@ def is_set(self, reason: Optional[FlagReasonT] = None) -> bool: def set(self, reason: Optional[FlagReasonT] = None) -> None: reason = reason if reason is not None else self.reason # to keep existing values - self.when = self.when if self.when is not None else time.monotonic() + self.when = self.when if self.when is not None else asyncio.get_running_loop().time() self.reason = reason if self.reason is None or reason is None else self.reason | reason self.sync_event.set() self.async_event.set() # it is thread-safe: always called in operator's event loop. diff --git a/kopf/_core/actions/throttlers.py b/kopf/_core/actions/throttlers.py index c591b14b..37c77c90 100644 --- a/kopf/_core/actions/throttlers.py +++ b/kopf/_core/actions/throttlers.py @@ -1,7 +1,6 @@ import asyncio import contextlib import dataclasses -import time from typing import AsyncGenerator, Iterable, Iterator, Optional, Tuple, Type, Union from kopf._cogs.aiokits import aiotime @@ -28,11 +27,12 @@ async def throttled( """ A helper to throttle any arbitrary operation. """ + clock = asyncio.get_running_loop().time # The 1st sleep: if throttling is already active, but was interrupted by a queue replenishment. 
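The recurring substitution in this patch, ``time.monotonic()`` to ``asyncio.get_running_loop().time()``, changes nothing in real runs: both tick monotonically at the same pace. It matters only under a mocked clock, where deadlines computed from loop time stay consistent with asyncio's own scheduling. A minimal self-contained illustration (a generic sketch, not kopf code)::

    import asyncio

    async def wait_for_deadline(seconds: float) -> None:
        # A deadline in loop time: a faked loop clock (e.g. looptime)
        # fast-forwards it, whereas time.monotonic() would tick for real.
        loop = asyncio.get_running_loop()
        deadline = loop.time() + seconds
        while (remaining := deadline - loop.time()) > 0:
            await asyncio.sleep(remaining)

    asyncio.run(wait_for_deadline(5.0))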
# It is needed to properly process the latest known event after the successful sleep. if throttler.active_until is not None: - remaining_time = throttler.active_until - time.monotonic() + remaining_time = throttler.active_until - clock() unslept_time = await aiotime.sleep(remaining_time, wakeup=wakeup) if unslept_time is None: logger.info("Throttling is over. Switching back to normal operations.") @@ -66,7 +66,7 @@ async def throttled( delay = next(throttler.source_of_delays, throttler.last_used_delay) if delay is not None: throttler.last_used_delay = delay - throttler.active_until = time.monotonic() + delay + throttler.active_until = clock() + delay logger.exception(f"Throttling for {delay} seconds due to an unexpected error: {e!r}") else: @@ -77,7 +77,7 @@ async def throttled( # The 2nd sleep: if throttling has been just activated (i.e. there was a fresh error). # It is needed to have better logging/sleeping without workers exiting for "no events". if throttler.active_until is not None and should_run: - remaining_time = throttler.active_until - time.monotonic() + remaining_time = throttler.active_until - clock() unslept_time = await aiotime.sleep(remaining_time, wakeup=wakeup) if unslept_time is None: throttler.active_until = None diff --git a/kopf/_core/engines/daemons.py b/kopf/_core/engines/daemons.py index fff72938..223582b2 100644 --- a/kopf/_core/engines/daemons.py +++ b/kopf/_core/engines/daemons.py @@ -23,7 +23,6 @@ import abc import asyncio import dataclasses -import time import warnings from typing import Collection, Dict, Iterable, List, Mapping, \ MutableMapping, Optional, Sequence, Set @@ -36,6 +35,10 @@ from kopf._core.intents import causes, handlers as handlers_, stoppers +def _loop_time() -> float: + return asyncio.get_running_loop().time() + + @dataclasses.dataclass(frozen=True) class Daemon: task: aiotasks.Task # a guarding task of the daemon. @@ -48,7 +51,7 @@ class Daemon: class DaemonsMemory: # For background and timed threads/tasks (invoked with the kwargs of the last-seen body). live_fresh_body: Optional[bodies.Body] = None - idle_reset_time: float = dataclasses.field(default_factory=time.monotonic) + idle_reset_time: float = dataclasses.field(default_factory=_loop_time) forever_stopped: Set[ids.HandlerId] = dataclasses.field(default_factory=set) running_daemons: Dict[ids.HandlerId, Daemon] = dataclasses.field(default_factory=dict) @@ -183,7 +186,7 @@ async def stop_daemons( (as by their surrounding circumstances: deletion handlers and finalizers). """ delays: List[float] = [] - now = time.monotonic() + now = asyncio.get_running_loop().time() for daemon in list(daemons.values()): logger = daemon.logger stopper = daemon.stopper @@ -537,6 +540,7 @@ async def _timer( await aiotime.sleep(handler.initial_delay, wakeup=stopper.async_event) # Similar to activities (in-memory execution), but applies patches on every attempt. + clock = asyncio.get_running_loop().time state = progression.State.from_scratch().with_handlers([handler]) while not stopper.is_set(): # NB: ignore state.done! it is checked below explicitly. @@ -548,14 +552,14 @@ async def _timer( # Both `now` and `last_seen_time` are moving targets: the last seen time is updated # on every watch-event received, and prolongs the sleep. The sleep is never shortened. 
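The idle-wait above chases a moving target: every incoming watch-event advances ``memory.idle_reset_time``, which prolongs the sleep; it is re-checked after each wake-up and never shortened. Stripped of the kopf specifics (a hypothetical free-standing helper)::

    import asyncio
    from typing import Callable

    async def sleep_until_idle(get_reset_time: Callable[[], float], idle: float) -> None:
        # Keep sleeping until a full `idle` interval passes with no resets;
        # each wake-up re-reads the reset time, so new events extend the wait.
        loop = asyncio.get_running_loop()
        while loop.time() - get_reset_time() < idle:
            await asyncio.sleep(get_reset_time() + idle - loop.time())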
if handler.idle is not None: - while not stopper.is_set() and time.monotonic() - memory.idle_reset_time < handler.idle: - delay = memory.idle_reset_time + handler.idle - time.monotonic() + while not stopper.is_set() and clock() - memory.idle_reset_time < handler.idle: + delay = memory.idle_reset_time + handler.idle - clock() await aiotime.sleep(delay, wakeup=stopper.async_event) if stopper.is_set(): continue # Remember the start time for the sharp timing and idle-time-waster below. - started = time.monotonic() + started = clock() # Execute the handler as usually, in-memory, but handle its outcome on every attempt. outcomes = await execution.execute_handlers_once( @@ -585,7 +589,7 @@ async def _timer( # |-----|-----|-----|-----|-----|-----|---> (interval=5, sharp=True) # [slow_handler]....[slow_handler]....[slow... elif handler.interval is not None and handler.sharp: - passed_duration = time.monotonic() - started + passed_duration = clock() - started remaining_delay = handler.interval - (passed_duration % handler.interval) await aiotime.sleep(remaining_delay, wakeup=stopper.async_event) diff --git a/kopf/_core/reactor/processing.py b/kopf/_core/reactor/processing.py index e2ab6c19..ddaddf84 100644 --- a/kopf/_core/reactor/processing.py +++ b/kopf/_core/reactor/processing.py @@ -14,7 +14,6 @@ and therefore do not trigger the user-defined handlers. """ import asyncio -import time from typing import Collection, Optional, Tuple from kopf._cogs.aiokits import aiotoggles @@ -329,7 +328,7 @@ async def process_spawning_cause( if memory.daemons_memory.live_fresh_body is None: memory.daemons_memory.live_fresh_body = cause.body if cause.reset: - memory.daemons_memory.idle_reset_time = time.monotonic() + memory.daemons_memory.idle_reset_time = asyncio.get_running_loop().time() if finalizers.is_deletion_ongoing(cause.body): stopping_delays = await daemons.stop_daemons( diff --git a/tests/basic-structs/test_memories.py b/tests/basic-structs/test_memories.py index 65406804..7da2c420 100644 --- a/tests/basic-structs/test_memories.py +++ b/tests/basic-structs/test_memories.py @@ -11,7 +11,7 @@ } -def test_creation_with_defaults(): +async def test_creation_with_defaults(): ResourceMemory() diff --git a/tests/handling/daemons/conftest.py b/tests/handling/daemons/conftest.py index 623d0d43..d64e3c9a 100644 --- a/tests/handling/daemons/conftest.py +++ b/tests/handling/daemons/conftest.py @@ -99,15 +99,16 @@ async def background_daemon_killer(settings, memories, operator_paused): @pytest.fixture() -def frozen_time(): +async def frozen_time(): """ A helper to simulate time movements to step over long sleeps/timeouts. """ - # TODO LATER: Either freezegun should support the system clock, or find something else. with freezegun.freeze_time("2020-01-01 00:00:00") as frozen: # Use freezegun-supported time instead of system clocks -- for testing purposes only. # NB: Patch strictly after the time is frozen -- to use fake_time(), not real time(). - with patch('time.monotonic', time.time), patch('time.perf_counter', time.time): + # NB: StdLib's event loops use time.monotonic(), but uvloop uses its own C-level time, + # so patch the loop object directly instead of its implied underlying implementation. 
+ with patch.object(asyncio.get_running_loop(), 'time', time.time): yield frozen diff --git a/tests/timing/test_throttling.py b/tests/timing/test_throttling.py index a77282a9..4a27faed 100644 --- a/tests/timing/test_throttling.py +++ b/tests/timing/test_throttling.py @@ -8,8 +8,8 @@ @pytest.fixture(autouse=True) -def clock(mocker): - return mocker.patch('time.monotonic', return_value=0) +async def clock(mocker): + return mocker.patch.object(asyncio.get_running_loop(), 'time', return_value=0) @pytest.fixture(autouse=True) From 01789c94f7c597904f64a36a9c3d5f4d78f0d291 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 30 Jul 2023 20:35:31 +0200 Subject: [PATCH 08/11] Test ISO timestamp parsing & rendering Signed-off-by: Sergey Vasilyev --- tests/persistence/test_iso8601_times.py | 43 +++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 tests/persistence/test_iso8601_times.py diff --git a/tests/persistence/test_iso8601_times.py b/tests/persistence/test_iso8601_times.py new file mode 100644 index 00000000..09e05461 --- /dev/null +++ b/tests/persistence/test_iso8601_times.py @@ -0,0 +1,43 @@ +import datetime + +import pytest + +from kopf._core.actions.progression import _datetime_toisoformat, _datetime_fromisoformat + +UTC = datetime.timezone.utc +WEST11 = datetime.timezone(datetime.timedelta(hours=-11)) +EAST11 = datetime.timezone(datetime.timedelta(hours=11)) + + +@pytest.mark.parametrize('timestamp, expected', [ + (None, None), + (datetime.datetime(2000, 1, 1), '2000-01-01T00:00:00.000000'), + (datetime.datetime(2000, 1, 1, 9, 8, 7, 654321), '2000-01-01T09:08:07.654321'), + (datetime.datetime(2000, 1, 1, tzinfo=UTC), '2000-01-01T00:00:00.000000+00:00'), + (datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=UTC), '2000-01-01T09:08:07.654321+00:00'), + (datetime.datetime(2000, 1, 1, 0, 0, 0, tzinfo=WEST11), '2000-01-01T00:00:00.000000-11:00'), + (datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=WEST11), '2000-01-01T09:08:07.654321-11:00'), + (datetime.datetime(2000, 1, 1, 0, 0, 0, tzinfo=EAST11), '2000-01-01T00:00:00.000000+11:00'), + (datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=EAST11), '2000-01-01T09:08:07.654321+11:00'), +]) +def test_datetime_to_iso8601(timestamp, expected): + result = _datetime_toisoformat(timestamp) + assert result == expected + + +@pytest.mark.parametrize('timestamp, expected', [ + (None, None), + ('2000-01-01T00:00:00.000000', datetime.datetime(2000, 1, 1)), + ('2000-01-01T09:08:07.654321', datetime.datetime(2000, 1, 1, 9, 8, 7, 654321)), + ('2000-01-01T00:00:00.000000Z', datetime.datetime(2000, 1, 1, tzinfo=UTC)), + ('2000-01-01T09:08:07.654321Z', datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=UTC)), + ('2000-01-01T00:00:00.000000+00:00', datetime.datetime(2000, 1, 1, tzinfo=UTC)), + ('2000-01-01T09:08:07.654321+00:00', datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=UTC)), + ('2000-01-01T00:00:00.000000-11:00', datetime.datetime(2000, 1, 1, tzinfo=WEST11)), + ('2000-01-01T09:08:07.654321-11:00', datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=WEST11)), + ('2000-01-01T00:00:00.000000+11:00', datetime.datetime(2000, 1, 1, tzinfo=EAST11)), + ('2000-01-01T09:08:07.654321+11:00', datetime.datetime(2000, 1, 1, 9, 8, 7, 654321, tzinfo=EAST11)), +]) +def test_iso8601_to_datetime(timestamp, expected): + result = _datetime_fromisoformat(timestamp) + assert result == expected From 628e925f5eb990cf31eac7ff3bd37864e55f948d Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sun, 11 Dec 2022 10:44:34 +0100 
Subject: [PATCH 09/11] Use the loop time instead of wall clock for handler states (started/stopped/delayed) Signed-off-by: Sergey Vasilyev --- kopf/_core/actions/progression.py | 133 ++++++++++++++---- .../handling/indexing/test_index_exclusion.py | 12 +- tests/handling/test_delays.py | 3 +- tests/lifecycles/test_handler_selection.py | 6 +- tests/persistence/test_states.py | 98 ++++++------- 5 files changed, 171 insertions(+), 81 deletions(-) diff --git a/kopf/_core/actions/progression.py b/kopf/_core/actions/progression.py index b85113bd..a81119b6 100644 --- a/kopf/_core/actions/progression.py +++ b/kopf/_core/actions/progression.py @@ -9,8 +9,69 @@ and the overall handling routine should persist the handler status somewhere. The states are persisted in a state storage: see `kopf._cogs.configs.progress`. -""" +MOCKED LOOP TIME: + +**For testability,** we use ``basetime + timedelta(seconds=loop.time())`` +to calculate the "now" moment instead of ``datetime.utcnow()``. + +The "basetime" is an imaginary UTC time when the loop clock was zero (``0.0``) +and is calculated as ``datetime.utcnow() - timedelta(seconds=loop.time())`` +(assuming these two calls are almost instant and the precision loss is low). + +In normal run mode, the "basetime" remains constant for the entire lifetime +of an event loop, since both loop time and wall-clock time move forward with +the same speed: the calculation of "basetime" always produces the same result. + +In test mode, the loop time is mocked and moves as events (e.g. sleeps) happen: +it can move (much) faster than the wall-clock time, e.g. 100s of loop seconds +in 1/100th of a wall-clock second; or it can freeze and not move at all. + +PROBLEMATIC INACCURACY: + +Because of a highly imprecise and ever-changing component in the formula +of the "basetime" — the non-mockable UTC clock — the "basetime" calculation +can give different results at different times even if executed fast enough. + +To reduce the inaccuracy introduced by sequential UTC time measurements, +we calculate the "basetime" once for every global state object created +and push it down to the owned state objects of the individual handlers +in this handling cycle of this resource object in this unit-test. + +That gives us sufficient accuracy while remaining simple enough, assuming that +no unit-test has multiple concurrent global state objects. +_(An alternative would be to calculate the "basetime" on event loop creation +or to cache it per event loop in a global WeakDict, but that is an overkill.)_ + +SUFFICIENT ACCURACY: + +With this approach and ``looptime``__, we can detach from the wall-clock time +in tests and simulate the time's rapid movement into the future by "recovering" +the "now" moment as ``basetime + timedelta(seconds=loop.time())`` (see above) — +without wall-clock delays or issues with code execution overhead. + +Note that there is no UTC clock involved now, only the controlled loop clock, +so multiple sequential calculations will lead to predictable and precise results, +especially when the loop clock is frozen (i.e. constant for a short duration). + +__ https://github.com/nolar/looptime + +USER PERSPECTIVE: + +This time math is never exposed to users and never persisted in storages. +It is used only internally to decouple the operator routines from the system +clock and strictly couple them to the time of the loop. + +IMPLEMENTATION DETAILS: + +Q: Why do we store UTC time in the fields instead of the floats with loop time? 
+A: If we store floats in the fields, we need to do the math on every +fetching/storing operation, which introduces minor divergence in supposedly +constant data as stored in the external storages. Instead, we only calculate +the "now" moment. As a result, the precision loss is seen only at runtime checks +and is indistinguishable from the loop clock sensitivity. +""" +import asyncio import collections.abc import copy import dataclasses @@ -37,6 +98,7 @@ class HandlerState(execution.HandlerState): but not participating in the current handling cycle. """ active: bool # whether it is used in done/delays [T] or only in counters/purges [F]. + basetime: datetime.datetime # a moment when the loop time was zero started: datetime.datetime stopped: Optional[datetime.datetime] = None # None means it is still running (e.g. delayed). delayed: Optional[datetime.datetime] = None # None means it is finished (succeeded/failed). @@ -54,9 +116,8 @@ def finished(self) -> bool: @property def sleeping(self) -> bool: - ts = self.delayed - now = datetime.datetime.utcnow() - return not self.finished and ts is not None and ts > now + now = self.basetime + datetime.timedelta(seconds=asyncio.get_running_loop().time()) + return not self.finished and self.delayed is not None and self.delayed > now @property def awakened(self) -> bool: @@ -64,21 +125,31 @@ def awakened(self) -> bool: @property def runtime(self) -> datetime.timedelta: - return datetime.datetime.utcnow() - self.started + now = self.basetime + datetime.timedelta(seconds=asyncio.get_running_loop().time()) + return now - self.started @classmethod - def from_scratch(cls, *, purpose: Optional[str] = None) -> "HandlerState": - return cls( - active=True, - started=datetime.datetime.utcnow(), - purpose=purpose, - ) + def from_scratch( + cls, + *, + basetime: datetime.datetime, + purpose: Optional[str] = None, + ) -> "HandlerState": + now = basetime + datetime.timedelta(seconds=asyncio.get_running_loop().time()) + return cls(active=True, basetime=basetime, started=now, purpose=purpose) @classmethod - def from_storage(cls, __d: progress.ProgressRecord) -> "HandlerState": + def from_storage( + cls, + __d: progress.ProgressRecord, + *, + basetime: datetime.datetime, + ) -> "HandlerState": + now = basetime + datetime.timedelta(seconds=asyncio.get_running_loop().time()) return cls( active=False, - started=_datetime_fromisoformat(__d.get('started')) or datetime.datetime.utcnow(), + basetime=basetime, + started=_datetime_fromisoformat(__d.get('started')) or now, stopped=_datetime_fromisoformat(__d.get('stopped')), delayed=_datetime_fromisoformat(__d.get('delayed')), purpose=__d.get('purpose') if __d.get('purpose') else None, @@ -120,10 +191,11 @@ def with_outcome( self, outcome: execution.Outcome, ) -> "HandlerState": - now = datetime.datetime.utcnow() + now = self.basetime + datetime.timedelta(seconds=asyncio.get_running_loop().time()) cls = type(self) return cls( active=self.active, + basetime=self.basetime, purpose=self.purpose, started=self.started, stopped=self.stopped if self.stopped is not None else now if outcome.final else None, delayed=now + datetime.timedelta(seconds=outcome.delay) if outcome.delay is not None else None, success=bool(outcome.final and outcome.exception is None), @@ -156,19 +228,25 @@ class State(execution.State): """ _states: Mapping[ids.HandlerId, HandlerState] + # Eliminate even the smallest microsecond-scale deviations by using the shared base time. + # The deviations can come from UTC wall-clock time slowly moving during the run (CPU overhead). 
+ basetime: datetime.datetime + def __init__( self, __src: Mapping[ids.HandlerId, HandlerState], *, + basetime: datetime.datetime, purpose: Optional[str] = None, ): super().__init__() self._states = dict(__src) self.purpose = purpose + self.basetime = basetime @classmethod def from_scratch(cls) -> "State": - return cls({}) + return cls({}, basetime=_get_basetime()) @classmethod def from_storage( @@ -178,13 +256,14 @@ def from_storage( storage: progress.ProgressStorage, handlers: Iterable[execution.Handler], ) -> "State": + basetime = _get_basetime() handler_ids = {handler.id for handler in handlers} handler_states: Dict[ids.HandlerId, HandlerState] = {} for handler_id in handler_ids: content = storage.fetch(key=handler_id, body=body) if content is not None: - handler_states[handler_id] = HandlerState.from_storage(content) - return cls(handler_states) + handler_states[handler_id] = HandlerState.from_storage(content, basetime=basetime) + return cls(handler_states, basetime=basetime) def with_purpose( self, @@ -195,7 +274,7 @@ def with_purpose( for handler in handlers: handler_states[handler.id] = handler_states[handler.id].with_purpose(purpose) cls = type(self) - return cls(handler_states, purpose=purpose) + return cls(handler_states, basetime=self.basetime, purpose=purpose) def with_handlers( self, @@ -204,11 +283,12 @@ def with_handlers( handler_states: Dict[ids.HandlerId, HandlerState] = dict(self) for handler in handlers: if handler.id not in handler_states: - handler_states[handler.id] = HandlerState.from_scratch(purpose=self.purpose) + handler_states[handler.id] = HandlerState.from_scratch( + basetime=self.basetime, purpose=self.purpose) else: handler_states[handler.id] = handler_states[handler.id].as_active() cls = type(self) - return cls(handler_states, purpose=self.purpose) + return cls(handler_states, basetime=self.basetime, purpose=self.purpose) def with_outcomes( self, @@ -223,7 +303,7 @@ def with_outcomes( handler_id: (handler_state if handler_id not in outcomes else handler_state.with_outcome(outcomes[handler_id])) for handler_id, handler_state in self._states.items() - }, purpose=self.purpose) + }, basetime=self.basetime, purpose=self.purpose) def without_successes(self) -> "State": cls = type(self) @@ -231,7 +311,7 @@ def without_successes(self) -> "State": handler_id: handler_state for handler_id, handler_state in self._states.items() if not handler_state.success # i.e. failures & in-progress/retrying - }) + }, basetime=self.basetime) def store( self, @@ -329,9 +409,9 @@ def delays(self) -> Collection[float]: processing routine, based on all delays of different origin: e.g. postponed daemons, stopping daemons, temporarily failed handlers. 
""" - now = datetime.datetime.utcnow() + now = self.basetime + datetime.timedelta(seconds=asyncio.get_running_loop().time()) return [ - max(0, (handler_state.delayed - now).total_seconds()) if handler_state.delayed else 0 + max(0.0, (handler_state.delayed - now).total_seconds()) if handler_state.delayed else 0 for handler_state in self._states.values() if handler_state.active and not handler_state.finished ] @@ -398,3 +478,8 @@ def _datetime_fromisoformat(val: Optional[str]) -> Optional[datetime.datetime]: return None else: return datetime.datetime.fromisoformat(val) + + +def _get_basetime() -> datetime.datetime: + loop = asyncio.get_running_loop() + return datetime.datetime.utcnow() - datetime.timedelta(seconds=loop.time()) diff --git a/tests/handling/indexing/test_index_exclusion.py b/tests/handling/indexing/test_index_exclusion.py index 0298b57d..49132009 100644 --- a/tests/handling/indexing/test_index_exclusion.py +++ b/tests/handling/indexing/test_index_exclusion.py @@ -30,9 +30,10 @@ async def test_successes_are_removed_from_the_indexing_state( resource, namespace, settings, registry, memories, indexers, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) + basetime = datetime.datetime.utcnow() # any "future" time works and affects nothing as long as it is the same body = {'metadata': {'namespace': namespace, 'name': 'name1'}} record = ProgressRecord(success=True) - state = State({HandlerId('unrelated'): HandlerState.from_storage(record)}) + state = State({HandlerId('unrelated'): HandlerState.from_storage(record, basetime=basetime)}, basetime=basetime) memory = await memories.recall(raw_body=body) memory.indexing_memory.indexing_state = state handlers.index_mock.side_effect = 123 @@ -56,9 +57,10 @@ async def test_successes_are_removed_from_the_indexing_state( async def test_temporary_failures_with_no_delays_are_reindexed( resource, namespace, settings, registry, memories, indexers, index, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) + basetime = datetime.datetime.utcnow() # any "future" time works and affects nothing as long as it is the same body = {'metadata': {'namespace': namespace, 'name': 'name1'}} record = ProgressRecord(delayed=None) - state = State({HandlerId('index_fn'): HandlerState.from_storage(record)}) + state = State({HandlerId('index_fn'): HandlerState.from_storage(record, basetime=basetime)}, basetime=basetime) memory = await memories.recall(raw_body=body) memory.indexing_memory.indexing_state = state await process_resource_event( @@ -81,9 +83,10 @@ async def test_temporary_failures_with_no_delays_are_reindexed( async def test_temporary_failures_with_expired_delays_are_reindexed( resource, namespace, settings, registry, memories, indexers, index, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) + basetime = datetime.datetime.utcnow() # any "future" time works and affects nothing as long as it is the same body = {'metadata': {'namespace': namespace, 'name': 'name1'}} record = ProgressRecord(delayed='2020-12-31T23:59:59.000000') - state = State({HandlerId('index_fn'): HandlerState.from_storage(record)}) + state = State({HandlerId('index_fn'): HandlerState.from_storage(record, basetime=basetime)}, basetime=basetime) memory = await memories.recall(raw_body=body) memory.indexing_memory.indexing_state = state await process_resource_event( @@ -105,9 +108,10 @@ async def test_temporary_failures_with_expired_delays_are_reindexed( async def test_permanent_failures_are_not_reindexed( resource, namespace, settings, registry, memories, 
indexers, index, caplog, event_type, handlers): caplog.set_level(logging.DEBUG) + basetime = datetime.datetime.utcnow() # any "future" time works and affects nothing as long as it is the same body = {'metadata': {'namespace': namespace, 'name': 'name1'}} record = ProgressRecord(failure=True) - state = State({HandlerId('index_fn'): HandlerState.from_storage(record)}) + state = State({HandlerId('index_fn'): HandlerState.from_storage(record, basetime=basetime)}, basetime=basetime) memory = await memories.recall(raw_body=body) memory.indexing_memory.indexing_state = state await process_resource_event( diff --git a/tests/handling/test_delays.py b/tests/handling/test_delays.py index 7d2ab145..0fc2a82e 100644 --- a/tests/handling/test_delays.py +++ b/tests/handling/test_delays.py @@ -74,11 +74,12 @@ async def test_delayed_handlers_sleep( registry, settings, handlers, resource, cause_mock, cause_reason, caplog, assert_logs, k8s_mocked, now, delayed_iso, delay): caplog.set_level(logging.DEBUG) + basetime = datetime.datetime.utcnow() # any "future" time works and affects nothing as long as it is the same # Simulate the original persisted state of the resource. # Make sure the finalizer is added since there are mandatory deletion handlers. record = ProgressRecord(started='2000-01-01T00:00:00', delayed=delayed_iso) # a long time ago - state_dict = HandlerState.from_storage(record).as_in_storage() + state_dict = HandlerState.from_storage(record, basetime=basetime).as_in_storage() event_type = None if cause_reason == Reason.RESUME else 'irrelevant' event_body = { 'metadata': {'finalizers': [settings.persistence.finalizer]}, diff --git a/tests/lifecycles/test_handler_selection.py b/tests/lifecycles/test_handler_selection.py index aca6bdac..6b0a2e79 100644 --- a/tests/lifecycles/test_handler_selection.py +++ b/tests/lifecycles/test_handler_selection.py @@ -12,7 +12,7 @@ kopf.lifecycles.shuffled, kopf.lifecycles.asap, ]) -def test_with_empty_input(lifecycle): +async def test_with_empty_input(lifecycle): state = State.from_scratch() handlers = [] selected = lifecycle(handlers, state=state) @@ -79,7 +79,7 @@ def test_shuffled_takes_them_all(): assert set(selected) == {handler1, handler2, handler3} -def test_asap_takes_the_first_one_when_no_retries(mocker): +async def test_asap_takes_the_first_one_when_no_retries(mocker): handler1 = mocker.Mock(id='id1', spec_set=['id']) handler2 = mocker.Mock(id='id2', spec_set=['id']) handler3 = mocker.Mock(id='id3', spec_set=['id']) @@ -92,7 +92,7 @@ def test_asap_takes_the_first_one_when_no_retries(mocker): assert selected[0] is handler1 -def test_asap_takes_the_least_retried(mocker): +async def test_asap_takes_the_least_retried(mocker): handler1 = mocker.Mock(id='id1', spec_set=['id']) handler2 = mocker.Mock(id='id2', spec_set=['id']) handler3 = mocker.Mock(id='id3', spec_set=['id']) diff --git a/tests/persistence/test_states.py b/tests/persistence/test_states.py index c73a8195..41d7354b 100644 --- a/tests/persistence/test_states.py +++ b/tests/persistence/test_states.py @@ -40,7 +40,7 @@ def handler(): # -def test_created_empty_from_scratch(): +async def test_created_empty_from_scratch(): state = State.from_scratch() assert len(state) == 0 assert state.purpose is None @@ -57,7 +57,7 @@ def test_created_empty_from_scratch(): ({'status': {'kopf': {}}}), ({'status': {'kopf': {'progress': {}}}}), ]) -def test_created_empty_from_empty_storage_with_handlers(storage, handler, body): +async def test_created_empty_from_empty_storage_with_handlers(storage, handler, body): state = 
State.from_storage(body=Body(body), handlers=[handler], storage=storage) assert len(state) == 0 assert state.purpose is None @@ -73,7 +73,7 @@ def test_created_empty_from_empty_storage_with_handlers(storage, handler, body): ({'status': {'kopf': {'progress': {'some-id': {'success': True}}}}}), ({'status': {'kopf': {'progress': {'some-id': {'failure': True}}}}}), ]) -def test_created_empty_from_filled_storage_without_handlers(storage, handler, body): +async def test_created_empty_from_filled_storage_without_handlers(storage, handler, body): state = State.from_storage(body=Body(body), handlers=[], storage=storage) assert len(state) == 0 assert state.purpose is None @@ -89,21 +89,21 @@ def test_created_empty_from_filled_storage_without_handlers(storage, handler, bo # -def test_created_from_storage_as_passive(storage, handler): +async def test_created_from_storage_as_passive(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) assert len(state) == 1 assert state['some-id'].active is False -def test_created_from_handlers_as_active(storage, handler): +async def test_created_from_handlers_as_active(storage, handler): state = State.from_scratch() state = state.with_handlers([handler]) assert len(state) == 1 assert state['some-id'].active is True -def test_switched_from_passive_to_active(storage, handler): +async def test_switched_from_passive_to_active(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'purpose': None}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) @@ -111,7 +111,7 @@ def test_switched_from_passive_to_active(storage, handler): assert state['some-id'].active is True -def test_passed_through_with_outcomes_when_passive(storage, handler): +async def test_passed_through_with_outcomes_when_passive(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'purpose': None}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_outcomes({'some-id': Outcome(final=True)}) @@ -119,7 +119,7 @@ def test_passed_through_with_outcomes_when_passive(storage, handler): assert state['some-id'].active is False -def test_passed_through_with_outcomes_when_active(storage, handler): +async def test_passed_through_with_outcomes_when_active(storage, handler): state = State.from_scratch() state = state.with_handlers([handler]) state = state.with_outcomes({'some-id': Outcome(final=True)}) @@ -127,14 +127,14 @@ def test_passed_through_with_outcomes_when_active(storage, handler): assert state['some-id'].active is True -def test_passive_states_are_not_used_in_done_calculation(storage, handler): +async def test_passive_states_are_not_used_in_done_calculation(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) assert len(state) == 1 assert state.done is True # because the unfinished handler state is ignored -def test_active_states_are_used_in_done_calculation(storage, handler): +async def test_active_states_are_used_in_done_calculation(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) @@ -143,7 +143,7 @@ def test_active_states_are_used_in_done_calculation(storage, handler): @freezegun.freeze_time(TS0) 
-def test_passive_states_are_not_used_in_delays_calculation(storage, handler): +async def test_passive_states_are_not_used_in_delays_calculation(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'delayed': TS1_ISO}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) assert len(state) == 1 @@ -151,7 +151,7 @@ def test_passive_states_are_not_used_in_delays_calculation(storage, handler): @freezegun.freeze_time(TS0) -def test_active_states_are_used_in_delays_calculation(storage, handler): +async def test_active_states_are_used_in_delays_calculation(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'delayed': TS1_ISO}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) @@ -164,7 +164,7 @@ def test_active_states_are_used_in_delays_calculation(storage, handler): # -def test_created_from_purposeless_storage(storage, handler): +async def test_created_from_purposeless_storage(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'purpose': None}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) assert len(state) == 1 @@ -173,7 +173,7 @@ def test_created_from_purposeless_storage(storage, handler): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_created_from_purposeful_storage(storage, handler, reason): +async def test_created_from_purposeful_storage(storage, handler, reason): body = {'status': {'kopf': {'progress': {'some-id': {'purpose': reason.value}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) assert len(state) == 1 @@ -182,7 +182,7 @@ def test_created_from_purposeful_storage(storage, handler, reason): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_enriched_with_handlers_keeps_the_original_purpose(handler, reason): +async def test_enriched_with_handlers_keeps_the_original_purpose(handler, reason): state = State.from_scratch() state = state.with_purpose(reason) state = state.with_handlers([handler]) @@ -190,7 +190,7 @@ def test_enriched_with_handlers_keeps_the_original_purpose(handler, reason): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_enriched_with_outcomes_keeps_the_original_purpose(reason): +async def test_enriched_with_outcomes_keeps_the_original_purpose(reason): state = State.from_scratch() state = state.with_purpose(reason) state = state.with_outcomes({}) @@ -198,7 +198,7 @@ def test_enriched_with_outcomes_keeps_the_original_purpose(reason): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_repurposed_before_handlers(handler, reason): +async def test_repurposed_before_handlers(handler, reason): state = State.from_scratch() state = state.with_purpose(reason).with_handlers([handler]) assert len(state) == 1 @@ -207,7 +207,7 @@ def test_repurposed_before_handlers(handler, reason): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_repurposed_after_handlers(handler, reason): +async def test_repurposed_after_handlers(handler, reason): state = State.from_scratch() state = state.with_handlers([handler]).with_purpose(reason) assert len(state) == 1 @@ -216,7 +216,7 @@ def test_repurposed_after_handlers(handler, reason): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_repurposed_with_handlers(handler, reason): +async def test_repurposed_with_handlers(handler, reason): state = State.from_scratch() state = state.with_handlers([handler]).with_purpose(reason, handlers=[handler]) assert 
len(state) == 1 @@ -225,7 +225,7 @@ def test_repurposed_with_handlers(handler, reason): @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_repurposed_not_affecting_the_existing_handlers_from_scratch(handler, reason): +async def test_repurposed_not_affecting_the_existing_handlers_from_scratch(handler, reason): state = State.from_scratch() state = state.with_handlers([handler]).with_purpose(reason).with_handlers([handler]) assert len(state) == 1 @@ -234,7 +234,7 @@ def test_repurposed_not_affecting_the_existing_handlers_from_scratch(handler, re @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_repurposed_not_affecting_the_existing_handlers_from_storage(storage, handler, reason): +async def test_repurposed_not_affecting_the_existing_handlers_from_storage(storage, handler, reason): body = {'status': {'kopf': {'progress': {'some-id': {'purpose': None}}}}} state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]).with_purpose(reason).with_handlers([handler]) @@ -258,7 +258,7 @@ def test_repurposed_not_affecting_the_existing_handlers_from_storage(storage, ha # All combinations except for same-to-same (it is not an "extra" then). (a, b) for a in HANDLER_REASONS for b in HANDLER_REASONS if a != b ]) -def test_with_handlers_irrelevant_to_the_purpose( +async def test_with_handlers_irrelevant_to_the_purpose( storage, handler, body, expected_extras, stored_reason, processed_reason): body['status']['kopf']['progress']['some-id']['purpose'] = stored_reason.value state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -277,7 +277,7 @@ def test_with_handlers_irrelevant_to_the_purpose( (StateCounters(1, 0, 0), True, [], {'status': {'kopf': {'progress': {'some-id': {'success': True}}}}}), ]) @pytest.mark.parametrize('reason', HANDLER_REASONS) -def test_with_handlers_relevant_to_the_purpose( +async def test_with_handlers_relevant_to_the_purpose( storage, handler, body, expected_counts, expected_done, expected_delays, reason): body['status']['kopf']['progress']['some-id']['purpose'] = reason.value state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -296,7 +296,7 @@ def test_with_handlers_relevant_to_the_purpose( ]) @pytest.mark.parametrize('reason', HANDLER_REASONS) @freezegun.freeze_time(TS0) -def test_with_handlers_relevant_to_the_purpose_and_delayed( +async def test_with_handlers_relevant_to_the_purpose_and_delayed( storage, handler, body, expected_counts, expected_done, expected_delays, reason): body['status']['kopf']['progress']['some-id']['delayed'] = TS1_ISO body['status']['kopf']['progress']['some-id']['purpose'] = reason.value @@ -311,7 +311,7 @@ def test_with_handlers_relevant_to_the_purpose_and_delayed( @pytest.mark.parametrize('reason', [Reason.CREATE, Reason.UPDATE, Reason.RESUME]) @freezegun.freeze_time(TS0) -def test_issue_601_deletion_supersedes_other_processing(storage, reason): +async def test_issue_601_deletion_supersedes_other_processing(storage, reason): body = {'status': {'kopf': {'progress': { 'fn1': {'purpose': reason.value, 'failure': True}, @@ -349,7 +349,7 @@ def test_issue_601_deletion_supersedes_other_processing(storage, reason): @freezegun.freeze_time(TS0) -def test_started_from_scratch(storage, handler): +async def test_started_from_scratch(storage, handler): patch = Patch() state = State.from_scratch() state = state.with_handlers([handler]) @@ -365,7 +365,7 @@ def test_started_from_scratch(storage, handler): (TSA_ISO, {'status': 
{'kopf': {'progress': {'some-id': {'started': TSA_ISO}}}}}), ]) @freezegun.freeze_time(TS0) -def test_started_from_storage(storage, handler, body, expected): +async def test_started_from_storage(storage, handler, body, expected): patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state.store(body=Body({}), patch=patch, storage=storage) @@ -379,7 +379,7 @@ def test_started_from_storage(storage, handler, body, expected): (TSB_ISO, {'status': {'kopf': {'progress': {'some-id': {'started': TSB_ISO}}}}}), (TSA_ISO, {'status': {'kopf': {'progress': {'some-id': {'started': TSA_ISO}}}}}), ]) -def test_started_from_storage_is_preferred_over_from_scratch(storage, handler, body, expected): +async def test_started_from_storage_is_preferred_over_from_scratch(storage, handler, body, expected): with freezegun.freeze_time(TS0): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) with freezegun.freeze_time(TS1): @@ -401,7 +401,7 @@ def test_started_from_storage_is_preferred_over_from_scratch(storage, handler, b (TS0 - TSA, {'status': {'kopf': {'progress': {'some-id': {'started': TSA_ISO}}}}}), ]) @freezegun.freeze_time(TS0) -def test_runtime(storage, handler, expected, body): +async def test_runtime(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) result = state[handler.id].runtime @@ -421,7 +421,7 @@ def test_runtime(storage, handler, expected, body): (True , {'status': {'kopf': {'progress': {'some-id': {'success': True}}}}}), (True , {'status': {'kopf': {'progress': {'some-id': {'failure': True}}}}}), ]) -def test_finished_flag(storage, handler, expected, body): +async def test_finished_flag(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) result = state[handler.id].finished @@ -456,7 +456,7 @@ def test_finished_flag(storage, handler, expected, body): (True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO, 'failure': None}}}}}), ]) @freezegun.freeze_time(TS0) -def test_sleeping_flag(storage, handler, expected, body): +async def test_sleeping_flag(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) result = state[handler.id].sleeping @@ -491,7 +491,7 @@ def test_sleeping_flag(storage, handler, expected, body): (False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO, 'failure': None}}}}}), ]) @freezegun.freeze_time(TS0) -def test_awakened_flag(storage, handler, expected, body): +async def test_awakened_flag(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) result = state[handler.id].awakened @@ -507,7 +507,7 @@ def test_awakened_flag(storage, handler, expected, body): (None, {'status': {'kopf': {'progress': {'some-id': {'delayed': None}}}}}), (TS0, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO}}}}}), ]) -def test_awakening_time(storage, handler, expected, body): +async def test_awakening_time(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) result = state[handler.id].delayed @@ -522,7 +522,7 @@ def test_awakening_time(storage, handler, expected, body): (0, 
{'status': {'kopf': {'progress': {'some-id': {'retries': None}}}}}), (6, {'status': {'kopf': {'progress': {'some-id': {'retries': 6}}}}}), ]) -def test_get_retry_count(storage, handler, expected, body): +async def test_get_retry_count(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) result = state[handler.id].retries @@ -535,7 +535,7 @@ def test_get_retry_count(storage, handler, expected, body): ({}, 1, TS1_ISO), ]) @freezegun.freeze_time(TS0) -def test_set_awake_time(storage, handler, expected, body, delay): +async def test_set_awake_time(storage, handler, expected, body, delay): patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) @@ -558,7 +558,7 @@ def test_set_awake_time(storage, handler, expected, body, delay): (6, TS1_ISO, 1, {'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}), ]) @freezegun.freeze_time(TS0) -def test_set_retry_time(storage, handler, expected_retries, expected_delayed, body, delay): +async def test_set_retry_time(storage, handler, expected_retries, expected_delayed, body, delay): patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) @@ -573,7 +573,7 @@ def test_set_retry_time(storage, handler, expected_retries, expected_delayed, bo # -def test_subrefs_added_to_empty_state(storage, handler): +async def test_subrefs_added_to_empty_state(storage, handler): body = {} patch = Patch() outcome_subrefs = ['sub2/b', 'sub2/a', 'sub2', 'sub1', 'sub3'] @@ -586,7 +586,7 @@ def test_subrefs_added_to_empty_state(storage, handler): assert patch['status']['kopf']['progress']['some-id']['subrefs'] == expected_subrefs -def test_subrefs_added_to_preexisting_subrefs(storage, handler): +async def test_subrefs_added_to_preexisting_subrefs(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'subrefs': ['sub9/2', 'sub9/1']}}}}} patch = Patch() outcome_subrefs = ['sub2/b', 'sub2/a', 'sub2', 'sub1', 'sub3'] @@ -599,7 +599,7 @@ def test_subrefs_added_to_preexisting_subrefs(storage, handler): assert patch['status']['kopf']['progress']['some-id']['subrefs'] == expected_subrefs -def test_subrefs_ignored_when_not_specified(storage, handler): +async def test_subrefs_ignored_when_not_specified(storage, handler): body = {} patch = Patch() outcome = Outcome(final=True, subrefs=[]) @@ -620,7 +620,7 @@ def test_subrefs_ignored_when_not_specified(storage, handler): (6, TS0_ISO, {'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}), ]) @freezegun.freeze_time(TS0) -def test_store_failure(storage, handler, expected_retries, expected_stopped, body): +async def test_store_failure(storage, handler, expected_retries, expected_stopped, body): error = Exception('some-error') patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -639,7 +639,7 @@ def test_store_failure(storage, handler, expected_retries, expected_stopped, bod (6, TS0_ISO, {'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}), ]) @freezegun.freeze_time(TS0) -def test_store_success(storage, handler, expected_retries, expected_stopped, body): +async def test_store_success(storage, handler, expected_retries, expected_stopped, body): patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) @@ -657,7 +657,7 @@ 
def test_store_success(storage, handler, expected_retries, expected_stopped, bod ('string', {'status': {'some-id': 'string'}}), ({'field': 'value'}, {'status': {'some-id': {'field': 'value'}}}), ]) -def test_store_result(handler, expected_patch, result): +async def test_store_result(handler, expected_patch, result): patch = Patch() outcomes = {handler.id: Outcome(final=True, result=result)} deliver_results(outcomes=outcomes, patch=patch) @@ -669,7 +669,7 @@ def test_store_result(handler, expected_patch, result): # -def test_purge_progress_when_exists_in_body(storage, handler): +async def test_purge_progress_when_exists_in_body(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}} patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -677,7 +677,7 @@ def test_purge_progress_when_exists_in_body(storage, handler): assert patch == {'status': {'kopf': {'progress': {'some-id': None}}}} -def test_purge_progress_when_already_empty_in_body_and_patch(storage, handler): +async def test_purge_progress_when_already_empty_in_body_and_patch(storage, handler): body = {} patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -685,7 +685,7 @@ def test_purge_progress_when_already_empty_in_body_and_patch(storage, handler): assert not patch -def test_purge_progress_when_already_empty_in_body_but_not_in_patch(storage, handler): +async def test_purge_progress_when_already_empty_in_body_but_not_in_patch(storage, handler): body = {} patch = Patch({'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}) state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -693,7 +693,7 @@ def test_purge_progress_when_already_empty_in_body_but_not_in_patch(storage, han assert not patch -def test_purge_progress_when_known_at_restoration_only(storage, handler): +async def test_purge_progress_when_known_at_restoration_only(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}} patch = Patch() state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) @@ -701,7 +701,7 @@ def test_purge_progress_when_known_at_restoration_only(storage, handler): assert patch == {'status': {'kopf': {'progress': {'some-id': None}}}} -def test_purge_progress_when_known_at_purge_only(storage, handler): +async def test_purge_progress_when_known_at_purge_only(storage, handler): body = {'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}} patch = Patch() state = State.from_storage(body=Body(body), handlers=[], storage=storage) @@ -709,7 +709,7 @@ def test_purge_progress_when_known_at_purge_only(storage, handler): assert patch == {'status': {'kopf': {'progress': {'some-id': None}}}} -def test_purge_progress_cascades_to_subrefs(storage, handler): +async def test_purge_progress_cascades_to_subrefs(storage, handler): body = {'status': {'kopf': {'progress': { 'some-id': {'subrefs': ['sub1', 'sub2', 'sub3']}, 'sub1': {}, @@ -727,7 +727,7 @@ def test_purge_progress_cascades_to_subrefs(storage, handler): }}}} -def test_original_body_is_not_modified_by_storing(storage, handler): +async def test_original_body_is_not_modified_by_storing(storage, handler): body = Body({}) patch = Patch() state = State.from_storage(body=body, handlers=[handler], storage=storage) From 58eaa6923eedc18e041ffe0bf5467dff949622c6 Mon Sep 17 00:00:00 2001 From: Sergey Vasilyev Date: Sat, 25 Dec 2021 12:49:08 +0100 Subject: [PATCH 10/11] Refactor time-based tests to use 
fake and sharp loop time
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Utilise a new library, https://github.com/nolar/looptime, to fake the time
in event loops. The library does many things, but the most important one is
the sharp time with predictable steps: the code-execution overhead, which is
typically random, is excluded from the measured loop time. Everything else
comes as a side effect: e.g. such tests finish in near-zero real time
regardless of the fake loop-time duration, and the `looptime` fixture is
convenient for exact time assertions.

Signed-off-by: Sergey Vasilyev
---
 pytest.ini                                    |   1 +
 requirements.txt                              |   1 +
 tests/apis/test_api_requests.py               |  63 ++++++-----
 tests/conftest.py                             |  71 ++----------
 .../daemons/test_daemon_rematching.py         |   7 +-
 .../daemons/test_daemon_termination.py        |  21 ++--
 .../indexing/test_blocking_until_indexed.py   |  72 ++++++-------
 .../handling/subhandling/test_subhandling.py  |   8 +-
 tests/handling/test_activity_triggering.py    |  32 ++----
 tests/handling/test_cause_handling.py         |   7 --
 tests/handling/test_delays.py                 |   6 +-
 tests/handling/test_error_handling.py         |  12 +--
 tests/handling/test_multistep.py              |   8 +-
 tests/handling/test_no_handlers.py            |   2 -
 tests/handling/test_retrying_limits.py        |   8 +-
 tests/k8s/test_watching_with_freezes.py       |  45 ++++----
 .../test_processing_of_namespaces.py          |  28 ++---
 .../test_processing_of_resources.py           |  63 +++++------
 tests/peering/test_freeze_mode.py             |  68 ++++++------
 tests/persistence/test_states.py              |  21 +++-
 tests/posting/test_threadsafety.py            |  69 ++++++------
 tests/primitives/test_conditions.py           |  16 +--
 tests/primitives/test_containers.py           |  59 +++++-----
 tests/primitives/test_flags.py                |  26 ++---
 tests/primitives/test_toggles.py              |  30 +++---
 tests/primitives/test_togglesets.py           |  28 ++---
 tests/reactor/test_queueing.py                |  55 +++++-----
 tests/references/test_backbone.py             |  21 ++--
 tests/test_async.py                           |  14 +--
 tests/timing/test_sleeping.py                 |  60 +++++------
 tests/timing/test_throttling.py               |  23 ++--
 tests/utilities/aiotasks/test_scheduler.py    | 102 ++++++++----------
 .../utilities/aiotasks/test_task_selection.py |   2 +-
 .../utilities/aiotasks/test_task_stopping.py  |  37 ++++---
 tests/utilities/aiotasks/test_task_waiting.py |   8 +-
 35 files changed, 496 insertions(+), 598 deletions(-)

diff --git a/pytest.ini b/pytest.ini
index da506c41..66de6a28 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -4,3 +4,4 @@ mock_use_standalone_module = true
 asyncio_mode = auto
 addopts =
     --strict-markers
+    --looptime
diff --git a/requirements.txt b/requirements.txt
index b97f81ba..9e8cc225 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,6 +11,7 @@ coveralls
 freezegun
 import-linter
 isort
+looptime
 lxml
 # Generally, `unittest.mock` is enough, but it lacks `AsyncMock` for Py 3.7.
 # TODO: Once 3.7 is removed (Jun 2023), roll back to unittest.mock.
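For illustration, a hypothetical test (not part of this change-set) written in
the new style; it assumes only the looptime behaviour relied upon by the diffs
below: `asyncio.sleep()` advances the fake loop clock sharply and instantly,
and the `looptime` fixture compares equal to the loop's current fake time:

    import asyncio

    async def test_fake_loop_time(looptime):
        await asyncio.sleep(100)          # near-instant in real time
        assert looptime == 100            # sharp: no random overhead is added
        await asyncio.sleep(1.23)
        assert float(looptime) == 101.23  # convertible for exact comparisons

Hence the recurring pattern in the diffs below: real-time range assertions
such as `assert 0.1 < timer.seconds < 0.2` become exact equalities such as
`assert looptime == 1.23`.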
diff --git a/tests/apis/test_api_requests.py b/tests/apis/test_api_requests.py index 2c000da0..20bfc46b 100644 --- a/tests/apis/test_api_requests.py +++ b/tests/apis/test_api_requests.py @@ -138,23 +138,23 @@ async def test_parsing_in_streams( (delete, 'delete'), ]) async def test_direct_timeout_in_requests( - resp_mocker, aresponses, hostname, fn, method, settings, logger, timer): + resp_mocker, aresponses, hostname, fn, method, settings, logger, looptime): async def serve_slowly(): - await asyncio.sleep(1.0) + await asyncio.sleep(10) return aiohttp.web.json_response({}) mock = resp_mocker(side_effect=serve_slowly) aresponses.add(hostname, '/url', method, mock) - with timer, pytest.raises(asyncio.TimeoutError): - timeout = aiohttp.ClientTimeout(total=0.1) + with pytest.raises(asyncio.TimeoutError): + timeout = aiohttp.ClientTimeout(total=1.23) # aiohttp raises an asyncio.TimeoutError which is automatically retried. # To reduce the test duration we disable retries for this test. settings.networking.error_backoffs = None await fn('/url', timeout=timeout, settings=settings, logger=logger) - assert 0.1 < timer.seconds < 0.2 + assert looptime == 1.23 @pytest.mark.parametrize('fn, method', [ @@ -164,85 +164,86 @@ async def serve_slowly(): (delete, 'delete'), ]) async def test_settings_timeout_in_requests( - resp_mocker, aresponses, hostname, fn, method, settings, logger, timer): + resp_mocker, aresponses, hostname, fn, method, settings, logger, looptime): async def serve_slowly(): - await asyncio.sleep(1.0) + await asyncio.sleep(10) return aiohttp.web.json_response({}) mock = resp_mocker(side_effect=serve_slowly) aresponses.add(hostname, '/url', method, mock) - with timer, pytest.raises(asyncio.TimeoutError): - settings.networking.request_timeout = 0.1 + with pytest.raises(asyncio.TimeoutError): + settings.networking.request_timeout = 1.23 # aiohttp raises an asyncio.TimeoutError which is automatically retried. # To reduce the test duration we disable retries for this test. settings.networking.error_backoffs = None await fn('/url', settings=settings, logger=logger) - assert 0.1 < timer.seconds < 0.2 + assert looptime == 1.23 @pytest.mark.parametrize('method', ['get']) # the only supported method at the moment async def test_direct_timeout_in_streams( - resp_mocker, aresponses, hostname, method, settings, logger, timer): + resp_mocker, aresponses, hostname, method, settings, logger, looptime): async def serve_slowly(): - await asyncio.sleep(1.0) + await asyncio.sleep(10) return "{}" mock = resp_mocker(side_effect=serve_slowly) aresponses.add(hostname, '/url', method, mock) - with timer, pytest.raises(asyncio.TimeoutError): - timeout = aiohttp.ClientTimeout(total=0.1) + with pytest.raises(asyncio.TimeoutError): + timeout = aiohttp.ClientTimeout(total=1.23) # aiohttp raises an asyncio.TimeoutError which is automatically retried. # To reduce the test duration we disable retries for this test. 
settings.networking.error_backoffs = None async for _ in stream('/url', timeout=timeout, settings=settings, logger=logger): pass - assert 0.1 < timer.seconds < 0.2 + assert looptime == 1.23 @pytest.mark.parametrize('method', ['get']) # the only supported method at the moment async def test_settings_timeout_in_streams( - resp_mocker, aresponses, hostname, method, settings, logger, timer): + resp_mocker, aresponses, hostname, method, settings, logger, looptime): async def serve_slowly(): - await asyncio.sleep(1.0) + await asyncio.sleep(10) return "{}" mock = resp_mocker(side_effect=serve_slowly) aresponses.add(hostname, '/url', method, mock) - with timer, pytest.raises(asyncio.TimeoutError): - settings.networking.request_timeout = 0.1 + with pytest.raises(asyncio.TimeoutError): + settings.networking.request_timeout = 1.23 # aiohttp raises an asyncio.TimeoutError which is automatically retried. # To reduce the test duration we disable retries for this test. settings.networking.error_backoffs = None async for _ in stream('/url', settings=settings, logger=logger): pass - assert 0.1 < timer.seconds < 0.2 + assert looptime == 1.23 -@pytest.mark.parametrize('delay, expected', [ - pytest.param(0.0, [], id='instant-none'), - pytest.param(0.1, [{'fake': 'result1'}], id='fast-single'), - pytest.param(9.9, [{'fake': 'result1'}, {'fake': 'result2'}], id='inf-double'), +@pytest.mark.parametrize('delay, expected_times, expected_items', [ + pytest.param(0, [], [], id='instant-none'), + pytest.param(2, [1], [{'fake': 'result1'}], id='fast-single'), + pytest.param(9, [1, 4], [{'fake': 'result1'}, {'fake': 'result2'}], id='inf-double'), ]) @pytest.mark.parametrize('method', ['get']) # the only supported method at the moment async def test_stopper_in_streams( - resp_mocker, aresponses, hostname, method, delay, expected, settings, logger): + resp_mocker, aresponses, hostname, method, delay, settings, logger, looptime, + expected_items, expected_times): async def stream_slowly(request: aiohttp.web.Request) -> aiohttp.web.StreamResponse: response = aiohttp.web.StreamResponse() await response.prepare(request) try: - await asyncio.sleep(0.05) + await asyncio.sleep(1) await response.write(b'{"fake": "result1"}\n') - await asyncio.sleep(0.15) + await asyncio.sleep(3) await response.write(b'{"fake": "result2"}\n') await response.write_eof() except ConnectionError: @@ -255,9 +256,13 @@ async def stream_slowly(request: aiohttp.web.Request) -> aiohttp.web.StreamRespo asyncio.get_running_loop().call_later(delay, stopper.set_result, None) items = [] + times = [] async for item in stream('/url', stopper=stopper, settings=settings, logger=logger): items.append(item) + times.append(float(looptime)) - assert items == expected + assert items == expected_items + assert times == expected_times - await asyncio.sleep(0.2) # give the response some time to be cancelled and its tasks closed + # Give the response some time to be cancelled and its tasks closed. That is aiohttp's issue. 
+    await asyncio.sleep(30)
diff --git a/tests/conftest.py b/tests/conftest.py
index 1abe5d91..66056903 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -199,7 +199,6 @@ class K8sMocks:
     patch: Mock
     delete: Mock
     stream: Mock
-    sleep: Mock
 
 
 @pytest.fixture()
@@ -218,7 +217,6 @@ async def itr(*_, **__):
         patch=mocker.patch('kopf._cogs.clients.api.patch', return_value={}),
         delete=mocker.patch('kopf._cogs.clients.api.delete', return_value={}),
         stream=mocker.patch('kopf._cogs.clients.api.stream', side_effect=itr),
-        sleep=mocker.patch('kopf._cogs.aiokits.aiotime.sleep', return_value=None),
     )
 
 
@@ -535,69 +533,6 @@ def no_certvalidator():
     yield from _with_module_absent('certvalidator')
 
 
-#
-# Helpers for the timing checks.
-#
-
-@pytest.fixture()
-def timer():
-    return Timer()
-
-
-class Timer:
-    """
-    A helper context manager to measure the time of the code-blocks.
-    Also, supports direct comparison with time-deltas and the numbers of seconds.
-
-    Usage:
-
-        with Timer() as timer:
-            do_something()
-            print(f"Executing for {timer.seconds}s already.")
-            do_something_else()
-
-        print(f"Executed in {timer.seconds}s.")
-        assert timer < 5.0
-    """
-
-    def __init__(self):
-        super().__init__()
-        self._ts = None
-        self._te = None
-
-    @property
-    def seconds(self):
-        if self._ts is None:
-            return None
-        elif self._te is None:
-            return time.perf_counter() - self._ts
-        else:
-            return self._te - self._ts
-
-    def __repr__(self):
-        status = 'new' if self._ts is None else 'running' if self._te is None else 'finished'
-        return f'<Timer: {self.seconds}s ({status})>'
-
-    def __enter__(self):
-        self._ts = time.perf_counter()
-        self._te = None
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self._te = time.perf_counter()
-
-    async def __aenter__(self):
-        return self.__enter__()
-
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        return self.__exit__(exc_type, exc_val, exc_tb)
-
-    def __int__(self):
-        return int(self.seconds)
-
-    def __float__(self):
-        return float(self.seconds)
-
 #
 # Helpers for the logging checks.
# @@ -721,3 +656,9 @@ def _get_all_tasks() -> Set[asyncio.Task]: else: break return {t for t in tasks if not t.done()} + + +@pytest.fixture() +def loop(event_loop): + """Sync aiohttp's server-side timeline with kopf's client-side timeline.""" + return event_loop diff --git a/tests/handling/daemons/test_daemon_rematching.py b/tests/handling/daemons/test_daemon_rematching.py index 3190c032..4442f743 100644 --- a/tests/handling/daemons/test_daemon_rematching.py +++ b/tests/handling/daemons/test_daemon_rematching.py @@ -5,7 +5,7 @@ async def test_running_daemon_is_stopped_when_mismatches( - resource, dummy, timer, mocker, caplog, assert_logs, k8s_mocked, simulate_cycle): + resource, dummy, looptime, mocker, caplog, assert_logs, k8s_mocked, simulate_cycle): caplog.set_level(logging.DEBUG) @kopf.daemon(*resource, id='fn', when=lambda **_: is_matching) @@ -26,9 +26,8 @@ async def fn(**kwargs): mocker.resetall() is_matching = False await simulate_cycle({}) - with timer: - await dummy.wait_for_daemon_done() + await dummy.wait_for_daemon_done() - assert timer.seconds < 0.01 # near-instantly + assert looptime == 0 stopped = dummy.kwargs['stopped'] assert DaemonStoppingReason.FILTERS_MISMATCH in stopped.reason diff --git a/tests/handling/daemons/test_daemon_termination.py b/tests/handling/daemons/test_daemon_termination.py index ac0f8f3f..3a0e97e1 100644 --- a/tests/handling/daemons/test_daemon_termination.py +++ b/tests/handling/daemons/test_daemon_termination.py @@ -9,7 +9,7 @@ async def test_daemon_exits_gracefully_and_instantly_on_resource_deletion( settings, resource, dummy, simulate_cycle, - caplog, assert_logs, k8s_mocked, frozen_time, mocker, timer): + caplog, assert_logs, k8s_mocked, frozen_time, mocker, looptime): caplog.set_level(logging.DEBUG) # A daemon-under-test. @@ -31,10 +31,9 @@ async def fn(**kwargs): await simulate_cycle(event_object) # Check that the daemon has exited near-instantly, with no delays. - with timer: - await dummy.wait_for_daemon_done() + await dummy.wait_for_daemon_done() - assert timer.seconds < 0.01 # near-instantly + assert looptime == 0 assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == [] @@ -42,7 +41,7 @@ async def fn(**kwargs): async def test_daemon_exits_gracefully_and_instantly_on_operator_exiting( settings, resource, dummy, simulate_cycle, background_daemon_killer, - caplog, assert_logs, k8s_mocked, frozen_time, mocker, timer): + caplog, assert_logs, k8s_mocked, frozen_time, mocker, looptime): caplog.set_level(logging.DEBUG) # A daemon-under-test. @@ -63,10 +62,9 @@ async def fn(**kwargs): background_daemon_killer.cancel() # Check that the daemon has exited near-instantly, with no delays. - with timer: - await dummy.wait_for_daemon_done() + await dummy.wait_for_daemon_done() - assert timer.seconds < 0.01 # near-instantly + assert looptime == 0 assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 0 @@ -78,7 +76,7 @@ async def fn(**kwargs): @pytest.mark.usefixtures('background_daemon_killer') async def test_daemon_exits_gracefully_and_instantly_on_operator_pausing( settings, memories, resource, dummy, simulate_cycle, conflicts_found, - caplog, assert_logs, k8s_mocked, frozen_time, mocker, timer): + caplog, assert_logs, k8s_mocked, frozen_time, mocker, looptime): caplog.set_level(logging.DEBUG) # A daemon-under-test. 
@@ -99,9 +97,8 @@ async def fn(**kwargs): await conflicts_found.turn_to(True) # Check that the daemon has exited near-instantly, with no delays. - with timer: - await dummy.wait_for_daemon_done() - assert timer.seconds < 0.01 # near-instantly + await dummy.wait_for_daemon_done() + assert looptime == 0 # There is no way to test for re-spawning here: it is done by watch-events, # which are tested by the paused operators elsewhere (test_daemon_spawning.py). diff --git a/tests/handling/indexing/test_blocking_until_indexed.py b/tests/handling/indexing/test_blocking_until_indexed.py index 8d8c58a3..f15fb5ec 100644 --- a/tests/handling/indexing/test_blocking_until_indexed.py +++ b/tests/handling/indexing/test_blocking_until_indexed.py @@ -16,26 +16,25 @@ @pytest.mark.parametrize('event_type', EVENT_TYPES_WHEN_EXISTS) async def test_reporting_on_resource_readiness( - resource, settings, registry, indexers, caplog, event_type, handlers, timer): + resource, settings, registry, indexers, caplog, event_type, handlers, looptime): caplog.set_level(logging.DEBUG) operator_indexed = ToggleSet(all) resource_indexed = await operator_indexed.make_toggle() - with timer: - await process_resource_event( - lifecycle=all_at_once, - registry=registry, - settings=settings, - resource=resource, - indexers=indexers, - memories=ResourceMemories(), - memobase=Memo(), - raw_event={'type': event_type, 'object': {}}, - event_queue=asyncio.Queue(), - operator_indexed=operator_indexed, - resource_indexed=resource_indexed, - ) - assert timer.seconds < 0.2 # asap, nowait + await process_resource_event( + lifecycle=all_at_once, + registry=registry, + settings=settings, + resource=resource, + indexers=indexers, + memories=ResourceMemories(), + memobase=Memo(), + raw_event={'type': event_type, 'object': {}}, + event_queue=asyncio.Queue(), + operator_indexed=operator_indexed, + resource_indexed=resource_indexed, + ) + assert looptime == 0 assert operator_indexed.is_on() assert set(operator_indexed) == set() # save RAM assert handlers.event_mock.called @@ -43,13 +42,13 @@ async def test_reporting_on_resource_readiness( @pytest.mark.parametrize('event_type', EVENT_TYPES_WHEN_EXISTS) async def test_blocking_when_operator_is_not_ready( - resource, settings, registry, indexers, caplog, event_type, handlers, timer): + resource, settings, registry, indexers, caplog, event_type, handlers, looptime): caplog.set_level(logging.DEBUG) operator_indexed = ToggleSet(all) resource_listed = await operator_indexed.make_toggle() resource_indexed = await operator_indexed.make_toggle() - with pytest.raises(asyncio.TimeoutError), timer: + with pytest.raises(asyncio.TimeoutError): await asyncio.wait_for(process_resource_event( lifecycle=all_at_once, registry=registry, @@ -62,8 +61,8 @@ async def test_blocking_when_operator_is_not_ready( event_queue=asyncio.Queue(), operator_indexed=operator_indexed, resource_indexed=resource_indexed, - ), timeout=0.2) - assert 0.2 < timer.seconds < 0.4 + ), timeout=1.23) + assert looptime == 1.23 assert operator_indexed.is_off() assert set(operator_indexed) == {resource_listed} assert not handlers.event_mock.called @@ -71,7 +70,7 @@ async def test_blocking_when_operator_is_not_ready( @pytest.mark.parametrize('event_type', EVENT_TYPES_WHEN_EXISTS) async def test_unblocking_once_operator_is_ready( - resource, settings, registry, indexers, caplog, event_type, handlers, timer): + resource, settings, registry, indexers, caplog, event_type, handlers, looptime): caplog.set_level(logging.DEBUG) async def 
delayed_readiness(delay: float): @@ -81,22 +80,21 @@ async def delayed_readiness(delay: float): operator_indexed = ToggleSet(all) resource_listed = await operator_indexed.make_toggle() resource_indexed = await operator_indexed.make_toggle() - with timer: - asyncio.create_task(delayed_readiness(0.2)) - await process_resource_event( - lifecycle=all_at_once, - registry=registry, - settings=settings, - resource=resource, - indexers=indexers, - memories=ResourceMemories(), - memobase=Memo(), - raw_event={'type': event_type, 'object': {}}, - event_queue=asyncio.Queue(), - operator_indexed=operator_indexed, - resource_indexed=resource_indexed, - ) - assert 0.2 < timer.seconds < 0.4 + asyncio.create_task(delayed_readiness(1.23)) + await process_resource_event( + lifecycle=all_at_once, + registry=registry, + settings=settings, + resource=resource, + indexers=indexers, + memories=ResourceMemories(), + memobase=Memo(), + raw_event={'type': event_type, 'object': {}}, + event_queue=asyncio.Queue(), + operator_indexed=operator_indexed, + resource_indexed=resource_indexed, + ) + assert looptime == 1.23 assert operator_indexed.is_on() assert set(operator_indexed) == {resource_listed} assert handlers.event_mock.called diff --git a/tests/handling/subhandling/test_subhandling.py b/tests/handling/subhandling/test_subhandling.py index 9b2832d2..63059f23 100644 --- a/tests/handling/subhandling/test_subhandling.py +++ b/tests/handling/subhandling/test_subhandling.py @@ -16,7 +16,7 @@ @pytest.mark.parametrize('event_type', EVENT_TYPES_WHEN_EXISTS) async def test_1st_level(registry, settings, resource, cause_mock, event_type, - caplog, assert_logs, k8s_mocked): + caplog, assert_logs, k8s_mocked, looptime): caplog.set_level(logging.DEBUG) cause_mock.reason = Reason.CREATE @@ -57,7 +57,7 @@ async def sub1b(**_): assert sub1a_mock.call_count == 1 assert sub1b_mock.call_count == 1 - assert k8s_mocked.sleep.call_count == 0 + assert looptime == 0 assert k8s_mocked.patch.call_count == 1 assert not event_queue.empty() @@ -76,7 +76,7 @@ async def sub1b(**_): @pytest.mark.parametrize('event_type', EVENT_TYPES_WHEN_EXISTS) async def test_2nd_level(registry, settings, resource, cause_mock, event_type, - caplog, assert_logs, k8s_mocked): + caplog, assert_logs, k8s_mocked, looptime): caplog.set_level(logging.DEBUG) cause_mock.reason = Reason.CREATE @@ -137,7 +137,7 @@ def sub1b2b(**kwargs): assert sub1b2a_mock.call_count == 1 assert sub1b2b_mock.call_count == 1 - assert k8s_mocked.sleep.call_count == 0 + assert looptime == 0 assert k8s_mocked.patch.call_count == 1 assert not event_queue.empty() diff --git a/tests/handling/test_activity_triggering.py b/tests/handling/test_activity_triggering.py index 8feab694..6a82cf16 100644 --- a/tests/handling/test_activity_triggering.py +++ b/tests/handling/test_activity_triggering.py @@ -154,7 +154,7 @@ def sample_fn(**_): @pytest.mark.parametrize('activity', list(Activity)) -async def test_delays_are_simulated(settings, activity, mocker): +async def test_delays_are_simulated(settings, activity, looptime): def sample_fn(**_): raise TemporaryError('to be retried', delay=123) @@ -165,24 +165,14 @@ def sample_fn(**_): param=None, errors=None, timeout=None, retries=3, backoff=None, )) - with freezegun.freeze_time() as frozen: - - async def sleep_substitute(*_, **__): - frozen.tick(123) - - sleep = mocker.patch('kopf._cogs.aiokits.aiotime.sleep', wraps=sleep_substitute) - - with pytest.raises(ActivityError) as e: - await run_activity( - registry=registry, - settings=settings, - 
activity=activity, - lifecycle=all_at_once, - indices=OperatorIndexers().indices, - memo=Memo(), - ) + with pytest.raises(ActivityError) as e: + await run_activity( + registry=registry, + settings=settings, + activity=activity, + lifecycle=all_at_once, + indices=OperatorIndexers().indices, + memo=Memo(), + ) - assert sleep.call_count >= 3 # 3 retries, 1 sleep each - assert sleep.call_count <= 4 # 3 retries, 1 final success (delay=None), not more - if sleep.call_count > 3: - sleep.call_args_list[-1][0][0] is None + assert looptime == 123 * 2 diff --git a/tests/handling/test_cause_handling.py b/tests/handling/test_cause_handling.py index 5ccf3115..4e275d2e 100644 --- a/tests/handling/test_cause_handling.py +++ b/tests/handling/test_cause_handling.py @@ -39,7 +39,6 @@ async def test_create(registry, settings, handlers, resource, cause_mock, event_ assert not handlers.update_mock.called assert not handlers.delete_mock.called - assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 1 assert not event_queue.empty() @@ -80,7 +79,6 @@ async def test_update(registry, settings, handlers, resource, cause_mock, event_ assert handlers.update_mock.call_count == 1 assert not handlers.delete_mock.called - assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 1 assert not event_queue.empty() @@ -123,7 +121,6 @@ async def test_delete(registry, settings, handlers, resource, cause_mock, event_ assert not handlers.update_mock.called assert handlers.delete_mock.call_count == 1 - assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 1 assert not event_queue.empty() @@ -194,8 +191,6 @@ async def test_free(registry, settings, handlers, resource, cause_mock, event_ty assert not handlers.create_mock.called assert not handlers.update_mock.called assert not handlers.delete_mock.called - - assert not k8s_mocked.sleep.called assert not k8s_mocked.patch.called assert event_queue.empty() @@ -226,8 +221,6 @@ async def test_noop(registry, settings, handlers, resource, cause_mock, event_ty assert not handlers.create_mock.called assert not handlers.update_mock.called assert not handlers.delete_mock.called - - assert not k8s_mocked.sleep.called assert not k8s_mocked.patch.called assert event_queue.empty() diff --git a/tests/handling/test_delays.py b/tests/handling/test_delays.py index 0fc2a82e..c64ecb32 100644 --- a/tests/handling/test_delays.py +++ b/tests/handling/test_delays.py @@ -52,7 +52,6 @@ async def test_delayed_handlers_progress( assert handlers.delete_mock.call_count == (1 if cause_reason == Reason.DELETE else 0) assert handlers.resume_mock.call_count == (1 if cause_reason == Reason.RESUME else 0) - assert not k8s_mocked.sleep.called assert k8s_mocked.patch.called fname = f'{cause_reason}_fn' @@ -72,7 +71,7 @@ async def test_delayed_handlers_progress( ], ids=['fast', 'slow']) async def test_delayed_handlers_sleep( registry, settings, handlers, resource, cause_mock, cause_reason, - caplog, assert_logs, k8s_mocked, now, delayed_iso, delay): + caplog, assert_logs, k8s_mocked, now, delayed_iso, delay, looptime): caplog.set_level(logging.DEBUG) basetime = datetime.datetime.utcnow() # any "future" time works and affects nothing as long as it is the same @@ -115,8 +114,7 @@ async def test_delayed_handlers_sleep( assert 'dummy' in k8s_mocked.patch.call_args_list[-1][1]['payload']['status']['kopf'] # The duration of sleep should be as expected. 
- assert k8s_mocked.sleep.called - assert k8s_mocked.sleep.call_args_list[0][0][0] == delay + assert looptime == delay assert_logs([ r"Sleeping for ([\d\.]+|[\d\.]+ \(capped [\d\.]+\)) seconds", diff --git a/tests/handling/test_error_handling.py b/tests/handling/test_error_handling.py index c987d18f..4383d7dd 100644 --- a/tests/handling/test_error_handling.py +++ b/tests/handling/test_error_handling.py @@ -16,7 +16,7 @@ @pytest.mark.parametrize('cause_type', HANDLER_REASONS) async def test_fatal_error_stops_handler( registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, - caplog, assert_logs, k8s_mocked): + caplog, assert_logs, k8s_mocked, looptime): caplog.set_level(logging.DEBUG) name1 = f'{cause_type}_fn' @@ -44,7 +44,7 @@ async def test_fatal_error_stops_handler( assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0) assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0) - assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = k8s_mocked.patch.call_args_list[0][1]['payload'] @@ -61,7 +61,7 @@ async def test_fatal_error_stops_handler( @pytest.mark.parametrize('cause_type', HANDLER_REASONS) async def test_retry_error_delays_handler( registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, - caplog, assert_logs, k8s_mocked): + caplog, assert_logs, k8s_mocked, looptime): caplog.set_level(logging.DEBUG) name1 = f'{cause_type}_fn' @@ -89,7 +89,7 @@ async def test_retry_error_delays_handler( assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0) assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0) - assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = k8s_mocked.patch.call_args_list[0][1]['payload'] @@ -107,7 +107,7 @@ async def test_retry_error_delays_handler( @pytest.mark.parametrize('cause_type', HANDLER_REASONS) async def test_arbitrary_error_delays_handler( registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, - caplog, assert_logs, k8s_mocked): + caplog, assert_logs, k8s_mocked, looptime): caplog.set_level(logging.DEBUG) name1 = f'{cause_type}_fn' @@ -135,7 +135,7 @@ async def test_arbitrary_error_delays_handler( assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0) assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0) - assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = k8s_mocked.patch.call_args_list[0][1]['payload'] diff --git a/tests/handling/test_multistep.py b/tests/handling/test_multistep.py index 93403ac1..a0340a40 100644 --- a/tests/handling/test_multistep.py +++ b/tests/handling/test_multistep.py @@ -18,7 +18,7 @@ @pytest.mark.parametrize('cause_type', HANDLER_REASONS) async def test_1st_step_stores_progress_by_patching( registry, settings, handlers, extrahandlers, - resource, cause_mock, cause_type, k8s_mocked, deletion_ts): + resource, cause_mock, cause_type, k8s_mocked, looptime, deletion_ts): name1 = f'{cause_type}_fn' name2 = f'{cause_type}_fn2' @@ -46,7 +46,7 @@ async def test_1st_step_stores_progress_by_patching( assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0) assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0) - assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = 
k8s_mocked.patch.call_args_list[0][1]['payload'] @@ -74,7 +74,7 @@ async def test_1st_step_stores_progress_by_patching( @pytest.mark.parametrize('cause_type', HANDLER_REASONS) async def test_2nd_step_finishes_the_handlers(caplog, registry, settings, handlers, extrahandlers, - resource, cause_mock, cause_type, k8s_mocked, deletion_ts): + resource, cause_mock, cause_type, k8s_mocked, looptime, deletion_ts): name1 = f'{cause_type}_fn' name2 = f'{cause_type}_fn2' @@ -106,7 +106,7 @@ async def test_2nd_step_finishes_the_handlers(caplog, assert extrahandlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0) assert extrahandlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0) - assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = k8s_mocked.patch.call_args_list[0][1]['payload'] diff --git a/tests/handling/test_no_handlers.py b/tests/handling/test_no_handlers.py index c62d9dad..50c12f31 100644 --- a/tests/handling/test_no_handlers.py +++ b/tests/handling/test_no_handlers.py @@ -46,7 +46,6 @@ async def test_skipped_with_no_handlers( event_queue=asyncio.Queue(), ) - assert not k8s_mocked.sleep.called assert k8s_mocked.patch.called # The patch must contain ONLY the last-seen update, and nothing else. @@ -102,6 +101,5 @@ async def test_stealth_mode_with_mismatching_handlers( event_queue=asyncio.Queue(), ) - assert not k8s_mocked.sleep.called assert not k8s_mocked.patch.called assert not caplog.messages # total stealth mode! diff --git a/tests/handling/test_retrying_limits.py b/tests/handling/test_retrying_limits.py index 6bc4e7bb..9893fd8f 100644 --- a/tests/handling/test_retrying_limits.py +++ b/tests/handling/test_retrying_limits.py @@ -20,7 +20,7 @@ ], ids=['slow']) async def test_timed_out_handler_fails( registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, - caplog, assert_logs, k8s_mocked, now, ts): + caplog, assert_logs, k8s_mocked, looptime, now, ts): caplog.set_level(logging.DEBUG) name1 = f'{cause_type}_fn' @@ -54,7 +54,7 @@ async def test_timed_out_handler_fails( assert not handlers.resume_mock.called # Progress is reset, as the handler is not going to retry. - assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = k8s_mocked.patch.call_args_list[0][1]['payload'] @@ -71,7 +71,7 @@ async def test_timed_out_handler_fails( @pytest.mark.parametrize('cause_type', HANDLER_REASONS) async def test_retries_limited_handler_fails( registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, - caplog, assert_logs, k8s_mocked): + caplog, assert_logs, k8s_mocked, looptime): caplog.set_level(logging.DEBUG) name1 = f'{cause_type}_fn' @@ -104,7 +104,7 @@ async def test_retries_limited_handler_fails( assert not handlers.resume_mock.called # Progress is reset, as the handler is not going to retry. 
- assert not k8s_mocked.sleep.called + assert looptime == 0 assert k8s_mocked.patch.called patch = k8s_mocked.patch.call_args_list[0][1]['payload'] diff --git a/tests/k8s/test_watching_with_freezes.py b/tests/k8s/test_watching_with_freezes.py index c1512da8..efecbbc1 100644 --- a/tests/k8s/test_watching_with_freezes.py +++ b/tests/k8s/test_watching_with_freezes.py @@ -8,21 +8,20 @@ async def test_pausing_is_ignored_if_turned_off( - resource, namespace, timer, caplog, assert_logs): + resource, namespace, looptime, caplog, assert_logs): caplog.set_level(logging.DEBUG) operator_paused = ToggleSet(any) await operator_paused.make_toggle(False) - with timer: - async with streaming_block( - resource=resource, - namespace=namespace, - operator_paused=operator_paused, - ): - pass + async with streaming_block( + resource=resource, + namespace=namespace, + operator_paused=operator_paused, + ): + pass - assert timer.seconds < 0.2 # no waits, exits as soon as possible + assert looptime == 0 assert_logs([], prohibited=[ r"Pausing the watch-stream for", r"Resuming the watch-stream for", @@ -30,7 +29,7 @@ async def test_pausing_is_ignored_if_turned_off( async def test_pausing_waits_forever_if_not_resumed( - resource, namespace, timer, caplog, assert_logs): + resource, namespace, looptime, caplog, assert_logs): caplog.set_level(logging.DEBUG) operator_paused = ToggleSet(any) @@ -44,10 +43,10 @@ async def do_it(): ): pass - with pytest.raises(asyncio.TimeoutError), timer: - await asyncio.wait_for(do_it(), timeout=0.5) + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(do_it(), timeout=1.23) - assert timer.seconds >= 0.5 + assert looptime == 1.23 assert_logs([ r"Pausing the watch-stream for", ], prohibited=[ @@ -56,7 +55,7 @@ async def do_it(): async def test_pausing_waits_until_resumed( - resource, namespace, timer, caplog, assert_logs): + resource, namespace, looptime, caplog, assert_logs): caplog.set_level(logging.DEBUG) operator_paused = ToggleSet(any) @@ -66,17 +65,15 @@ async def delayed_resuming(delay: float): await asyncio.sleep(delay) await conflicts_found.turn_to(False) - with timer: - asyncio.create_task(delayed_resuming(0.2)) - async with streaming_block( - resource=resource, - namespace=namespace, - operator_paused=operator_paused, - ): - pass + asyncio.create_task(delayed_resuming(1.23)) + async with streaming_block( + resource=resource, + namespace=namespace, + operator_paused=operator_paused, + ): + pass - assert timer.seconds >= 0.2 - assert timer.seconds <= 0.5 + assert looptime == 1.23 assert_logs([ r"Pausing the watch-stream for", r"Resuming the watch-stream for", diff --git a/tests/observation/test_processing_of_namespaces.py b/tests/observation/test_processing_of_namespaces.py index a8c6ba4c..2fbe1a5f 100644 --- a/tests/observation/test_processing_of_namespaces.py +++ b/tests/observation/test_processing_of_namespaces.py @@ -7,7 +7,7 @@ from kopf._core.reactor.observation import process_discovered_namespace_event -async def test_initial_listing_is_ignored(): +async def test_initial_listing_is_ignored(looptime): insights = Insights() e1 = RawEvent(type=None, object=RawBody(metadata={'name': 'ns1'})) @@ -19,13 +19,15 @@ async def delayed_injection(delay: float): task = asyncio.create_task(delayed_injection(0)) with pytest.raises(asyncio.TimeoutError): async with insights.revised: - await asyncio.wait_for(insights.revised.wait(), timeout=0.1) + await asyncio.wait_for(insights.revised.wait(), timeout=1.23) await task + + assert looptime == 1.23 assert not 
insights.namespaces @pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED']) -async def test_followups_for_addition(timer, etype): +async def test_followups_for_addition(looptime, etype): insights = Insights() e1 = RawEvent(type=etype, object=RawBody(metadata={'name': 'ns1'})) @@ -34,17 +36,16 @@ async def delayed_injection(delay: float): await process_discovered_namespace_event( insights=insights, raw_event=e1, namespaces=['ns*']) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - async with insights.revised: - await insights.revised.wait() + task = asyncio.create_task(delayed_injection(9)) + async with insights.revised: + await insights.revised.wait() await task - assert 0.1 < timer.seconds < 0.11 + assert looptime == 9 assert insights.namespaces == {'ns1'} @pytest.mark.parametrize('etype', ['DELETED']) -async def test_followups_for_deletion(timer, etype): +async def test_followups_for_deletion(looptime, etype): insights = Insights() insights.namespaces.add('ns1') e1 = RawEvent(type=etype, object=RawBody(metadata={'name': 'ns1'})) @@ -54,10 +55,9 @@ async def delayed_injection(delay: float): await process_discovered_namespace_event( insights=insights, raw_event=e1, namespaces=['ns*']) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - async with insights.revised: - await insights.revised.wait() + task = asyncio.create_task(delayed_injection(9)) + async with insights.revised: + await insights.revised.wait() await task - assert 0.1 < timer.seconds < 0.11 + assert looptime == 9 assert not insights.namespaces diff --git a/tests/observation/test_processing_of_resources.py b/tests/observation/test_processing_of_resources.py index 44b9df2d..e751fcab 100644 --- a/tests/observation/test_processing_of_resources.py +++ b/tests/observation/test_processing_of_resources.py @@ -112,7 +112,7 @@ def fn(**_): pass @pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) @pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED']) async def test_nonwatchable_resources_are_ignored( - settings, registry, apis_mock, group1_mock, timer, etype, decorator, insights): + settings, registry, apis_mock, group1_mock, looptime, etype, decorator, insights): @decorator('group1', 'version1', 'plural1') def fn(**_): pass @@ -124,19 +124,18 @@ async def delayed_injection(delay: float): await process_discovered_resource_event( insights=insights, raw_event=e1, registry=registry, settings=settings) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - async with insights.revised: - await insights.revised.wait() + task = asyncio.create_task(delayed_injection(1.23)) + async with insights.revised: + await insights.revised.wait() await task - assert 0.1 < timer.seconds < 1.0 + assert looptime == 1.23 assert not insights.watched_resources assert apis_mock.called assert group1_mock.called async def test_initial_listing_is_ignored( - settings, registry, apis_mock, group1_mock, insights): + settings, registry, apis_mock, group1_mock, looptime, insights): e1 = RawEvent(type=None, object=RawBody(spec={'group': 'group1'})) @@ -148,8 +147,10 @@ async def delayed_injection(delay: float): task = asyncio.create_task(delayed_injection(0)) with pytest.raises(asyncio.TimeoutError): async with insights.revised: - await asyncio.wait_for(insights.revised.wait(), timeout=0.1) + await asyncio.wait_for(insights.revised.wait(), timeout=1.23) await task + + assert looptime == 1.23 assert not insights.indexed_resources assert not insights.watched_resources assert not 
insights.webhook_resources @@ -159,7 +160,7 @@ async def delayed_injection(delay: float): @pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED']) async def test_followups_for_addition( - settings, registry, apis_mock, group1_mock, timer, etype, insights, insights_resources): + settings, registry, apis_mock, group1_mock, looptime, etype, insights, insights_resources): e1 = RawEvent(type=etype, object=RawBody(spec={'group': 'group1'})) r1 = Resource(group='group1', version='version1', plural='plural1') @@ -169,12 +170,12 @@ async def delayed_injection(delay: float): await process_discovered_resource_event( insights=insights, raw_event=e1, registry=registry, settings=settings) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - async with insights.revised: - await insights.revised.wait() + task = asyncio.create_task(delayed_injection(1.23)) + async with insights.revised: + await insights.revised.wait() await task - assert 0.1 < timer.seconds < 1.0 + + assert looptime == 1.23 assert insights_resources == {r1} assert apis_mock.called assert group1_mock.called @@ -182,7 +183,7 @@ async def delayed_injection(delay: float): @pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED', 'DELETED']) async def test_followups_for_deletion_of_resource( - settings, registry, apis_mock, group1_empty_mock, timer, etype, + settings, registry, apis_mock, group1_empty_mock, looptime, etype, insights, insights_resources): e1 = RawEvent(type=etype, object=RawBody(spec={'group': 'group1'})) @@ -194,12 +195,12 @@ async def delayed_injection(delay: float): await process_discovered_resource_event( insights=insights, raw_event=e1, registry=registry, settings=settings) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - async with insights.revised: - await insights.revised.wait() + task = asyncio.create_task(delayed_injection(1.23)) + async with insights.revised: + await insights.revised.wait() await task - assert 0.1 < timer.seconds < 1.0 + + assert looptime == 1.23 assert not insights_resources assert apis_mock.called assert group1_empty_mock.called @@ -207,7 +208,7 @@ async def delayed_injection(delay: float): @pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED', 'DELETED']) async def test_followups_for_deletion_of_group( - settings, registry, apis_mock, group1_404mock, timer, etype, insights, insights_resources): + settings, registry, apis_mock, group1_404mock, looptime, etype, insights, insights_resources): e1 = RawEvent(type=etype, object=RawBody(spec={'group': 'group1'})) r1 = Resource(group='group1', version='version1', plural='plural1') @@ -218,12 +219,12 @@ async def delayed_injection(delay: float): await process_discovered_resource_event( insights=insights, raw_event=e1, registry=registry, settings=settings) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - async with insights.revised: - await insights.revised.wait() + task = asyncio.create_task(delayed_injection(1.23)) + async with insights.revised: + await insights.revised.wait() await task - assert 0.1 < timer.seconds < 1.0 + + assert looptime == 1.23 assert not insights_resources assert apis_mock.called assert group1_404mock.called @@ -231,7 +232,7 @@ async def delayed_injection(delay: float): @pytest.mark.parametrize('etype', ['DELETED']) async def test_backbone_is_filled( - settings, registry, core_mock, corev1_mock, timer, etype, insights): + settings, registry, core_mock, corev1_mock, looptime, etype, insights): e1 = RawEvent(type=etype, object=RawBody(spec={'group': ''})) @@ -240,11 +241,11 @@ 
async def delayed_injection(delay: float): await process_discovered_resource_event( insights=insights, raw_event=e1, registry=registry, settings=settings) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - await insights.backbone.wait_for(NAMESPACES) + task = asyncio.create_task(delayed_injection(1.23)) + await insights.backbone.wait_for(NAMESPACES) await task - assert 0.1 < timer.seconds < 1.0 + + assert looptime == 1.23 assert NAMESPACES in insights.backbone assert core_mock.called assert corev1_mock.called diff --git a/tests/peering/test_freeze_mode.py b/tests/peering/test_freeze_mode.py index 3e42b59c..6428880c 100644 --- a/tests/peering/test_freeze_mode.py +++ b/tests/peering/test_freeze_mode.py @@ -1,3 +1,5 @@ +import asyncio + import freezegun import pytest @@ -7,7 +9,7 @@ async def test_other_peering_objects_are_ignored( - mocker, k8s_mocked, settings, + mocker, k8s_mocked, settings, looptime, peering_resource, peering_namespace): status = mocker.Mock() @@ -30,12 +32,12 @@ async def test_other_peering_objects_are_ignored( ) assert not status.items.called assert not k8s_mocked.patch.called - assert k8s_mocked.sleep.call_count == 0 + assert looptime == 0 @freezegun.freeze_time('2020-12-31T23:59:59.123456') async def test_toggled_on_for_higher_priority_peer_when_initially_off( - k8s_mocked, caplog, assert_logs, settings, + k8s_mocked, caplog, assert_logs, settings, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -54,13 +56,15 @@ async def test_toggled_on_for_higher_priority_peer_when_initially_off( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(False) - k8s_mocked.sleep.return_value = 1 # as if interrupted by stream pressure + stream_pressure = asyncio.Event() + event_loop.call_later(1.23, stream_pressure.set) caplog.set_level(0) assert conflicts_found.is_off() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -68,8 +72,7 @@ async def test_toggled_on_for_higher_priority_peer_when_initially_off( settings=settings, ) assert conflicts_found.is_on() - assert k8s_mocked.sleep.call_count == 1 - assert 9 < k8s_mocked.sleep.call_args[0][0][0] < 10 + assert looptime == 1.23 assert not k8s_mocked.patch.called assert_logs(["Pausing operations in favour of"], prohibited=[ "Possibly conflicting operators", @@ -80,7 +83,7 @@ async def test_toggled_on_for_higher_priority_peer_when_initially_off( @freezegun.freeze_time('2020-12-31T23:59:59.123456') async def test_ignored_for_higher_priority_peer_when_already_on( - k8s_mocked, caplog, assert_logs, settings, + k8s_mocked, caplog, assert_logs, settings, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -99,13 +102,15 @@ async def test_ignored_for_higher_priority_peer_when_already_on( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(True) - k8s_mocked.sleep.return_value = 1 # as if interrupted by stream pressure + stream_pressure = asyncio.Event() + event_loop.call_later(1.23, stream_pressure.set) caplog.set_level(0) assert conflicts_found.is_on() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -113,8 +118,7 @@ async def test_ignored_for_higher_priority_peer_when_already_on( settings=settings, ) assert conflicts_found.is_on() - assert 
k8s_mocked.sleep.call_count == 1 - assert 9 < k8s_mocked.sleep.call_args[0][0][0] < 10 + assert looptime == 1.23 assert not k8s_mocked.patch.called assert_logs([], prohibited=[ "Possibly conflicting operators", @@ -126,7 +130,7 @@ async def test_ignored_for_higher_priority_peer_when_already_on( @freezegun.freeze_time('2020-12-31T23:59:59.123456') async def test_toggled_off_for_lower_priority_peer_when_initially_on( - k8s_mocked, caplog, assert_logs, settings, + k8s_mocked, caplog, assert_logs, settings, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -145,13 +149,15 @@ async def test_toggled_off_for_lower_priority_peer_when_initially_on( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(True) - k8s_mocked.sleep.return_value = 1 # as if interrupted by stream pressure + stream_pressure = asyncio.Event() + event_loop.call_later(1.23, stream_pressure.set) caplog.set_level(0) assert conflicts_found.is_on() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -159,8 +165,7 @@ async def test_toggled_off_for_lower_priority_peer_when_initially_on( settings=settings, ) assert conflicts_found.is_off() - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args[0][0] == [] + assert looptime == 0 assert not k8s_mocked.patch.called assert_logs(["Resuming operations after the pause"], prohibited=[ "Possibly conflicting operators", @@ -171,7 +176,7 @@ async def test_toggled_off_for_lower_priority_peer_when_initially_on( @freezegun.freeze_time('2020-12-31T23:59:59.123456') async def test_ignored_for_lower_priority_peer_when_already_off( - k8s_mocked, caplog, assert_logs, settings, + k8s_mocked, caplog, assert_logs, settings, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -190,13 +195,15 @@ async def test_ignored_for_lower_priority_peer_when_already_off( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(False) - k8s_mocked.sleep.return_value = 1 # as if interrupted by stream pressure + stream_pressure = asyncio.Event() + event_loop.call_later(1.23, stream_pressure.set) caplog.set_level(0) assert conflicts_found.is_off() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -204,8 +211,7 @@ async def test_ignored_for_lower_priority_peer_when_already_off( settings=settings, ) assert conflicts_found.is_off() - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args[0][0] == [] + assert looptime == 0 assert not k8s_mocked.patch.called assert_logs([], prohibited=[ "Possibly conflicting operators", @@ -217,7 +223,7 @@ async def test_ignored_for_lower_priority_peer_when_already_off( @freezegun.freeze_time('2020-12-31T23:59:59.123456') async def test_toggled_on_for_same_priority_peer_when_initially_off( - k8s_mocked, caplog, assert_logs, settings, + k8s_mocked, caplog, assert_logs, settings, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -236,13 +242,15 @@ async def test_toggled_on_for_same_priority_peer_when_initially_off( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(False) - k8s_mocked.sleep.return_value = 1 # as if interrupted by stream pressure + stream_pressure = asyncio.Event() + event_loop.call_later(1.23, 
stream_pressure.set) caplog.set_level(0) assert conflicts_found.is_off() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -250,8 +258,7 @@ async def test_toggled_on_for_same_priority_peer_when_initially_off( settings=settings, ) assert conflicts_found.is_on() - assert k8s_mocked.sleep.call_count == 1 - assert 9 < k8s_mocked.sleep.call_args[0][0][0] < 10 + assert looptime == 1.23 assert not k8s_mocked.patch.called assert_logs([ "Possibly conflicting operators", @@ -264,7 +271,7 @@ async def test_toggled_on_for_same_priority_peer_when_initially_off( @freezegun.freeze_time('2020-12-31T23:59:59.123456') async def test_ignored_for_same_priority_peer_when_already_on( - k8s_mocked, caplog, assert_logs, settings, + k8s_mocked, caplog, assert_logs, settings, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -283,13 +290,15 @@ async def test_ignored_for_same_priority_peer_when_already_on( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(True) - k8s_mocked.sleep.return_value = 1 # as if interrupted by stream pressure + stream_pressure = asyncio.Event() + event_loop.call_later(1.23, stream_pressure.set) caplog.set_level(0) assert conflicts_found.is_on() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -297,8 +306,7 @@ async def test_ignored_for_same_priority_peer_when_already_on( settings=settings, ) assert conflicts_found.is_on() - assert k8s_mocked.sleep.call_count == 1 - assert 9 < k8s_mocked.sleep.call_args[0][0][0] < 10 + assert looptime == 1.23 assert not k8s_mocked.patch.called assert_logs([ "Possibly conflicting operators", @@ -312,7 +320,7 @@ async def test_ignored_for_same_priority_peer_when_already_on( @freezegun.freeze_time('2020-12-31T23:59:59.123456') @pytest.mark.parametrize('priority', [100, 101]) async def test_resumes_immediately_on_expiration_of_blocking_peers( - k8s_mocked, caplog, assert_logs, settings, priority, + k8s_mocked, caplog, assert_logs, settings, priority, looptime, event_loop, peering_resource, peering_namespace): event = bodies.RawEvent( @@ -331,13 +339,14 @@ async def test_resumes_immediately_on_expiration_of_blocking_peers( settings.peering.priority = 100 conflicts_found = aiotoggles.Toggle(True) - k8s_mocked.sleep.return_value = None # as if finished sleeping uninterrupted + stream_pressure = asyncio.Event() caplog.set_level(0) assert conflicts_found.is_on() await process_peering_event( raw_event=event, conflicts_found=conflicts_found, + stream_pressure=stream_pressure, autoclean=False, namespace=peering_namespace, resource=peering_resource, @@ -345,6 +354,5 @@ async def test_resumes_immediately_on_expiration_of_blocking_peers( settings=settings, ) assert conflicts_found.is_on() - assert k8s_mocked.sleep.call_count == 1 - assert 9 < k8s_mocked.sleep.call_args[0][0][0] < 10 + assert looptime == 9.876544 assert k8s_mocked.patch.called diff --git a/tests/persistence/test_states.py b/tests/persistence/test_states.py index 41d7354b..91cfe9d2 100644 --- a/tests/persistence/test_states.py +++ b/tests/persistence/test_states.py @@ -1,3 +1,5 @@ +import asyncio + import datetime import freezegun @@ -379,12 +381,17 @@ async def test_started_from_storage(storage, handler, body, expected): (TSB_ISO, {'status': {'kopf': {'progress': {'some-id': 
{'started': TSB_ISO}}}}}), (TSA_ISO, {'status': {'kopf': {'progress': {'some-id': {'started': TSA_ISO}}}}}), ]) -async def test_started_from_storage_is_preferred_over_from_scratch(storage, handler, body, expected): - with freezegun.freeze_time(TS0): - state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) - with freezegun.freeze_time(TS1): - state = state.with_handlers([handler]) +@freezegun.freeze_time(TS0) +async def test_started_from_storage_is_preferred_over_from_scratch(storage, handler, body, expected, looptime): patch = Patch() + state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) + + # The state uses both the loop time and the wall-clock "base time", so we move them in sync. + await asyncio.sleep(123) + with freezegun.freeze_time() as ft: + ft.tick(123) + + state = state.with_handlers([handler]) state.store(body=Body({}), patch=patch, storage=storage) assert patch['status']['kopf']['progress']['some-id']['started'] == expected @@ -506,7 +513,11 @@ async def test_awakened_flag(storage, handler, expected, body): (None, {'status': {'kopf': {'progress': {'some-id': {}}}}}), (None, {'status': {'kopf': {'progress': {'some-id': {'delayed': None}}}}}), (TS0, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO}}}}}), + (TS1, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS1_ISO}}}}}), + (TSB, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO}}}}}), + (TSA, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO}}}}}), ]) +@freezegun.freeze_time(TS0) async def test_awakening_time(storage, handler, expected, body): state = State.from_storage(body=Body(body), handlers=[handler], storage=storage) state = state.with_handlers([handler]) diff --git a/tests/posting/test_threadsafety.py b/tests/posting/test_threadsafety.py index 65914ed9..0ebfc0aa 100644 --- a/tests/posting/test_threadsafety.py +++ b/tests/posting/test_threadsafety.py @@ -33,13 +33,13 @@ If thread safety is not ensured, the operators get sporadic errors regarding thread-unsafe calls, which are difficult to catch and reproduce. 
""" - import asyncio import contextvars import functools import threading import time +import looptime import pytest from kopf import event @@ -48,24 +48,6 @@ 'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}} -@pytest.fixture() -def awakener(event_loop): - handles = [] - - def noop(): - pass - - def awaken_fn(delay, fn=noop): - handle = event_loop.call_later(delay, fn) - handles.append(handle) - - try: - yield awaken_fn - finally: - for handle in handles: - handle.cancel() - - @pytest.fixture() def threader(): threads = [] @@ -87,44 +69,59 @@ def thread_fn(): thread.join() -async def test_nonthreadsafe_indeed_fails(timer, awakener, threader, event_queue, event_queue_loop): +@pytest.mark.looptime(False) +async def test_nonthreadsafe_indeed_fails(chronometer, threader, event_queue, event_loop): + thread_was_called = threading.Event() def thread_fn(): + thread_was_called.set() event_queue.put_nowait(object()) - awakener(0.7) - threader(0.3, thread_fn) + threader(0.5, lambda: event_loop.call_soon_threadsafe(lambda: None)) + threader(0.2, thread_fn) - with timer: + with chronometer, looptime.Chronometer(event_loop.time) as loopometer: await event_queue.get() - assert 0.6 <= timer.seconds <= 0.8 + assert 0.5 <= chronometer.seconds < 0.6 + assert 0.5 <= loopometer.seconds < 0.6 + assert thread_was_called.is_set() -async def test_threadsafe_indeed_works(timer, awakener, threader, event_queue, event_queue_loop): +@pytest.mark.looptime(False) +async def test_threadsafe_indeed_works(chronometer, threader, event_queue, event_loop): + thread_was_called = threading.Event() def thread_fn(): - asyncio.run_coroutine_threadsafe(event_queue.put(object()), loop=event_queue_loop) + thread_was_called.set() + asyncio.run_coroutine_threadsafe(event_queue.put(object()), loop=event_loop) - awakener(0.7) - threader(0.3, thread_fn) + threader(0.5, lambda: event_loop.call_soon_threadsafe(lambda: None)) + threader(0.2, thread_fn) - with timer: + with chronometer, looptime.Chronometer(event_loop.time) as loopometer: await event_queue.get() - assert 0.2 <= timer.seconds <= 0.4 + assert 0.2 <= chronometer.seconds < 0.3 + assert 0.2 <= loopometer.seconds < 0.3 + assert thread_was_called.is_set() -async def test_queueing_is_threadsafe(timer, awakener, threader, event_queue, event_queue_loop, - settings_via_contextvar): +@pytest.mark.looptime(False) +@pytest.mark.usefixtures('event_queue_loop', 'settings_via_contextvar') +async def test_queueing_is_threadsafe(chronometer, threader, event_queue, event_loop): + thread_was_called = threading.Event() def thread_fn(): + thread_was_called.set() event(OBJ1, type='type1', reason='reason1', message='message1') - awakener(0.7) - threader(0.3, thread_fn) + threader(0.5, lambda: event_loop.call_soon_threadsafe(lambda: None)) + threader(0.2, thread_fn) - with timer: + with chronometer, looptime.Chronometer(event_loop.time) as loopometer: await event_queue.get() - assert 0.2 <= timer.seconds <= 0.4 + assert 0.2 <= chronometer.seconds < 0.3 + assert 0.2 <= loopometer.seconds < 0.3 + assert thread_was_called.is_set() diff --git a/tests/primitives/test_conditions.py b/tests/primitives/test_conditions.py index 75919bd8..d69d232f 100644 --- a/tests/primitives/test_conditions.py +++ b/tests/primitives/test_conditions.py @@ -5,20 +5,21 @@ from kopf._cogs.aiokits.aiobindings import condition_chain -async def test_no_triggering(): +async def test_no_triggering(looptime): source = asyncio.Condition() target = asyncio.Condition() task = asyncio.create_task(condition_chain(source, 
target)) try: with pytest.raises(asyncio.TimeoutError): async with target: - await asyncio.wait_for(target.wait(), timeout=0.1) + await asyncio.wait_for(target.wait(), timeout=1.23) + assert looptime == 1.23 finally: task.cancel() await asyncio.wait([task]) -async def test_triggering(event_loop, timer): +async def test_triggering(event_loop, looptime): source = asyncio.Condition() target = asyncio.Condition() task = asyncio.create_task(condition_chain(source, target)) @@ -28,13 +29,12 @@ async def delayed_trigger(): async with source: source.notify_all() - event_loop.call_later(0.1, asyncio.create_task, delayed_trigger()) + event_loop.call_later(1.23, asyncio.create_task, delayed_trigger()) - with timer: - async with target: - await target.wait() + async with target: + await target.wait() - assert 0.1 <= timer.seconds <= 0.2 + assert looptime == 1.23 finally: task.cancel() diff --git a/tests/primitives/test_containers.py b/tests/primitives/test_containers.py index d289d129..6a9691a7 100644 --- a/tests/primitives/test_containers.py +++ b/tests/primitives/test_containers.py @@ -5,79 +5,74 @@ from kopf._cogs.aiokits.aiovalues import Container -async def test_empty_by_default(): +async def test_empty_by_default(looptime): container = Container() with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(container.wait(), timeout=0.1) + await asyncio.wait_for(container.wait(), timeout=9) + assert looptime == 9 -async def test_does_not_wake_up_when_reset(event_loop, timer): +async def test_does_not_wake_up_when_reset(event_loop, looptime): container = Container() async def reset_it(): await container.reset() - event_loop.call_later(0.05, asyncio.create_task, reset_it()) - + event_loop.call_later(1, asyncio.create_task, reset_it()) with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(container.wait(), timeout=0.1) + await asyncio.wait_for(container.wait(), timeout=9) + assert looptime == 9 -async def test_wakes_up_when_preset(event_loop, timer): +async def test_wakes_up_when_preset(looptime): container = Container() await container.set(123) - with timer: - result = await container.wait() - - assert timer.seconds <= 0.1 + result = await container.wait() + assert looptime == 0 assert result == 123 -async def test_wakes_up_when_set(event_loop, timer): +async def test_wakes_up_when_set(event_loop, looptime): container = Container() async def set_it(): await container.set(123) - event_loop.call_later(0.1, asyncio.create_task, set_it()) - - with timer: - result = await container.wait() + event_loop.call_later(9, asyncio.create_task, set_it()) - assert 0.1 <= timer.seconds <= 0.2 + result = await container.wait() + assert looptime == 9 assert result == 123 -async def test_iterates_when_set(event_loop, timer): +async def test_iterates_when_set(event_loop, looptime): container = Container() async def set_it(v): await container.set(v) - event_loop.call_later(0.1, asyncio.create_task, set_it(123)) - event_loop.call_later(0.2, asyncio.create_task, set_it(234)) + event_loop.call_later(6, asyncio.create_task, set_it(123)) + event_loop.call_later(9, asyncio.create_task, set_it(234)) values = [] - with timer: - async for value in container.as_changed(): - values.append(value) - if value == 234: - break + async for value in container.as_changed(): + values.append(value) + if value == 234: + break - assert 0.2 <= timer.seconds <= 0.3 + assert looptime == 9 assert values == [123, 234] -async def test_iterates_when_preset(event_loop, timer): +async def test_iterates_when_preset(looptime): 
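+    # A pre-set value must be streamed immediately: zero loop time passes.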
container = Container() await container.set(123) values = [] - with timer: - async for value in container.as_changed(): - values.append(value) - break + async for value in container.as_changed(): + values.append(value) + break - assert timer.seconds <= 0.1 + assert looptime == 0 assert values == [123] diff --git a/tests/primitives/test_flags.py b/tests/primitives/test_flags.py index f9db49ff..008c5d14 100644 --- a/tests/primitives/test_flags.py +++ b/tests/primitives/test_flags.py @@ -133,26 +133,26 @@ async def test_waiting_of_none_does_nothing(): await wait_flag(None) -async def test_waiting_for_unraised_times_out(flag, timer): - with pytest.raises(asyncio.TimeoutError), timer: - await asyncio.wait_for(wait_flag(flag), timeout=0.1) - assert timer.seconds >= 0.099 # uvloop finishes it earlier than 0.1 +async def test_waiting_for_unraised_times_out(flag, looptime): + # Beware: sync primitives consume the real time. + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(wait_flag(flag), timeout=0.123) + assert looptime == 0.123 -async def test_waiting_for_preraised_is_instant(flag, timer): +async def test_waiting_for_preraised_is_instant(flag, looptime): await raise_flag(flag) # tested separately above - with timer: - await wait_flag(flag) - assert timer.seconds < 0.5 # near-instant, plus code overhead + await wait_flag(flag) + assert looptime == 0 -async def test_waiting_for_raised_during_the_wait(flag, timer): +async def test_waiting_for_raised_during_the_wait(flag, looptime): async def raise_delayed(delay: float) -> None: await asyncio.sleep(delay) await raise_flag(flag) # tested separately above - asyncio.create_task(raise_delayed(0.2)) - with timer: - await wait_flag(flag) - assert 0.2 <= timer.seconds < 0.5 # near-instant once raised + # Beware: sync primitives consume the real time. + asyncio.create_task(raise_delayed(0.123)) + await wait_flag(flag) + assert looptime == 0.123 diff --git a/tests/primitives/test_toggles.py b/tests/primitives/test_toggles.py index d670bbd9..ca852904 100644 --- a/tests/primitives/test_toggles.py +++ b/tests/primitives/test_toggles.py @@ -37,50 +37,48 @@ async def test_turning_off(): assert toggle.is_off() -async def test_waiting_until_on_fails_when_not_turned_on(): +async def test_waiting_until_on_fails_when_not_turned_on(looptime): toggle = Toggle(False) with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(toggle.wait_for(True), timeout=0.1) - + await asyncio.wait_for(toggle.wait_for(True), timeout=1.23) assert toggle.is_off() + assert looptime == 1.23 -async def test_waiting_until_off_fails_when_not_turned_off(): +async def test_waiting_until_off_fails_when_not_turned_off(looptime): toggle = Toggle(True) with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(toggle.wait_for(False), timeout=0.1) - + await asyncio.wait_for(toggle.wait_for(False), timeout=1.23) assert toggle.is_on() + assert looptime == 1.23 -async def test_waiting_until_on_wakes_when_turned_on(timer): +async def test_waiting_until_on_wakes_when_turned_on(looptime): toggle = Toggle(False) async def delayed_turning_on(delay: float): await asyncio.sleep(delay) await toggle.turn_to(True) - with timer: - asyncio.create_task(delayed_turning_on(0.05)) - await toggle.wait_for(True) + asyncio.create_task(delayed_turning_on(9)) + await toggle.wait_for(True) assert toggle.is_on() - assert timer.seconds < 0.5 # approx. 
0.05 plus some code overhead + assert looptime == 9 -async def test_waiting_until_off_wakes_when_turned_off(timer): +async def test_waiting_until_off_wakes_when_turned_off(looptime): toggle = Toggle(True) async def delayed_turning_off(delay: float): await asyncio.sleep(delay) await toggle.turn_to(False) - with timer: - asyncio.create_task(delayed_turning_off(0.05)) - await toggle.wait_for(False) + asyncio.create_task(delayed_turning_off(9)) + await toggle.wait_for(False) assert toggle.is_off() - assert timer.seconds < 0.5 # approx. 0.05 plus some code overhead + assert looptime == 9 async def test_secures_against_usage_as_a_boolean(): diff --git a/tests/primitives/test_togglesets.py b/tests/primitives/test_togglesets.py index 8cb71324..6b8ea6b3 100644 --- a/tests/primitives/test_togglesets.py +++ b/tests/primitives/test_togglesets.py @@ -195,25 +195,27 @@ async def test_all_toggles_must_be_off_for_anytoggleset_to_be_off(fn): @pytest.mark.parametrize('fn', [all, any]) -async def test_waiting_until_on_fails_when_not_turned_on(fn): +async def test_waiting_until_on_fails_when_not_turned_on(fn, looptime): toggleset = ToggleSet(fn) await toggleset.make_toggle(False) with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(toggleset.wait_for(True), timeout=0.1) + await asyncio.wait_for(toggleset.wait_for(True), timeout=1.23) assert toggleset.is_off() + assert looptime == 1.23 @pytest.mark.parametrize('fn', [all, any]) -async def test_waiting_until_off_fails_when_not_turned_off(fn): +async def test_waiting_until_off_fails_when_not_turned_off(fn, looptime): toggleset = ToggleSet(fn) await toggleset.make_toggle(True) with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(toggleset.wait_for(False), timeout=0.1) + await asyncio.wait_for(toggleset.wait_for(False), timeout=1.23) assert toggleset.is_on() + assert looptime == 1.23 @pytest.mark.parametrize('fn', [all, any]) -async def test_waiting_until_on_wakes_when_turned_on(fn, timer): +async def test_waiting_until_on_wakes_when_turned_on(fn, looptime): toggleset = ToggleSet(fn) toggle = await toggleset.make_toggle(False) @@ -221,16 +223,15 @@ async def delayed_turning_on(delay: float): await asyncio.sleep(delay) await toggle.turn_to(True) - with timer: - asyncio.create_task(delayed_turning_on(0.05)) - await asyncio.wait_for(toggleset.wait_for(True), timeout=1.0) + asyncio.create_task(delayed_turning_on(9)) + await toggleset.wait_for(True) assert toggleset.is_on() - assert timer.seconds < 0.5 # approx. 0.05 plus some code overhead + assert looptime == 9 @pytest.mark.parametrize('fn', [all, any]) -async def test_waiting_until_off_wakes_when_turned_off(fn, timer): +async def test_waiting_until_off_wakes_when_turned_off(fn, looptime): toggleset = ToggleSet(fn) toggle = await toggleset.make_toggle(True) @@ -238,12 +239,11 @@ async def delayed_turning_off(delay: float): await asyncio.sleep(delay) await toggle.turn_to(False) - with timer: - asyncio.create_task(delayed_turning_off(0.05)) - await asyncio.wait_for(toggleset.wait_for(False), timeout=1.0) + asyncio.create_task(delayed_turning_off(9)) + await toggleset.wait_for(False) assert toggleset.is_off() - assert timer.seconds < 0.5 # approx. 
0.05 plus some code overhead + assert looptime == 9 @pytest.mark.parametrize('fn', [all, any]) diff --git a/tests/reactor/test_queueing.py b/tests/reactor/test_queueing.py index 22e3ea55..608865f7 100644 --- a/tests/reactor/test_queueing.py +++ b/tests/reactor/test_queueing.py @@ -46,7 +46,7 @@ ]) @pytest.mark.usefixtures('watcher_limited') -async def test_watchevent_demultiplexing(worker_mock, timer, resource, processor, +async def test_watchevent_demultiplexing(worker_mock, looptime, resource, processor, settings, stream, events, uids, cnts): """ Verify that every unique uid goes into its own queue+worker, which are never shared. """ @@ -60,17 +60,16 @@ async def test_watchevent_demultiplexing(worker_mock, timer, resource, processor stream.close(namespace=None) # Run the watcher (near-instantly and test-blocking). - with timer: - await watcher( - namespace=None, - resource=resource, - settings=settings, - processor=processor, - ) + await watcher( + namespace=None, + resource=resource, + settings=settings, + processor=processor, + ) # Extra-check: verify that the real workers were not involved: # they would do batching, which is absent in the mocked workers. - assert timer.seconds < settings.batching.batch_window + assert looptime == 0 # The processor must not be called by the watcher, only by the worker. # But the worker (even if mocked) must be called & awaited by the watcher. @@ -122,32 +121,30 @@ async def test_watchevent_demultiplexing(worker_mock, timer, resource, processor ]) @pytest.mark.usefixtures('watcher_limited') -async def test_watchevent_batching(settings, resource, processor, timer, - stream, events, uids, vals, event_loop): +async def test_watchevent_batching(settings, resource, processor, + stream, events, uids, vals, looptime): """ Verify that only the last event per uid is actually handled. """ # Override the default timeouts to make the tests faster. - settings.batching.idle_timeout = 100 # should not be involved, fail if it is - settings.batching.exit_timeout = 100 # should exit instantly, fail if it didn't - settings.batching.batch_window = 0.3 # the time period being tested (make bigger than overhead) + settings.batching.idle_timeout = 999 # should not be involved, fail if it is + settings.batching.exit_timeout = 999 # should exit instantly, fail if it didn't + settings.batching.batch_window = 123 # the time period being tested # Inject the events of unique objects - to produce a few streams/workers. stream.feed(events, namespace=None) stream.close(namespace=None) # Run the watcher (near-instantly and test-blocking). - with timer: - await watcher( - namespace=None, - resource=resource, - settings=settings, - processor=processor, - ) + await watcher( + namespace=None, + resource=resource, + settings=settings, + processor=processor, + ) # Should be batched strictly once (never twice). Note: multiple uids run concurrently, # so they all are batched in parallel, and the timing remains the same. - assert timer.seconds > settings.batching.batch_window * 1 - assert timer.seconds < settings.batching.batch_window * 2 + assert looptime == 123 # Was the processor called at all? Awaited as needed for async fns? assert processor.await_count > 0 @@ -184,10 +181,10 @@ async def test_garbage_collection_of_streams( ): # Override the default timeouts to make the tests faster. 
- settings.batching.exit_timeout = 100 # should exit instantly, fail if it didn't - settings.batching.idle_timeout = .05 # finish workers faster, but not as fast as batching - settings.batching.batch_window = .01 # minimize the effects of batching (not our interest) - settings.watching.reconnect_backoff = 1.0 # to prevent src depletion + settings.batching.exit_timeout = 999 # should exit instantly, fail if it didn't + settings.batching.idle_timeout = 5 # finish workers faster, but not as fast as batching + settings.batching.batch_window = 1 # minimize the effects of batching (not our interest) + settings.watching.reconnect_backoff = 100 # to prevent src depletion # Inject the events of unique objects - to produce a few streams/workers. stream.feed(events, namespace=None) @@ -196,7 +193,7 @@ async def test_garbage_collection_of_streams( # Give it a moment to populate the streams and spawn all the workers. # Intercept and remember _any_ seen dict of streams for further checks. while worker_spy.call_count < unique: - await asyncio.sleep(0.001) # give control to the loop + await asyncio.sleep(0) # give control to the loop streams = worker_spy.call_args_list[-1][1]['streams'] signaller: asyncio.Condition = worker_spy.call_args_list[0][1]['signaller'] @@ -223,7 +220,7 @@ async def test_garbage_collection_of_streams( # Let the workers to actually exit and gc their local scopes with variables. # The jobs can take a tiny moment more, but this is noticeable in the tests. - await asyncio.sleep(0.1) + await asyncio.sleep(0) # For PyPy: force the gc! (GC can be delayed in PyPy, unlike in CPython.) # https://doc.pypy.org/en/latest/cpython_differences.html#differences-related-to-garbage-collection-strategies diff --git a/tests/references/test_backbone.py b/tests/references/test_backbone.py index 767c352d..2060a3b9 100644 --- a/tests/references/test_backbone.py +++ b/tests/references/test_backbone.py @@ -47,23 +47,23 @@ async def test_refill_is_cumulative_ie_does_not_reset(): assert set(backbone) == {NAMESPACES, EVENTS} -async def test_waiting_for_absent_resources_never_ends(timer): +async def test_waiting_for_absent_resources_never_ends(looptime): backbone = Backbone() with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(backbone.wait_for(NAMESPACES), timeout=0.1) + await asyncio.wait_for(backbone.wait_for(NAMESPACES), timeout=1.23) + assert looptime == 1.23 -async def test_waiting_for_preexisting_resources_ends_instantly(timer): +async def test_waiting_for_preexisting_resources_ends_instantly(looptime): resource = Resource('', 'v1', 'namespaces') backbone = Backbone() await backbone.fill(resources=[resource]) - with timer: - found_resource = await backbone.wait_for(NAMESPACES) - assert timer.seconds < 0.1 + found_resource = await backbone.wait_for(NAMESPACES) + assert looptime == 0 assert found_resource == resource -async def test_waiting_for_delayed_resources_ends_once_delivered(timer): +async def test_waiting_for_delayed_resources_ends_once_delivered(looptime): resource = Resource('', 'v1', 'namespaces') backbone = Backbone() @@ -71,9 +71,8 @@ async def delayed_injection(delay: float): await asyncio.sleep(delay) await backbone.fill(resources=[resource]) - task = asyncio.create_task(delayed_injection(0.1)) - with timer: - found_resource = await backbone.wait_for(NAMESPACES) + task = asyncio.create_task(delayed_injection(123)) + found_resource = await backbone.wait_for(NAMESPACES) await task - assert 0.1 < timer.seconds < 0.11 + assert looptime == 123 assert found_resource == resource diff 
--git a/tests/test_async.py b/tests/test_async.py index 14792cd7..90496223 100644 --- a/tests/test_async.py +++ b/tests/test_async.py @@ -6,24 +6,18 @@ _async_was_executed = False -async def test_async_tests_are_enabled(timer): +async def test_async_tests_are_enabled(): global _async_was_executed _async_was_executed = True # asserted in a sync-test below. - with timer as t: - await asyncio.sleep(0.5) - assert t.seconds > 0.5 # real sleep - - -async def test_async_mocks_are_enabled(timer, mocker): +async def test_async_mocks_are_enabled(mocker, looptime): p = mocker.patch('asyncio.sleep') - with timer as t: - await asyncio.sleep(1.0) + await asyncio.sleep(1.0) assert p.call_count > 0 assert p.await_count > 0 - assert t.seconds < 0.01 # mocked sleep + assert looptime == 0 def test_async_test_was_executed_and_awaited(): diff --git a/tests/timing/test_sleeping.py b/tests/timing/test_sleeping.py index dfc8d77e..4bfc8fcb 100644 --- a/tests/timing/test_sleeping.py +++ b/tests/timing/test_sleeping.py @@ -5,24 +5,21 @@ from kopf._cogs.aiokits.aiotime import sleep -async def test_the_only_delay_is_awaited(timer): - with timer: - unslept = await sleep(0.10) - assert 0.10 <= timer.seconds < 0.11 +async def test_the_only_delay_is_awaited(looptime): + unslept = await sleep(123) + assert looptime == 123 assert unslept is None -async def test_the_shortest_delay_is_awaited(timer): - with timer: - unslept = await sleep([0.10, 0.20]) - assert 0.10 <= timer.seconds < 0.11 +async def test_the_shortest_delay_is_awaited(looptime): + unslept = await sleep([123, 456]) + assert looptime == 123 assert unslept is None -async def test_specific_delays_only_are_awaited(timer): - with timer: - unslept = await sleep([0.10, None]) - assert 0.10 <= timer.seconds < 0.11 +async def test_specific_delays_only_are_awaited(looptime): + unslept = await sleep([123, None]) + assert looptime == 123 assert unslept is None @@ -31,10 +28,9 @@ async def test_specific_delays_only_are_awaited(timer): pytest.param([-100, -10], id='all-negative'), pytest.param(-10, id='alone'), ]) -async def test_negative_delays_skip_sleeping(timer, delays): - with timer: - unslept = await sleep(delays) - assert timer.seconds < 0.01 +async def test_negative_delays_skip_sleeping(looptime, delays): + unslept = await sleep(delays) + assert looptime == 0 assert unslept is None @@ -42,36 +38,32 @@ async def test_negative_delays_skip_sleeping(timer, delays): pytest.param([], id='empty-list'), pytest.param([None], id='list-of-none'), ]) -async def test_no_delays_skip_sleeping(timer, delays): - with timer: - unslept = await sleep(delays) - assert timer.seconds < 0.01 +async def test_no_delays_skip_sleeping(looptime, delays): + unslept = await sleep(delays) + assert looptime == 0 assert unslept is None -async def test_by_event_set_before_time_comes(timer): +async def test_by_event_set_before_time_comes(looptime): event = asyncio.Event() - asyncio.get_running_loop().call_later(0.07, event.set) - with timer: - unslept = await sleep(0.10, event) + asyncio.get_running_loop().call_later(7, event.set) + unslept = await sleep(10, event) assert unslept is not None - assert 0.02 <= unslept <= 0.04 - assert 0.06 <= timer.seconds <= 0.08 + assert unslept == 3 + assert looptime == 7 -async def test_with_zero_time_and_event_initially_cleared(timer): +async def test_with_zero_time_and_event_initially_cleared(looptime): event = asyncio.Event() event.clear() - with timer: - unslept = await sleep(0, event) - assert timer.seconds <= 0.01 + unslept = await sleep(0, event) + assert 
looptime == 0 assert unslept is None -async def test_with_zero_time_and_event_initially_set(timer): +async def test_with_zero_time_and_event_initially_set(looptime): event = asyncio.Event() event.set() - with timer: - unslept = await sleep(0, event) - assert timer.seconds <= 0.01 + unslept = await sleep(0, event) + assert looptime == 0 assert not unslept # 0/None; undefined for such case: both goals reached. diff --git a/tests/timing/test_throttling.py b/tests/timing/test_throttling.py index 4a27faed..7f03db8b 100644 --- a/tests/timing/test_throttling.py +++ b/tests/timing/test_throttling.py @@ -6,10 +6,7 @@ from kopf._core.actions.throttlers import Throttler, throttled - -@pytest.fixture(autouse=True) -async def clock(mocker): - return mocker.patch.object(asyncio.get_running_loop(), 'time', return_value=0) +pytestmark = pytest.mark.looptime(start=1000) @pytest.fixture(autouse=True) @@ -164,12 +161,11 @@ async def test_renews_on_repeated_failure(sleep): assert sleep.mock_calls == [call(234, wakeup=None)] -async def test_interruption(clock, sleep): +async def test_interruption(sleep): wakeup = asyncio.Event() logger = logging.getLogger() throttler = Throttler() - clock.return_value = 1000 # simulated "now" sleep.return_value = 55 # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[123, 234], wakeup=wakeup): raise Exception() @@ -180,18 +176,17 @@ async def test_interruption(clock, sleep): assert sleep.mock_calls == [call(123, wakeup=wakeup)] -async def test_continuation_with_success(clock, sleep): +async def test_continuation_with_success(sleep): wakeup = asyncio.Event() logger = logging.getLogger() throttler = Throttler() - clock.return_value = 1000 # simulated "now" sleep.return_value = 55 # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[123, 234], wakeup=wakeup): raise Exception() + await asyncio.sleep(77) sleep.reset_mock() - clock.return_value = 1077 # simulated "now" sleep.return_value = None # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[...], wakeup=wakeup): pass @@ -202,18 +197,17 @@ async def test_continuation_with_success(clock, sleep): assert sleep.mock_calls == [call(123 - 77, wakeup=wakeup)] -async def test_continuation_with_error(clock, sleep): +async def test_continuation_with_error(sleep): wakeup = asyncio.Event() logger = logging.getLogger() throttler = Throttler() - clock.return_value = 1000 # simulated "now" sleep.return_value = 55 # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[123, 234], wakeup=wakeup): raise Exception() + await asyncio.sleep(77) sleep.reset_mock() - clock.return_value = 1077 # simulated "now" sleep.return_value = None # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[...], wakeup=wakeup): raise Exception() @@ -224,18 +218,17 @@ async def test_continuation_with_error(clock, sleep): assert sleep.mock_calls == [call(123 - 77, wakeup=wakeup), call(234, wakeup=wakeup)] -async def test_continuation_when_overdue(clock, sleep): +async def test_continuation_when_overdue(sleep): wakeup = asyncio.Event() logger = logging.getLogger() throttler = Throttler() - clock.return_value = 1000 # simulated "now" sleep.return_value = 55 # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[123, 234], wakeup=wakeup): raise Exception() + await asyncio.sleep(1000) sleep.reset_mock() - clock.return_value = 2000 # simulated 
"now" sleep.return_value = None # simulated sleep time left async with throttled(throttler=throttler, logger=logger, delays=[...], wakeup=wakeup): raise Exception() diff --git a/tests/utilities/aiotasks/test_scheduler.py b/tests/utilities/aiotasks/test_scheduler.py index 4a5a933c..d69dbf06 100644 --- a/tests/utilities/aiotasks/test_scheduler.py +++ b/tests/utilities/aiotasks/test_scheduler.py @@ -5,8 +5,6 @@ from kopf._cogs.aiokits.aiotasks import Scheduler -CODE_OVERHEAD = 0.01 - async def f(mock, *args): try: @@ -14,7 +12,7 @@ async def f(mock, *args): for arg in args: if isinstance(arg, asyncio.Event): arg.set() - elif isinstance(arg, float): + elif isinstance(arg, (int, float)): await asyncio.sleep(arg) elif callable(arg): arg() @@ -24,70 +22,62 @@ async def f(mock, *args): mock('finished') -async def test_empty_scheduler_lifecycle(timer): - with timer: - scheduler = Scheduler() - assert scheduler.empty() - await scheduler.wait() - assert scheduler.empty() - await scheduler.close() - assert scheduler.empty() - assert timer.seconds < CODE_OVERHEAD +async def test_empty_scheduler_lifecycle(looptime): + scheduler = Scheduler() + assert scheduler.empty() + await scheduler.wait() + assert scheduler.empty() + await scheduler.close() + assert scheduler.empty() + assert looptime == 0 -async def test_task_spawning_and_graceful_finishing(timer): +async def test_task_spawning_and_graceful_finishing(looptime): mock = Mock() flag1 = asyncio.Event() flag2 = asyncio.Event() scheduler = Scheduler() - result = await scheduler.spawn(f(mock, flag1, 0.1, flag2)) + result = await scheduler.spawn(f(mock, flag1, 123, flag2)) assert result is None - with timer: - await flag1.wait() - assert timer.seconds < CODE_OVERHEAD - assert mock.call_args[0][0] == 'started' + await flag1.wait() + assert looptime == 0 + assert mock.call_args_list[0][0][0] == 'started' - with timer: - await flag2.wait() - assert timer.seconds > 0.1 - assert timer.seconds < 0.1 + CODE_OVERHEAD - assert mock.call_args[0][0] == 'finished' + await flag2.wait() + assert looptime == 123 + assert mock.call_args_list[1][0][0] == 'finished' await scheduler.close() -async def test_task_spawning_and_cancellation(timer): +async def test_task_spawning_and_cancellation(looptime): mock = Mock() flag1 = asyncio.Event() flag2 = asyncio.Event() scheduler = Scheduler() - result = await scheduler.spawn(f(mock, flag1, 1.0, flag2)) + result = await scheduler.spawn(f(mock, flag1, 123, flag2)) assert result is None - with timer: - await flag1.wait() - assert timer.seconds < CODE_OVERHEAD - assert mock.call_args[0][0] == 'started' + await flag1.wait() + assert looptime == 0 + assert mock.call_args_list[0][0][0] == 'started' - with timer: - await scheduler.close() - assert timer.seconds < CODE_OVERHEAD # near-instant - assert mock.call_args[0][0] == 'cancelled' + await scheduler.close() + assert looptime == 0 + assert mock.call_args_list[1][0][0] == 'cancelled' async def test_no_tasks_are_accepted_after_closing(): scheduler = Scheduler() await scheduler.close() - assert scheduler._closed assert scheduler._spawning_task.done() assert scheduler._cleaning_task.done() - with pytest.raises(RuntimeError, match=r"Cannot add new coroutines"): - await scheduler.spawn(f(Mock(), 1.0)) + await scheduler.spawn(f(Mock(), 123)) async def test_successes_are_not_reported(): @@ -121,7 +111,7 @@ async def test_exceptions_are_reported(): assert exception_handler.call_args[0][0] is exception -async def test_tasks_are_parallel_if_limit_is_not_reached(timer): +async def 
test_tasks_are_parallel_if_limit_is_not_reached(looptime):
     """
     time:  ////////----------------------0.1s------------------0.2s--///
     task1: ->spawn->start->sleep->finish->|
@@ -133,23 +123,19 @@ async def test_tasks_are_parallel_if_limit_is_not_reached(timer):
     task2_finished = asyncio.Event()
     scheduler = Scheduler(limit=2)
 
-    with timer:
-        await scheduler.spawn(f(Mock(), task1_started, 0.1, task1_finished))
-        await scheduler.spawn(f(Mock(), task2_started, 0.1, task2_finished))
-    assert timer.seconds < CODE_OVERHEAD  # i.e. spawning is not not blocking
+    await scheduler.spawn(f(Mock(), task1_started, 9, task1_finished))
+    await scheduler.spawn(f(Mock(), task2_started, 9, task2_finished))
+    assert looptime == 0  # i.e. spawning is not blocking
 
-    with timer:
-        await task1_finished.wait()
-        assert task2_started.is_set()
-        await task2_finished.wait()
-
-    assert timer.seconds > 0.1
-    assert timer.seconds < 0.1 + CODE_OVERHEAD
+    await task1_finished.wait()
+    assert task2_started.is_set()
+    await task2_finished.wait()
+    assert looptime == 9
 
     await scheduler.close()
 
 
-async def test_tasks_are_pending_if_limit_is_reached(timer):
+async def test_tasks_are_pending_if_limit_is_reached(looptime):
     """
     time:  ////////----------------------0.1s------------------0.2s--///
     task1: ->spawn->start->sleep->finish->|
@@ -161,17 +147,13 @@ async def test_tasks_are_pending_if_limit_is_reached(timer):
     task2_finished = asyncio.Event()
     scheduler = Scheduler(limit=1)
 
-    with timer:
-        await scheduler.spawn(f(Mock(), task1_started, 0.1, task1_finished))
-        await scheduler.spawn(f(Mock(), task2_started, 0.1, task2_finished))
-    assert timer.seconds < CODE_OVERHEAD  # i.e. spawning is not not blocking
-
-    with timer:
-        await task1_finished.wait()
-        assert not task2_started.is_set()
-        await task2_finished.wait()
+    await scheduler.spawn(f(Mock(), task1_started, 9, task1_finished))
+    await scheduler.spawn(f(Mock(), task2_started, 9, task2_finished))
+    assert looptime == 0  # i.e. 
spawning is not blocking
 
+    await task1_finished.wait()
+    assert not task2_started.is_set()
+    await task2_finished.wait()
+    assert looptime == 18
 
     await scheduler.close()
diff --git a/tests/utilities/aiotasks/test_task_selection.py b/tests/utilities/aiotasks/test_task_selection.py
index a3bf17b8..3754ec56 100644
--- a/tests/utilities/aiotasks/test_task_selection.py
+++ b/tests/utilities/aiotasks/test_task_selection.py
@@ -7,7 +7,7 @@ async def test_alltasks_exclusion():
     flag = asyncio.Event()
     task1 = create_task(flag.wait())
     task2 = create_task(flag.wait())
-    done, pending = await asyncio.wait([task1, task2], timeout=0.01)
+    done, pending = await asyncio.wait([task1, task2], timeout=0.01)  # let them start
     assert not done
 
     tasks = await all_tasks(ignored=[task2])
diff --git a/tests/utilities/aiotasks/test_task_stopping.py b/tests/utilities/aiotasks/test_task_stopping.py
index 3717bd46..5ee96bd8 100644
--- a/tests/utilities/aiotasks/test_task_stopping.py
+++ b/tests/utilities/aiotasks/test_task_stopping.py
@@ -17,25 +17,27 @@ async def stuck() -> None:
     await asyncio.Event().wait()
 
 
-async def test_stop_with_no_tasks(assert_logs, caplog):
+async def test_stop_with_no_tasks(assert_logs, caplog, looptime):
     logger = logging.getLogger()
     caplog.set_level(0)
     done, pending = await stop([], title='sample', logger=logger)
     assert not done
     assert not pending
     assert_logs(["Sample tasks stopping is skipped: no tasks given."])
+    assert looptime == 0
 
 
-async def test_stop_with_no_tasks_when_quiet(assert_logs, caplog):
+async def test_stop_with_no_tasks_when_quiet(assert_logs, caplog, looptime):
     logger = logging.getLogger()
     caplog.set_level(0)
     done, pending = await stop([], title='sample', logger=logger, quiet=True)
     assert not done
     assert not pending
     assert not caplog.messages
+    assert looptime == 0
 
 
-async def test_stop_immediately_with_finishing(assert_logs, caplog):
+async def test_stop_immediately_with_finishing(assert_logs, caplog, looptime):
     logger = logging.getLogger()
     caplog.set_level(0)
     task1 = create_task(simple())
     task2 = create_task(stuck())
@@ -46,9 +48,10 @@ async def test_stop_immediately_with_finishing(assert_logs, caplog):
     assert_logs(["Sample tasks are stopped: finishing normally"])
     assert task1.cancelled()
     assert task2.cancelled()
+    assert looptime == 0
 
 
-async def test_stop_immediately_with_cancelling(assert_logs, caplog):
+async def test_stop_immediately_with_cancelling(assert_logs, caplog, looptime):
     logger = logging.getLogger()
     caplog.set_level(0)
     task1 = create_task(simple())
     task2 = create_task(stuck())
@@ -59,27 +62,31 @@ async def test_stop_immediately_with_cancelling(assert_logs, caplog):
     assert_logs(["Sample tasks are stopped: cancelling normally"])
     assert task1.cancelled()
     assert task2.cancelled()
+    assert looptime == 0
 
 
 @pytest.mark.parametrize('cancelled', [False, True])
-async def test_stop_iteratively(assert_logs, caplog, cancelled):
+async def test_stop_iteratively(assert_logs, caplog, cancelled, looptime):
     logger = logging.getLogger()
     caplog.set_level(0)
     task1 = create_task(simple())
     task2 = create_task(stuck())
-    stask = create_task(stop([task1, task2], title='sample', logger=logger, interval=0.01, cancelled=cancelled))
+    stask = create_task(stop([task1, task2], title='sample', logger=logger, interval=1, cancelled=cancelled))
+    assert looptime == 0
 
-    done, pending = await asyncio.wait({stask}, timeout=0.011)
+    done, pending = await asyncio.wait({stask}, timeout=10)
     assert not done
     assert task1.done()
     assert not task2.done()
+    assert looptime == 10
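 
+    # Note: the loop time is virtual. The wait above burnt its entire 10-second
+    # timeout, while the wait below returns as soon as the cancelled task is done,
+    # so the loop clock stays at 10 (hence the "not 20!" note below).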
task2.cancel() - done, pending = await asyncio.wait({stask}, timeout=0.011) + done, pending = await asyncio.wait({stask}, timeout=10) assert done assert task1.done() assert task2.done() + assert looptime == 10 # not 20! assert_logs([ r"Sample tasks are not stopped: (finishing|cancelling) normally; tasks left: \{ Date: Sun, 30 Jul 2023 21:04:12 +0200 Subject: [PATCH 11/11] =?UTF-8?q?WIP:=20mixed=20dummy=20+=20looptime=20tes?= =?UTF-8?q?ts=20for=20daemons=20=E2=80=94=20split=20and=20extract?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sergey Vasilyev --- tests/handling/daemons/conftest.py | 64 +++----- tests/handling/daemons/test_daemon_errors.py | 116 ++++++-------- .../daemons/test_daemon_filtration.py | 11 +- .../daemons/test_daemon_rematching.py | 11 +- .../handling/daemons/test_daemon_spawning.py | 32 ++-- .../daemons/test_daemon_termination.py | 67 ++++---- tests/handling/daemons/test_timer_errors.py | 143 ++++++++---------- .../handling/daemons/test_timer_filtration.py | 11 +- .../handling/daemons/test_timer_intervals.py | 47 +++--- .../handling/daemons/test_timer_triggering.py | 46 +++--- 10 files changed, 234 insertions(+), 314 deletions(-) diff --git a/tests/handling/daemons/conftest.py b/tests/handling/daemons/conftest.py index d64e3c9a..2f3d39aa 100644 --- a/tests/handling/daemons/conftest.py +++ b/tests/handling/daemons/conftest.py @@ -1,10 +1,10 @@ import asyncio +import collections import contextlib -import time +from typing import Optional -import freezegun import pytest -from mock import MagicMock, patch +from mock import MagicMock import kopf from kopf._cogs.aiokits.aiotoggles import ToggleSet @@ -20,23 +20,32 @@ class DaemonDummy: def __init__(self): super().__init__() self.mock = MagicMock() - self.kwargs = {} + self._flag_statuses = collections.defaultdict(lambda: False) self.steps = { 'called': asyncio.Event(), 'finish': asyncio.Event(), 'error': asyncio.Event(), } + self.called = asyncio.Condition() + self.failed = asyncio.Event() + self.finished = asyncio.Event() - async def wait_for_daemon_done(self): - stopped = self.kwargs['stopped'] + async def wait_for_daemon_done(self) -> None: + stopped = self.mock.call_args[1]['stopped'] await stopped.wait() - while not stopped.reason & stopped.reason.DONE: + while stopped.reason is None or not stopped.reason & stopped.reason.DONE: await asyncio.sleep(0) # give control back to asyncio event loop @pytest.fixture() -def dummy(): - return DaemonDummy() +async def dummy(simulate_cycle): + dummy = DaemonDummy() + yield dummy + + # Cancel the background tasks, if any. + event_object = {'metadata': {'deletionTimestamp': '...'}} + await simulate_cycle(event_object) + await dummy.wait_for_daemon_done() @pytest.fixture() @@ -52,7 +61,11 @@ def _merge_dicts(src, dst): else: dst[key] = val - async def _simulate_cycle(event_object: RawBody): + async def _simulate_cycle( + event_object: RawBody, + *, + stream_pressure: Optional[asyncio.Event] = None, + ) -> None: mocker.resetall() await process_resource_event( @@ -65,6 +78,7 @@ async def _simulate_cycle(event_object: RawBody): indexers=OperatorIndexers(), raw_event={'type': 'irrelevant', 'object': event_object}, event_queue=asyncio.Queue(), + stream_pressure=stream_pressure, ) # Do the same as k8s does: merge the patches into the object. 
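For context: the frozen_time/manual_time fixtures removed in the next hunk are superseded by the looptime virtual clock used throughout this series, where awaited delays advance the event loop's time instantly instead of sleeping for real. A minimal sketch of the pattern (illustrative only; it assumes the looptime pytest plugin is installed and async tests are enabled in auto mode):

    import asyncio

    async def test_sleeps_on_the_virtual_clock(looptime):
        await asyncio.sleep(100)   # returns almost instantly in real time
        assert looptime == 100     # yet the loop clock advanced by exactly 100 seconds

This is why the rewritten tests can assert exact equalities such as "assert looptime == 1.23" instead of wall-clock ranges.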
@@ -96,33 +110,3 @@ async def background_daemon_killer(settings, memories, operator_paused): with contextlib.suppress(asyncio.CancelledError): task.cancel() await task - - -@pytest.fixture() -async def frozen_time(): - """ - A helper to simulate time movements to step over long sleeps/timeouts. - """ - with freezegun.freeze_time("2020-01-01 00:00:00") as frozen: - # Use freezegun-supported time instead of system clocks -- for testing purposes only. - # NB: Patch strictly after the time is frozen -- to use fake_time(), not real time(). - # NB: StdLib's event loops use time.monotonic(), but uvloop uses its own C-level time, - # so patch the loop object directly instead of its implied underlying implementation. - with patch.object(asyncio.get_running_loop(), 'time', time.time): - yield frozen - - -# The time-driven tests mock the sleeps, and shift the time as much as it was requested to sleep. -# This makes the sleep realistic for the app code, though executed instantly for the tests. -@pytest.fixture() -def manual_time(k8s_mocked, frozen_time): - async def sleep_substitute(delay, *_, **__): - if delay is None: - pass - elif isinstance(delay, float): - frozen_time.tick(delay) - else: - frozen_time.tick(min(delay)) - - k8s_mocked.sleep.side_effect = sleep_substitute - yield frozen_time diff --git a/tests/handling/daemons/test_daemon_errors.py b/tests/handling/daemons/test_daemon_errors.py index 1bdd7bf1..12ffb716 100644 --- a/tests/handling/daemons/test_daemon_errors.py +++ b/tests/handling/daemons/test_daemon_errors.py @@ -1,3 +1,4 @@ +import asyncio import logging import kopf @@ -5,26 +6,21 @@ async def test_daemon_stopped_on_permanent_error( - settings, resource, dummy, manual_time, caplog, assert_logs, k8s_mocked, simulate_cycle): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) - @kopf.daemon(*resource, id='fn', backoff=0.01) + @kopf.daemon(*resource, id='fn', backoff=1.23) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) raise PermanentError("boo!") finalizer = settings.persistence.finalizer event_object = {'metadata': {'finalizers': [finalizer]}} await simulate_cycle(event_object) + await asyncio.sleep(123) # give it enough opportunities to misbehave (e.g. restart) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() - + assert looptime == 123 assert dummy.mock.call_count == 1 - assert k8s_mocked.patch.call_count == 0 - assert k8s_mocked.sleep.call_count == 0 assert_logs([ "Daemon 'fn' failed permanently: boo!", @@ -35,25 +31,21 @@ async def fn(**kwargs): async def test_daemon_stopped_on_arbitrary_errors_with_mode_permanent( - settings, resource, dummy, manual_time, caplog, assert_logs, k8s_mocked, simulate_cycle): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) - @kopf.daemon(*resource, id='fn', errors=ErrorsMode.PERMANENT, backoff=0.01) + @kopf.daemon(*resource, id='fn', errors=ErrorsMode.PERMANENT, backoff=1.23) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) raise Exception("boo!") finalizer = settings.persistence.finalizer event_object = {'metadata': {'finalizers': [finalizer]}} await simulate_cycle(event_object) + await asyncio.sleep(123) # give it enough opportunities to misbehave (e.g. 
restart) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() - + assert looptime == 123 assert dummy.mock.call_count == 1 - assert k8s_mocked.sleep.call_count == 0 assert_logs([ "Daemon 'fn' failed with an exception and will stop now: boo!", @@ -64,31 +56,25 @@ async def fn(**kwargs): async def test_daemon_retried_on_temporary_error( - registry, settings, resource, dummy, manual_time, - caplog, assert_logs, k8s_mocked, simulate_cycle): + registry, settings, resource, dummy, + caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + finished = asyncio.Event() - @kopf.daemon(*resource, id='fn', backoff=1.0) + @kopf.daemon(*resource, id='fn', backoff=1.23) async def fn(retry, **kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) if not retry: - raise TemporaryError("boo!", delay=1.0) + raise TemporaryError("boo!", delay=3.45) else: - dummy.steps['finish'].set() + finished.set() finalizer = settings.persistence.finalizer event_object = {'metadata': {'finalizers': [finalizer]}} await simulate_cycle(event_object) + await finished.wait() - await dummy.steps['called'].wait() - await dummy.steps['finish'].wait() - await dummy.wait_for_daemon_done() - - assert k8s_mocked.sleep.call_count == 1 # one for each retry - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] - + assert looptime == 3.45 assert_logs([ "Daemon 'fn' failed temporarily: boo!", "Daemon 'fn' succeeded.", @@ -97,70 +83,66 @@ async def fn(retry, **kwargs): async def test_daemon_retried_on_arbitrary_error_with_mode_temporary( - settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, manual_time): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + finished = asyncio.Event() - @kopf.daemon(*resource, id='fn', errors=ErrorsMode.TEMPORARY, backoff=1.0) + @kopf.daemon(*resource, id='fn', errors=ErrorsMode.TEMPORARY, backoff=1.23) async def fn(retry, **kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) if not retry: raise Exception("boo!") else: - dummy.steps['finish'].set() + finished.set() finalizer = settings.persistence.finalizer event_object = {'metadata': {'finalizers': [finalizer]}} await simulate_cycle(event_object) + await finished.wait() - await dummy.steps['called'].wait() - await dummy.steps['finish'].wait() - await dummy.wait_for_daemon_done() - - assert k8s_mocked.sleep.call_count == 1 # one for each retry - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] - + assert looptime == 1.23 assert_logs([ - "Daemon 'fn' failed with an exception and will try again in 1.0 seconds: boo!", + "Daemon 'fn' failed with an exception and will try again in 1.23 seconds: boo!", "Daemon 'fn' succeeded.", "Daemon 'fn' has exited on its own", ]) async def test_daemon_retried_until_retries_limit( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, manual_time): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() @kopf.daemon(*resource, id='fn', retries=3) async def fn(**kwargs): - dummy.kwargs = kwargs - dummy.steps['called'].set() - raise TemporaryError("boo!", delay=1.0) + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() + raise TemporaryError("boo!", delay=1.23) await simulate_cycle({}) - await 
dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + async with trigger: + await trigger.wait_for(lambda: any("but will" in m for m in caplog.messages)) - assert k8s_mocked.sleep.call_count == 2 # one between each retry (3 attempts - 2 sleeps) - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] - assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # [call#][args/kwargs][arg#] + assert looptime == 2.46 + assert dummy.mock.call_count == 3 async def test_daemon_retried_until_timeout( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, manual_time): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() - @kopf.daemon(*resource, id='fn', timeout=3.0) + @kopf.daemon(*resource, id='fn', timeout=4) async def fn(**kwargs): - dummy.kwargs = kwargs - dummy.steps['called'].set() - raise TemporaryError("boo!", delay=1.0) + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() + raise TemporaryError("boo!", delay=1.23) await simulate_cycle({}) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + async with trigger: + await trigger.wait_for(lambda: any("but will" in m for m in caplog.messages)) - assert k8s_mocked.sleep.call_count == 2 # one between each retry (3 attempts - 2 sleeps) - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] - assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # [call#][args/kwargs][arg#] + assert looptime == 3.69 + assert dummy.mock.call_count == 4 diff --git a/tests/handling/daemons/test_daemon_filtration.py b/tests/handling/daemons/test_daemon_filtration.py index b7a48c73..ea5bdf71 100644 --- a/tests/handling/daemons/test_daemon_filtration.py +++ b/tests/handling/daemons/test_daemon_filtration.py @@ -1,3 +1,4 @@ +import asyncio import logging import pytest @@ -11,22 +12,23 @@ async def test_daemon_filtration_satisfied( settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): caplog.set_level(logging.DEBUG) + executed = asyncio.Event() @kopf.daemon(*resource, id='fn', labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT}, annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT}) async def fn(**kwargs): - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) + executed.set() finalizer = settings.persistence.finalizer event_body = {'metadata': {'labels': {'a': 'value', 'b': '...'}, 'annotations': {'x': 'value', 'y': '...'}, 'finalizers': [finalizer]}} await simulate_cycle(event_body) + await executed.wait() - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + assert dummy.mock.call_count == 1 @pytest.mark.parametrize('labels, annotations', [ @@ -56,6 +58,7 @@ async def fn(**kwargs): 'annotations': annotations, 'finalizers': [finalizer]}} await simulate_cycle(event_body) + await asyncio.sleep(123) # give it enough time to do something when nothing is expected assert spawn_daemons.called assert spawn_daemons.call_args_list[0][1]['handlers'] == [] diff --git a/tests/handling/daemons/test_daemon_rematching.py b/tests/handling/daemons/test_daemon_rematching.py index 4442f743..36bf39a9 100644 --- a/tests/handling/daemons/test_daemon_rematching.py +++ b/tests/handling/daemons/test_daemon_rematching.py @@ -1,3 +1,4 @@ +import asyncio import logging import kopf @@ -7,19 +8,19 @@ async def test_running_daemon_is_stopped_when_mismatches( resource, dummy, looptime, mocker, caplog, 
assert_logs, k8s_mocked, simulate_cycle): caplog.set_level(logging.DEBUG) + executed = asyncio.Event() @kopf.daemon(*resource, id='fn', when=lambda **_: is_matching) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) + executed.set() await kwargs['stopped'].wait() # Ensure it is spawned while it is matching. (The same as the spawning tests.) mocker.resetall() is_matching = True await simulate_cycle({}) - await dummy.steps['called'].wait() + await executed.wait() assert dummy.mock.call_count == 1 # Ensure it is stopped once it stops matching. (The same as the termination tests.) @@ -29,5 +30,5 @@ async def fn(**kwargs): await dummy.wait_for_daemon_done() assert looptime == 0 - stopped = dummy.kwargs['stopped'] + stopped = dummy.mock.call_args[1]['stopped'] assert DaemonStoppingReason.FILTERS_MISMATCH in stopped.reason diff --git a/tests/handling/daemons/test_daemon_spawning.py b/tests/handling/daemons/test_daemon_spawning.py index 46b7d959..d341981a 100644 --- a/tests/handling/daemons/test_daemon_spawning.py +++ b/tests/handling/daemons/test_daemon_spawning.py @@ -1,41 +1,37 @@ +import asyncio import logging import kopf async def test_daemon_is_spawned_at_least_once( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + executed = asyncio.Event() @kopf.daemon(*resource, id='fn') async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) + executed.set() await simulate_cycle({}) + await executed.wait() - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() - + assert looptime == 0 assert dummy.mock.call_count == 1 # not restarted async def test_daemon_initial_delay_obeyed( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + executed = asyncio.Event() - @kopf.daemon(*resource, id='fn', initial_delay=1.0) + @kopf.daemon(*resource, id='fn', initial_delay=5.0) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) + executed.set() await simulate_cycle({}) + await executed.wait() - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() - - assert k8s_mocked.sleep.call_count >= 1 - assert k8s_mocked.sleep.call_count <= 2 # one optional extra call for sleep(None) - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] + assert looptime == 5.0 diff --git a/tests/handling/daemons/test_daemon_termination.py b/tests/handling/daemons/test_daemon_termination.py index 3a0e97e1..6228198a 100644 --- a/tests/handling/daemons/test_daemon_termination.py +++ b/tests/handling/daemons/test_daemon_termination.py @@ -9,13 +9,13 @@ async def test_daemon_exits_gracefully_and_instantly_on_resource_deletion( settings, resource, dummy, simulate_cycle, - caplog, assert_logs, k8s_mocked, frozen_time, mocker, looptime): + looptime, caplog, assert_logs, k8s_mocked, mocker): caplog.set_level(logging.DEBUG) # A daemon-under-test. 
@kopf.daemon(*resource, id='fn') async def fn(**kwargs): - dummy.kwargs = kwargs + dummy.mock(**kwargs) dummy.steps['called'].set() await kwargs['stopped'].wait() @@ -34,20 +34,19 @@ async def fn(**kwargs): await dummy.wait_for_daemon_done() assert looptime == 0 - assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == [] async def test_daemon_exits_gracefully_and_instantly_on_operator_exiting( settings, resource, dummy, simulate_cycle, background_daemon_killer, - caplog, assert_logs, k8s_mocked, frozen_time, mocker, looptime): + looptime, caplog, assert_logs, k8s_mocked, mocker): caplog.set_level(logging.DEBUG) # A daemon-under-test. @kopf.daemon(*resource, id='fn') async def fn(**kwargs): - dummy.kwargs = kwargs + dummy.mock(**kwargs) dummy.steps['called'].set() await kwargs['stopped'].wait() @@ -65,7 +64,6 @@ async def fn(**kwargs): await dummy.wait_for_daemon_done() assert looptime == 0 - assert k8s_mocked.sleep.call_count == 0 assert k8s_mocked.patch.call_count == 0 # To prevent double-cancelling of the scheduler's system tasks in the fixture, let them finish: @@ -76,13 +74,13 @@ async def fn(**kwargs): @pytest.mark.usefixtures('background_daemon_killer') async def test_daemon_exits_gracefully_and_instantly_on_operator_pausing( settings, memories, resource, dummy, simulate_cycle, conflicts_found, - caplog, assert_logs, k8s_mocked, frozen_time, mocker, looptime): + looptime, caplog, assert_logs, k8s_mocked, mocker): caplog.set_level(logging.DEBUG) # A daemon-under-test. @kopf.daemon(*resource, id='fn') async def fn(**kwargs): - dummy.kwargs = kwargs + dummy.mock(**kwargs) dummy.steps['called'].set() await kwargs['stopped'].wait() @@ -107,21 +105,17 @@ async def fn(**kwargs): assert not memory.daemons_memory.forever_stopped -async def test_daemon_exits_instantly_via_cancellation_with_backoff( +async def test_daemon_exits_instantly_on_cancellation_with_backoff( settings, resource, dummy, simulate_cycle, - caplog, assert_logs, k8s_mocked, frozen_time, mocker): + looptime, caplog, assert_logs, k8s_mocked, mocker): caplog.set_level(logging.DEBUG) - dummy.steps['finish'].set() # A daemon-under-test. - @kopf.daemon(*resource, id='fn', cancellation_backoff=5, cancellation_timeout=10) + @kopf.daemon(*resource, id='fn', cancellation_backoff=1.23, cancellation_timeout=10) async def fn(**kwargs): - dummy.kwargs = kwargs + dummy.mock(**kwargs) dummy.steps['called'].set() - try: - await asyncio.Event().wait() # this one is cancelled. - except asyncio.CancelledError: - await dummy.steps['finish'].wait() # simulated slow (non-instant) exiting. + await asyncio.Event().wait() # this one is cancelled. # Trigger spawning and wait until ready. Assume the finalizers are already added. finalizer = settings.persistence.finalizer event_object = {'metadata': {'finalizers': [finalizer]}} await simulate_cycle(event_object) event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'}) await simulate_cycle(event_object) - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 5.0 + assert looptime == 1.23 # i.e. it slept through the whole backoff time assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy'] # 2nd cycle: cancelling after the backoff is reached. Wait for cancellation timeout. 
mocker.resetall() - frozen_time.tick(5) # backoff time or slightly above it await simulate_cycle(event_object) - assert k8s_mocked.sleep.call_count == 0 + assert looptime == 1.23 # i.e. no additional sleeps happened assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == [] @@ -152,15 +144,15 @@ async def fn(**kwargs): await dummy.wait_for_daemon_done() -async def test_daemon_exits_slowly_via_cancellation_with_backoff( +async def test_daemon_exits_slowly_on_cancellation_with_backoff( settings, resource, dummy, simulate_cycle, - caplog, assert_logs, k8s_mocked, frozen_time, mocker): + looptime, caplog, assert_logs, k8s_mocked, mocker): caplog.set_level(logging.DEBUG) # A daemon-under-test. - @kopf.daemon(*resource, id='fn', cancellation_backoff=5, cancellation_timeout=10) + @kopf.daemon(*resource, id='fn', cancellation_backoff=1.23, cancellation_timeout=4.56) async def fn(**kwargs): - dummy.kwargs = kwargs + dummy.mock(**kwargs) dummy.steps['called'].set() try: await asyncio.Event().wait() # this one is cancelled. @@ -178,43 +170,39 @@ async def fn(**kwargs): event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'}) await simulate_cycle(event_object) - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 5.0 + assert looptime == 1.23 assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy'] # 2nd cycle: cancelling after the backoff is reached. Wait for cancellation timeout. mocker.resetall() - frozen_time.tick(5) # backoff time or slightly above it await simulate_cycle(event_object) - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 10.0 + assert looptime == 1.23 + 4.56 # i.e. it really spent all the timeout assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy'] # 3rd cycle: the daemon has exited, the resource should be unblocked from actual deletion. mocker.resetall() - frozen_time.tick(1) # any time below timeout dummy.steps['finish'].set() - await asyncio.sleep(0) + await asyncio.sleep(0) # let the daemon exit and all the routines trigger await simulate_cycle(event_object) await dummy.wait_for_daemon_done() - assert k8s_mocked.sleep.call_count == 0 + assert looptime == 1.23 + 4.56 # i.e. no additional sleeps happened assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == [] async def test_daemon_is_abandoned_due_to_cancellation_timeout_reached( settings, resource, dummy, simulate_cycle, - caplog, assert_logs, k8s_mocked, frozen_time, mocker): + looptime, caplog, assert_logs, k8s_mocked, mocker): caplog.set_level(logging.DEBUG) # A daemon-under-test. - @kopf.daemon(*resource, id='fn', cancellation_timeout=10) + @kopf.daemon(*resource, id='fn', cancellation_timeout=4.56) async def fn(**kwargs): - dummy.kwargs = kwargs + dummy.mock(**kwargs) dummy.steps['called'].set() try: await dummy.steps['finish'].wait() # this one is cancelled. 
@@ -232,18 +220,17 @@ async def fn(**kwargs): event_object.setdefault('metadata', {}).update({'deletionTimestamp': '...'}) await simulate_cycle(event_object) - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 10.0 + assert looptime == 4.56 assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['status']['kopf']['dummy'] # 2nd cycle: the daemon has exited, the resource should be unblocked from actual deletion. mocker.resetall() - frozen_time.tick(50) + await asyncio.sleep(1000) # unnecessary, but let's fast-forward time just in case with pytest.warns(ResourceWarning, match=r"Daemon .+ did not exit in time"): await simulate_cycle(event_object) - assert k8s_mocked.sleep.call_count == 0 + assert looptime == 1000 + 4.56 assert k8s_mocked.patch.call_count == 1 assert k8s_mocked.patch.call_args_list[0][1]['payload']['metadata']['finalizers'] == [] assert_logs(["Daemon 'fn' did not exit in time. Leaving it orphaned."]) diff --git a/tests/handling/daemons/test_timer_errors.py b/tests/handling/daemons/test_timer_errors.py index 934ff4fe..bd69e737 100644 --- a/tests/handling/daemons/test_timer_errors.py +++ b/tests/handling/daemons/test_timer_errors.py @@ -1,3 +1,4 @@ +import asyncio import logging import kopf @@ -5,26 +6,20 @@ async def test_timer_stopped_on_permanent_error( - settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) - @kopf.timer(*resource, id='fn', backoff=0.01, interval=1.0) + @kopf.timer(*resource, id='fn', backoff=1.23, interval=999) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - kwargs['stopped']._setter.set() # to exit the cycle + dummy.mock(**kwargs) raise PermanentError("boo!") event_object = {'metadata': {'finalizers': [settings.persistence.finalizer]}} await simulate_cycle(event_object) + await asyncio.sleep(123) # give it enough opportunities to misbehave (e.g. retry) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() - + assert looptime == 123 # no intervals used, as there were no retries assert dummy.mock.call_count == 1 - assert k8s_mocked.sleep.call_count == 1 # one for each retry - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 assert_logs([ "Timer 'fn' failed permanently: boo!", @@ -34,26 +29,20 @@ async def fn(**kwargs): async def test_timer_stopped_on_arbitrary_errors_with_mode_permanent( - settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) - @kopf.timer(*resource, id='fn', errors=ErrorsMode.PERMANENT, backoff=0.01, interval=1.0) + @kopf.timer(*resource, id='fn', errors=ErrorsMode.PERMANENT, backoff=1.23, interval=999) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - kwargs['stopped']._setter.set() # to exit the cycle + dummy.mock(**kwargs) raise Exception("boo!") event_object = {'metadata': {'finalizers': [settings.persistence.finalizer]}} await simulate_cycle(event_object) + await asyncio.sleep(123) # give it enough opportunities to misbehave (e.g. 
retry) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() - + assert looptime == 123 # no intervals used, as there were no retries assert dummy.mock.call_count == 1 - assert k8s_mocked.sleep.call_count == 1 # one for each retry - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 assert_logs([ "Timer 'fn' failed with an exception and will stop now: boo!", @@ -63,32 +52,23 @@ async def fn(**kwargs): async def test_timer_retried_on_temporary_error( - settings, resource, dummy, manual_time, - caplog, assert_logs, k8s_mocked, simulate_cycle): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + finished = asyncio.Event() - @kopf.timer(*resource, id='fn', backoff=1.0, interval=1.0) + @kopf.timer(*resource, id='fn', backoff=1.23, interval=2.34) async def fn(retry, **kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) if not retry: - raise TemporaryError("boo!", delay=1.0) + raise TemporaryError("boo!", delay=3.45) else: - kwargs['stopped']._setter.set() # to exit the cycle - dummy.steps['finish'].set() + finished.set() event_object = {'metadata': {'finalizers': [settings.persistence.finalizer]}} await simulate_cycle(event_object) + await finished.wait() - await dummy.steps['called'].wait() - await dummy.steps['finish'].wait() - await dummy.wait_for_daemon_done() - - assert k8s_mocked.sleep.call_count == 2 # one for each retry - assert k8s_mocked.sleep.call_args_list[0][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # interval - + assert looptime == 3.45 assert_logs([ "Timer 'fn' failed temporarily: boo!", "Timer 'fn' succeeded.", @@ -96,78 +76,73 @@ async def fn(retry, **kwargs): async def test_timer_retried_on_arbitrary_error_with_mode_temporary( - settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, manual_time): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + finished = asyncio.Event() - @kopf.timer(*resource, id='fn', errors=ErrorsMode.TEMPORARY, backoff=1.0, interval=1.0) + @kopf.timer(*resource, id='fn', errors=ErrorsMode.TEMPORARY, backoff=1.23, interval=2.34) async def fn(retry, **kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) if not retry: raise Exception("boo!") else: - kwargs['stopped']._setter.set() # to exit the cycle - dummy.steps['finish'].set() + finished.set() event_object = {'metadata': {'finalizers': [settings.persistence.finalizer]}} await simulate_cycle(event_object) + await finished.wait() - await dummy.steps['called'].wait() - await dummy.steps['finish'].wait() - await dummy.wait_for_daemon_done() - - assert k8s_mocked.sleep.call_count == 2 # one for each retry - assert k8s_mocked.sleep.call_args_list[0][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 # interval - + assert looptime == 1.23 assert_logs([ - "Timer 'fn' failed with an exception and will try again in 1.0 seconds: boo!", + "Timer 'fn' failed with an exception and will try again in 1.23 seconds: boo!", "Timer 'fn' succeeded.", ]) async def test_timer_retried_until_retries_limit( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, manual_time): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() - @kopf.timer(*resource, id='fn', retries=3, 
interval=1.0) + @kopf.timer(*resource, id='fn', retries=3, interval=2.34) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - if dummy.mock.call_count >= 5: - kwargs['stopped']._setter.set() # to exit the cycle - raise TemporaryError("boo!", delay=1.0) + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() + raise TemporaryError("boo!", delay=3.45) - await simulate_cycle({}) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + event_object = {'metadata': {'finalizers': [settings.persistence.finalizer]}} + await simulate_cycle(event_object) + async with trigger: + await trigger.wait_for(lambda: any("but will" in m for m in caplog.messages)) - assert k8s_mocked.sleep.call_count >= 3 # one between each retry (3 attempts - 2 sleeps) - assert k8s_mocked.sleep.call_args_list[0][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[1][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[2][0][0] == 1.0 # interval + assert looptime == 6.9 # 2*3.45 -- 2 sleeps between 3 attempts + assert dummy.mock.call_count == 3 async def test_timer_retried_until_timeout( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, manual_time): + settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() - @kopf.timer(*resource, id='fn', timeout=3.0, interval=1.0) + @kopf.timer(*resource, id='fn', timeout=10.0, interval=1.23) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - if dummy.mock.call_count >= 5: - kwargs['stopped']._setter.set() # to exit the cycle - raise TemporaryError("boo!", delay=1.0) + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() + raise TemporaryError("boo!", delay=3.45) - await simulate_cycle({}) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + event_object = {'metadata': {'finalizers': [settings.persistence.finalizer]}} + await simulate_cycle(event_object) + async with trigger: + await trigger.wait_for(lambda: any("but will" in m for m in caplog.messages)) - assert k8s_mocked.sleep.call_count >= 3 # one between each retry (3 attempts - 2 sleeps) - assert k8s_mocked.sleep.call_args_list[0][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[1][0][0] == [1.0] # delays - assert k8s_mocked.sleep.call_args_list[2][0][0] == 1.0 # interval + assert looptime == 6.9 # 2*3.45 -- 2 sleeps between 3 attempts; the 4th attempt would exceed the timeout + assert dummy.mock.call_count == 3 + +# TODO: next steps: +# extract the PR with test refactoring, where it is all simplified: +# - "dummy" is converted to Condition, not just Events. +# - in all daemons & timers, not only here! 
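
Note on the pattern above: these tests coordinate with the handler through an asyncio.Condition rather than a bare asyncio.Event. An Event cannot express "wait until the N-th call", and a plain Condition.wait() misses notifications sent before the waiter starts waiting; Condition.wait_for() avoids both problems by re-checking a predicate over shared state under the lock. Below is a minimal standalone sketch of this trigger pattern outside of Kopf's machinery; the names (flaky_handler, trigger) are illustrative, not part of Kopf's API:

import asyncio
from unittest.mock import MagicMock

async def main() -> None:
    mock = MagicMock()
    trigger = asyncio.Condition()

    async def flaky_handler() -> None:
        for _ in range(3):
            mock()  # record the attempt, as dummy.mock(**kwargs) does in the tests
            async with trigger:
                trigger.notify_all()
            await asyncio.sleep(0.01)  # a stand-in for a retry back-off

    task = asyncio.create_task(flaky_handler())
    async with trigger:
        # The predicate is re-evaluated under the lock after every notification,
        # so calls that happened before wait_for() started are not missed.
        await trigger.wait_for(lambda: mock.call_count >= 3)
    await task

asyncio.run(main())
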
diff --git a/tests/handling/daemons/test_timer_filtration.py b/tests/handling/daemons/test_timer_filtration.py index c2979188..84b09ca8 100644 --- a/tests/handling/daemons/test_timer_filtration.py +++ b/tests/handling/daemons/test_timer_filtration.py @@ -1,3 +1,4 @@ +import asyncio import logging import pytest @@ -11,21 +12,22 @@ async def test_timer_filtration_satisfied( settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): caplog.set_level(logging.DEBUG) + executed = asyncio.Event() @kopf.timer(*resource, id='fn', labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT}, annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT}) async def fn(**kwargs): - dummy.kwargs = kwargs - dummy.steps['called'].set() + dummy.mock(**kwargs) + executed.set() event_body = {'metadata': {'labels': {'a': 'value', 'b': '...'}, 'annotations': {'x': 'value', 'y': '...'}, 'finalizers': [settings.persistence.finalizer]}} await simulate_cycle(event_body) + await executed.wait() - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + assert dummy.mock.call_count == 1 @pytest.mark.parametrize('labels, annotations', [ @@ -54,6 +56,7 @@ async def fn(**kwargs): 'annotations': annotations, 'finalizers': [settings.persistence.finalizer]}} await simulate_cycle(event_body) + await asyncio.sleep(123) # give it enough time to do something when nothing is expected assert spawn_daemons.called assert spawn_daemons.call_args_list[0][1]['handlers'] == [] diff --git a/tests/handling/daemons/test_timer_intervals.py b/tests/handling/daemons/test_timer_intervals.py index 637234c5..bd064a4d 100644 --- a/tests/handling/daemons/test_timer_intervals.py +++ b/tests/handling/daemons/test_timer_intervals.py @@ -1,3 +1,4 @@ +import asyncio import logging import kopf @@ -6,49 +7,41 @@ async def test_timer_regular_interval( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, frozen_time): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() @kopf.timer(*resource, id='fn', interval=1.0, sharp=False) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - frozen_time.tick(0.3) - if dummy.mock.call_count >= 2: - dummy.steps['finish'].set() - kwargs['stopped']._setter.set() # to exit the cycle + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() + await asyncio.sleep(0.3) # simulate a slow operation await simulate_cycle({}) - await dummy.steps['called'].wait() - await dummy.wait_for_daemon_done() + async with trigger: + await trigger.wait() + await trigger.wait() assert dummy.mock.call_count == 2 - assert k8s_mocked.sleep.call_count == 2 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 - assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 + assert looptime == 1.3 async def test_timer_sharp_interval( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, frozen_time): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() @kopf.timer(*resource, id='fn', interval=1.0, sharp=True) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - frozen_time.tick(0.3) - if dummy.mock.call_count >= 2: - dummy.steps['finish'].set() - kwargs['stopped']._setter.set() # to exit the cycle + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() + await asyncio.sleep(0.3) # simulate a slow operation 
await simulate_cycle({}) - await dummy.steps['called'].wait() - await dummy.steps['finish'].wait() - await dummy.wait_for_daemon_done() + async with trigger: + await trigger.wait_for(lambda: dummy.mock.call_count >= 2) assert dummy.mock.call_count == 2 - assert k8s_mocked.sleep.call_count == 2 - assert 0.7 <= k8s_mocked.sleep.call_args_list[0][0][0] < 0.71 - assert 0.7 <= k8s_mocked.sleep.call_args_list[1][0][0] < 0.71 + assert looptime == 1 # not 1.3! diff --git a/tests/handling/daemons/test_timer_triggering.py b/tests/handling/daemons/test_timer_triggering.py index ed074979..c203c4af 100644 --- a/tests/handling/daemons/test_timer_triggering.py +++ b/tests/handling/daemons/test_timer_triggering.py @@ -1,48 +1,44 @@ +import asyncio import logging import kopf async def test_timer_is_spawned_at_least_once( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() @kopf.timer(*resource, id='fn', interval=1.0) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - kwargs['stopped']._setter.set() # to exit the cycle + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() await simulate_cycle({}) - await dummy.steps['called'].wait() + async with trigger: + await trigger.wait() + await trigger.wait() - assert dummy.mock.call_count == 1 - assert dummy.kwargs['retry'] == 0 - assert k8s_mocked.sleep.call_count == 1 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 - - await dummy.wait_for_daemon_done() + assert looptime == 1 + assert dummy.mock.call_count == 2 async def test_timer_initial_delay_obeyed( - resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): + resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, looptime): caplog.set_level(logging.DEBUG) + trigger = asyncio.Condition() @kopf.timer(*resource, id='fn', initial_delay=5.0, interval=1.0) async def fn(**kwargs): - dummy.mock() - dummy.kwargs = kwargs - dummy.steps['called'].set() - kwargs['stopped']._setter.set() # to exit the cycle + dummy.mock(**kwargs) + async with trigger: + trigger.notify_all() await simulate_cycle({}) - await dummy.steps['called'].wait() - - assert dummy.mock.call_count == 1 - assert dummy.kwargs['retry'] == 0 - assert k8s_mocked.sleep.call_count == 2 - assert k8s_mocked.sleep.call_args_list[0][0][0] == 5.0 - assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 + async with trigger: + await trigger.wait() + await trigger.wait() - await dummy.wait_for_daemon_done() + assert looptime == 6 + assert dummy.mock.call_count == 2
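
Background on the looptime assertions used throughout this series: the freezegun-based frozen_time/manual_time fixtures are replaced by checks against the event loop's own virtual clock, where asyncio.sleep() and timeouts return near-instantly in real time while the loop's time() advances by exactly the slept amount. The fixture's actual implementation is not part of this diff; the sketch below only illustrates the underlying idea, assuming a selector that fast-forwards a fake clock instead of blocking (FakeClock and VirtualTimeSelector are hypothetical names):

import asyncio
import selectors

class FakeClock:
    def __init__(self) -> None:
        self.now = 0.0

class VirtualTimeSelector(selectors.DefaultSelector):
    """Instead of blocking until the next timer is due, jump the fake clock to it."""
    def __init__(self, clock: FakeClock) -> None:
        super().__init__()
        self._clock = clock

    def select(self, timeout=None):
        events = super().select(0)  # poll real I/O without blocking
        if not events and timeout:
            self._clock.now += timeout  # fast-forward straight to the nearest timer
        return events

async def main() -> None:
    await asyncio.sleep(123)    # returns near-instantly in real time
    assert loop.time() == 123   # but the loop's virtual clock has advanced

clock = FakeClock()
loop = asyncio.SelectorEventLoop(VirtualTimeSelector(clock))
# As the removed conftest comment notes, patch the loop object directly:
# stdlib loops use time.monotonic(), while uvloop has its own C-level time.
loop.time = lambda: clock.now
loop.run_until_complete(main())
loop.close()
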