From 7b2fa3eaebd9687eddb0e4fe2526454d0e48f9cb Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Tue, 12 May 2026 09:45:02 +0200 Subject: [PATCH 1/5] Add experimental qcodes.measure_v2 package (tracer bullet) Introduce a parallel, async-capable measurement API for QCoDeS that decouples plan description from execution. Plans are Python generators yielding typed messages (Set, Read, Sleep, Emit, OpenRun, CloseRun); a MeasurementEngine drives them on a dedicated worker thread and publishes events (RunStarted, RowEmitted, RunStopped) to pluggable sinks via a publisher thread with a bounded queue. Key properties of the tracer-bullet implementation: - Non-blocking submission via engine.submit(...) returning a RunHandle with futures for the dataset and final result. - Cancellation via it.throw(CancelRequested) at the next yield point; plan try/finally blocks always run for safe cleanup. scan_1d guarantees a ramp-to-zero of the swept parameter on any exit path. - Cancel flag re-checked between .send() and dispatch to prevent one extra Set/Read leaking to hardware after cancellation. - handle.wait() returning guarantees data is durably committed: the publisher thread completes futures only after all sinks have processed RunStopped. - SqliteSink reuses existing DataSaver/Measurement infrastructure; opens its own SQLite connection on the publisher thread to satisfy check_same_thread=True. ~50 lines of integration code. - MemorySink for tests and lightweight in-memory consumption. Public surface: import qcodes as qc ds = qc.measure_v2.scan(LinSweep(g, 0, 1, 11), measure=[i]) Advanced users construct an explicit MeasurementEngine and submit plans built via run(...) + plan-builders (scan_1d, or custom generators). Tracer scope is deliberately small: 1D scans of scalar parameters with the default SQLite sink. 
Multi-dim scans, Call/Describe messages, parallel reads via underlying_instrument grouping, queueing, pause/resume, live-plot/progress sinks, and a dond shim are P0-P2 for the v1 release. See src/qcodes/measure_v2/DESIGN.md for the full architecture. 50 tests across L1 (message-stream), L2 (engine + MemorySink), and L3 (engine + SqliteSink) levels. All three end-to-end acceptance scenarios pass. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../newsfragments/+measure_v2_tracer.new | 12 + src/qcodes/measure_v2/DESIGN.md | 1231 +++++++++++++++++ src/qcodes/measure_v2/__init__.py | 68 + src/qcodes/measure_v2/convenience.py | 121 ++ src/qcodes/measure_v2/decorators.py | 133 ++ src/qcodes/measure_v2/engine.py | 662 +++++++++ src/qcodes/measure_v2/events.py | 115 ++ src/qcodes/measure_v2/exceptions.py | 40 + src/qcodes/measure_v2/messages.py | 112 ++ src/qcodes/measure_v2/plans/__init__.py | 10 + src/qcodes/measure_v2/plans/scan.py | 53 + src/qcodes/measure_v2/sinks/__init__.py | 16 + src/qcodes/measure_v2/sinks/memory.py | 58 + src/qcodes/measure_v2/sinks/protocol.py | 42 + src/qcodes/measure_v2/sinks/sqlite.py | 165 +++ src/qcodes/measure_v2/testing.py | 87 ++ tests/measure_v2/__init__.py | 0 tests/measure_v2/conftest.py | 34 + tests/measure_v2/test_acceptance.py | 118 ++ tests/measure_v2/test_engine.py | 302 ++++ tests/measure_v2/test_memory_sink.py | 122 ++ tests/measure_v2/test_messages.py | 57 + tests/measure_v2/test_run_decorator.py | 207 +++ tests/measure_v2/test_scan_1d_l1.py | 136 ++ tests/measure_v2/test_sqlite_sink.py | 97 ++ 25 files changed, 3998 insertions(+) create mode 100644 docs/changes/newsfragments/+measure_v2_tracer.new create mode 100644 src/qcodes/measure_v2/DESIGN.md create mode 100644 src/qcodes/measure_v2/__init__.py create mode 100644 src/qcodes/measure_v2/convenience.py create mode 100644 src/qcodes/measure_v2/decorators.py create mode 100644 src/qcodes/measure_v2/engine.py create mode 100644 
src/qcodes/measure_v2/events.py create mode 100644 src/qcodes/measure_v2/exceptions.py create mode 100644 src/qcodes/measure_v2/messages.py create mode 100644 src/qcodes/measure_v2/plans/__init__.py create mode 100644 src/qcodes/measure_v2/plans/scan.py create mode 100644 src/qcodes/measure_v2/sinks/__init__.py create mode 100644 src/qcodes/measure_v2/sinks/memory.py create mode 100644 src/qcodes/measure_v2/sinks/protocol.py create mode 100644 src/qcodes/measure_v2/sinks/sqlite.py create mode 100644 src/qcodes/measure_v2/testing.py create mode 100644 tests/measure_v2/__init__.py create mode 100644 tests/measure_v2/conftest.py create mode 100644 tests/measure_v2/test_acceptance.py create mode 100644 tests/measure_v2/test_engine.py create mode 100644 tests/measure_v2/test_memory_sink.py create mode 100644 tests/measure_v2/test_messages.py create mode 100644 tests/measure_v2/test_run_decorator.py create mode 100644 tests/measure_v2/test_scan_1d_l1.py create mode 100644 tests/measure_v2/test_sqlite_sink.py diff --git a/docs/changes/newsfragments/+measure_v2_tracer.new b/docs/changes/newsfragments/+measure_v2_tracer.new new file mode 100644 index 000000000000..72aea8b79d3d --- /dev/null +++ b/docs/changes/newsfragments/+measure_v2_tracer.new @@ -0,0 +1,12 @@ +Added experimental ``qcodes.measure_v2`` package — a parallel measurement API +that decouples plan description from execution. Plans are Python generators +yielding typed messages (``Set``, ``Read``, ``Sleep``, ``Emit``, ``OpenRun``, +``CloseRun``); an engine drives them on a dedicated worker thread and +publishes events to pluggable sinks. Enables non-blocking measurement +submission, mid-flight cancellation with guaranteed cleanup via plan +``try/finally`` blocks, and live data consumption via custom sinks. +The public surface is ``qc.measure_v2.scan(...)`` and the engine / +``RunHandle`` types for advanced use; the SQLite sink reuses the existing +``DataSaver`` infrastructure. 
This package is experimental and its API +may change in subsequent releases. See ``src/qcodes/measure_v2/DESIGN.md`` +for the architecture document. diff --git a/src/qcodes/measure_v2/DESIGN.md b/src/qcodes/measure_v2/DESIGN.md new file mode 100644 index 000000000000..b659f02e0a13 --- /dev/null +++ b/src/qcodes/measure_v2/DESIGN.md @@ -0,0 +1,1231 @@ +# QCoDeS Async Measurement Architecture — Design Exploration + +**Status:** exploration / design draft, no implementation +**Scope:** a parallel measurement API for QCoDeS that decouples plan description +from execution, enabling non-blocking measurements, adaptive scans, live data +access, and cancellation with safe cleanup. + +**Revision history:** +- **rev2** (post-critique): dropped lazy schema discovery; snapshot taken on + `OpenRun` unconditionally; introduced sink criticality; added cancel re-check + after `.send()`; added shutdown deadline; engine canonicalizes parameter + identity by `register_name`; cross-param parallelism preserves today's + `underlying_instrument` grouping; `get_after_set` explicit in plan-builders; + multi-stream `Emit(stream=...)` removed from core vocabulary (deferred); + softened convenience-layer compatibility claims; documented behavior for + empty plans and `dataset.cache.data()` thread-safety. + +--- + +## 1. Problem Statement + +QCoDeS today performs measurements synchronously on the main thread. The +`Measurement.run()` context manager and `dond(...)` family use nested for-loops +that block the calling kernel for the duration of the scan. This makes several +classes of work awkward or impossible: + +- Keeping a Jupyter kernel responsive while a long scan runs. +- Adaptive sweeps that decide their next setpoint from the previous reading. +- Live data consumption (plotters, ML feedback, dashboards) beyond the + existing dataset subscriber mechanism. +- Pause/resume of in-flight measurements. +- Multiple concurrent measurements on independent rigs in one process. 
+- Programmatic cancellation with guaranteed safe cleanup of instrument state. + +The existing model conflates four distinct concerns into one for-loop: +*describing* the scan, *executing* it on instruments, *writing* it to disk, +and *displaying* it. This document proposes splitting them. + +## 2. Goals & Non-Goals + +### Goals + +- A parallel API surface (not a replacement) usable from notebooks today. +- Plans as data — describable, inspectable, composable, testable without + instruments. +- Non-blocking submission with handles for status / cancel / pause / resume. +- Live data access via a uniform sink/event model. +- Cancellation that always runs user-defined cleanup. +- Reuse of existing parameter and dataset infrastructure (no driver changes + required for correctness). +- Clean unit testability of plan logic. + +### Non-goals (for v1) + +- Replacing or deprecating any existing API. +- Multi-process or remote execution (out-of-process services). +- Async/await user-facing API. +- Driver-level cancel-during-set support. +- Crash recovery / replay of partial runs. +- A measurement queue UI / scheduler beyond simple FIFO. +- Modeling after any specific existing framework (bluesky, pymeasure, Labber). + Where designs converge it's coincidence; where they diverge it's intentional. + +## 3. Architectural Overview + +Four components, each owning one concern: + +``` + +--> SqliteSink (default) ++----------+ +----------------+ +-------+ | +| Plan |-->| Measurement |-->|Publisher|+--> LiveMatplotlibSink +| (gener- | | Engine | | Thread | | +| ator of |<--| (worker thread | | | +--> TqdmSink +| Msg) | | + queue) | +-------+ | ++----------+ +----------------+ +--> user sinks ... + ^ + | ++-----------------+ +| Plan-builders & | +| Convenience API | +| (qc.scan, ...) | ++-----------------+ +``` + +- **Plan** — a generator yielding typed messages. Pure data; doesn't perform + I/O itself. Receives results back via `.send()`. 
+- **MeasurementEngine** — owns one worker thread; iterates plans; dispatches + messages to instruments; emits events. +- **Sinks** — callables that consume events. Default sink writes to SQLite by + wrapping `DataSaver`. Other sinks attach for live viz, network, etc. +- **Convenience API** — `qc.scan(...)` etc., constructs plans + sinks and + submits them to a default engine. + +Strict layering: plans depend only on the message vocabulary and +`ParameterBase`. The engine depends on the message vocabulary. Sinks depend on +the event vocabulary. The convenience layer depends on all three. + +## 4. Plan Model + +### 4.1 Message vocabulary + +Seven message types (plus one descriptor message for opt-in metadata): + +```python +@dataclass(frozen=True) +class Set: + param: ParameterBase + value: Any + +@dataclass(frozen=True) +class Read: + params: tuple[ParameterBase, ...] + # Engine returns dict[ParameterBase, Any] via .send() + +@dataclass(frozen=True) +class Sleep: + seconds: float + +@dataclass(frozen=True) +class Call: + fn: Callable[[], Any] + # Engine returns fn's result via .send() + +@dataclass(frozen=True) +class Emit: + overrides: Mapping[ParameterBase, Any] = field(default_factory=dict) + # overrides may only carry params already in the descriptor. + # Adding new params at Emit time is forbidden (no lazy registration). + +@dataclass(frozen=True) +class OpenRun: + name: str + exp: Experiment | None + descriptor: Descriptor # required — no lazy mode + write_period: float | None = None + +@dataclass(frozen=True) +class CloseRun: + pass + +@dataclass(frozen=True) +class Describe: + """First-message marker; consumed by the run() decorator to build OpenRun + when the caller doesn't pass explicit args to run(...). One of explicit + run() args or Describe MUST be provided — schema is never lazy.""" + setpoints: tuple[ParameterBase, ...] + measured: tuple[ParameterBase, ...] 
+ shapes: Shapes | None = None +``` + +`Emit(stream=...)` (multi-stream within one run) is deliberately not in v1. +Per-stream schemas and SqliteSink mapping are non-trivial; defer until a +concrete use case demands it. + +Deliberately absent: + +- No `BreakIf` / `Checkpoint` — both reduce to `Call`. +- No `Subscribe` — sink registration is engine API, not plan content. +- No `Pause` / `Resume` — engine API only. +- No `Wait(condition)` — composes from `Sleep` + `Read` + plan-level loop. + +### 4.2 Plan as generator + +```python +Plan = Generator[Msg, Any, None] +``` + +A plan is a generator function. The engine drives iteration with `it.send(...)`, +where the value sent is the result of the previous message: + +| Message | What `.send()` returns next iteration | +|---|---| +| `Set`, `Sleep`, `Emit`, `OpenRun`, `CloseRun`, `Describe` | `None` | +| `Read(params)` | `dict[ParameterBase, Any]` | +| `Call(fn)` | return value of `fn` | + +### 4.3 Composition + +Plans compose with `yield from`. Python's generator protocol propagates both +sent values and thrown exceptions correctly through nested generators, so +adaptive plans built from sub-plans work without engine support. + +```python +def outer(...): + yield Set(g1, 1.0) + try: + yield from inner(...) 
+ finally: + yield Set(g1, 0.0) +``` + +### 4.4 Adaptivity + +Adaptive plans use the `.send()` channel to receive read results and decide +the next setpoint: + +```python +def bisect_transition(gate, drain_i, *, lo, hi, threshold, tol, settle): + while abs(hi - lo) > tol: + mid = 0.5 * (hi + lo) + yield Set(gate, mid) + yield Sleep(settle) + r = yield Read((drain_i,)) + yield Emit() + if r[drain_i] < threshold: + hi = mid + else: + lo = mid +``` + +External adaptive libraries integrate cleanly because plan-side code is +synchronous between yields: + +```python +def adaptive_scan(knob, signal, bounds, *, loss_goal, settle): + learner = Learner1D(function=None, bounds=bounds) + while learner.loss() > loss_goal: + (x,), _ = learner.ask(1) + yield Set(knob, x) + yield Sleep(settle) + r = yield Read((signal,)) + yield Emit() + learner.tell(x, float(r[signal])) +``` + +## 5. Engine + +### 5.1 Threading + +One dedicated worker thread per `MeasurementEngine` instance. The engine owns +this thread and instantiates it lazily. + +Rationale: + +- Driver code stays synchronous. Per-instrument thread-safety constraints + (pyvisa, NI-DAQmx, ZI) are satisfied by always touching an instrument from + the same thread. +- GIL is irrelevant — work is I/O bound. +- Cancellation is cooperative checks between yields, not thread interruption. +- The engine can run an internal `ThreadPoolExecutor` for parallel `Read` + reads without exposing parallelism to plans. + +For multi-rig scenarios, instantiate multiple engines, each with its own +disjoint instrument set. 
+ +### 5.2 Dispatch loop (sketch) + +```python +def _run_plan(self, plan: Plan) -> None: + it = iter(plan) + send_value: Any = None + in_cleanup = False + while True: + try: + if self._cancel_pending and not in_cleanup: + in_cleanup = True + msg = it.throw(CancelRequested(self._cancel_reason)) + else: + self._paused.wait() + msg = it.send(send_value) + except StopIteration: + return + # Re-check cancel between .send() and dispatch: a cancel arriving in + # this window must not cause one extra Set/Read to land on hardware. + if self._cancel_pending and not in_cleanup: + in_cleanup = True + try: + msg = it.throw(CancelRequested(self._cancel_reason)) + except StopIteration: + return + send_value = self._dispatch(msg) + +def _dispatch(self, msg: Msg) -> Any: + match msg: + case Set(p, v): + p.set(v) + self._state[p.register_name] = v + case Read(params): + out = self._read_pool.read(params) # grouped by underlying_instrument + self._state.update({p.register_name: v for p, v in out.items()}) + return out + case Sleep(s): + self._cancellable_sleep(s) + case Call(fn): + return fn() + case Emit(overrides): + snapshot = {**self._state, **{p.register_name: v for p, v in overrides.items()}} + # overrides containing un-declared params raise before publish + self._validate_against_descriptor(overrides) + self._publish(RowEmitted(self._run_id, snapshot, self._next_seq())) + case OpenRun(name, exp, descriptor, wp): + self._publish(RunStarted(self._run_id, name, exp, descriptor, wp, time.time())) + case CloseRun(): + self._publish(RunStopped(...)) +``` + +State is keyed by `register_name`, not parameter object identity, to match +how the dataset identifies parameters (see §10). + +### 5.3 Engine API + +```python +class MeasurementEngine: + def __init__(self, *, name: str = "default", + sinks: Sequence[DataSink] = (DEFAULT_SQLITE_SINK,), + read_pool_size: int | None = None) -> None: ... 
+ + def submit(self, plan: Plan, *, name: str = "", + sinks: Sequence[DataSink] = ()) -> RunHandle: ... + + def cancel(self, h: RunHandle) -> None: ... + def pause(self, h: RunHandle) -> None: ... + def resume(self, h: RunHandle) -> None: ... + def shutdown(self, *, wait: bool = True) -> None: ... + +class RunHandle: + uuid: UUID + dataset: Future[DataSetProtocol] # resolves once SQLite sink opens + future: Future[RunResult] # resolves on RunStopped + @property + def status(self) -> RunStatus: ... + def wait(self, timeout: float | None = None) -> RunResult: ... + def cancel(self) -> None: ... +``` + +### 5.4 Concurrent submissions + +Concurrent submits to a single engine are queued FIFO. The engine processes +one plan at a time. Cancelling a queued-but-not-started run removes it from +the queue and emits `RunStopped(reason="cancelled_before_start")` to sinks +without iterating the plan. + +For parallel execution across rigs, use multiple engine instances. + +## 6. Sinks & Events + +### 6.1 Event vocabulary + +```python +@dataclass(frozen=True) +class RunStarted: + run_id: UUID + name: str + exp: Experiment | None + descriptor: Descriptor | None + write_period: float | None + started_at: float + +@dataclass(frozen=True) +class RowEmitted: + run_id: UUID + snapshot: Mapping[ParameterBase, Any] + seq: int + +@dataclass(frozen=True) +class RunStopped: + run_id: UUID + reason: Literal["completed", "cancelled", "interrupted", + "error", "engine_shutdown", "cancelled_before_start"] + error: BaseException | None + stopped_at: float + cancel_latency: float | None + n_rows_emitted: int +``` + +### 6.2 Sink protocol + +```python +class DataSink(Protocol): + def __call__(self, event: Event) -> None: ... +``` + +A sink is just a callable. Trivial to implement; symmetric across writer, +plotter, network publisher, test fakes. + +### 6.3 Publisher thread and sink criticality + +One publisher thread per engine.
Engine emits events into a bounded queue; +publisher drains and fans out to sinks in **criticality order**: critical +sinks first (so durable storage runs before non-critical observers), then +non-critical sinks. + +``` +[engine thread] --put--> [Queue, bounded] --get--> [publisher thread] + +--> [critical] sqlite_sink(event) + +--> [non-crit] plotter_sink(event) + +--> [non-crit] user_sink(event) +``` + +**Sink criticality.** Each sink declares `critical: bool` (default `False`): + +- **Critical sinks** (e.g., the default `SqliteSink`): an exception aborts + the run. On `RunStarted` failure, the engine throws `SinkOpenFailed` into + the plan generator (which runs its `finally`) and emits + `RunStopped(reason="error")`. On mid-run or `RunStopped` failure, the + exception is logged, the dataset is marked errored, and propagates to + `RunResult.error`. +- **Non-critical sinks** (plotters, network publishers): exceptions are + caught and logged. The sink is not unregistered. Other sinks continue. + +Rationale: silent data loss on a "completed" run is unacceptable for the +default storage path. Plotters losing a frame is fine. + +**Ordering guarantees:** +- Events delivered to all sinks in emission order. +- Critical sinks run before non-critical sinks for each event. +- Engine thread never blocks on disk I/O except via backpressure (§6.4). +- `handle.dataset` future resolves immediately after the critical SqliteSink + finishes processing `RunStarted` for that run. +- `handle.future` (a `Future[RunResult]`) resolves only after the publisher + has processed `RunStopped` through **all** sinks — i.e., after the dataset + is closed on disk. This means `handle.wait()` returning is a guarantee that + data is durably committed. + +### 6.4 Backpressure + +Bounded queue between engine and publisher. + +- Default queue size: tuned for ~1s of typical event throughput. +- Queue holds event objects (cheap); large array payloads inside `RowEmitted` + are referenced, not copied. 
Memory cost is bounded by in-flight Emit count. +- On overflow: **engine thread blocks**. Acquisition pauses until writer + catches up. Logged as warning. Correct default for DAQ — better to slow the + sweep than to drop data. +- Future: opt-in lossy sinks for pure live-viz that can drop intermediate + events but never `RunStarted`/`RunStopped`/critical `RowEmitted`. +- Invariant: `RunStopped` is never dropped, even on cancel or shutdown. + Publisher fully drains before reporting run finished. + +### 6.5 Default SQLite sink + +The default sink wraps existing `DataSaver`. The dataclass-to-`add_result` +conversion is trivial, but the **schema-registration lifecycle is where the +real integration work lives**: + +```python +class SqliteSink: + critical = True + + def __call__(self, event: Event) -> None: + match event: + case RunStarted(run_id, name, exp, descriptor, wp, _): + meas = Measurement(name=name, exp=exp) + # Register all setpoints from descriptor + for p in descriptor.setpoints: + meas.register_parameter(p) + # Register measured params with their setpoint dependencies + for p in descriptor.measured: + meas.register_parameter(p, setpoints=descriptor.setpoints, + shapes=descriptor.shapes) + if wp is not None: + meas.write_period = wp + saver = meas.run().__enter__() # snapshot taken here + self._savers[run_id] = saver + self._resolve_dataset_future(run_id, saver.dataset) + case RowEmitted(run_id, snapshot, _): + self._savers[run_id].add_result(*snapshot.items()) + case RunStopped(run_id, reason, error, *_): + exc_type = type(error) if error else None + self._savers.pop(run_id).__exit__(exc_type, error, None) +``` + +Key points: + +- The descriptor is required (no lazy mode), so `RunStarted` is the only + moment the sink registers parameters. No "register at first Emit" complexity. +- `meas.run().__enter__()` is where the station snapshot is taken — exactly + matches today's behavior. 
The plan's setup actions (warm-up, ramping) run + *after* this snapshot, also matching today's behavior with `enter_actions`. +- `handle.dataset` is resolved in this method, before any `RowEmitted` can + arrive on the queue (publisher is single-threaded). +- For `ParameterWithSetpoints` / `MultiParameter`, all existing array + fan-out and shape-validation logic in `DataSaver.add_result` is reused + unchanged. The sink itself stays small. + +The non-trivial work is not in the sink — it's in ensuring the descriptor +passed in `RunStarted` is correct. That's the responsibility of the +plan-builder (via `Describe`) or the user (via explicit `run()` args). + +## 7. Run Lifecycle / `run()` Decorator + +Plans don't open their own runs. The `run(...)` decorator wraps a plan-builder +to inject `OpenRun`/`CloseRun` around it. The descriptor is mandatory — it +comes from either explicit args or a `Describe` first message: + +```python +def run(*, name: str = "", exp: Experiment | None = None, + setpoints: tuple[ParameterBase, ...] | None = None, + measured: tuple[ParameterBase, ...] | None = None, + shapes: Shapes | None = None, + write_period: float | None = None) -> Callable[[Plan], Plan]: + def wrap(inner: Plan) -> Plan: + try: + first = next(inner) + except StopIteration: + # Empty plan: no run, no events. Returns immediately. + # RunResult will report reason="completed", n_rows_emitted=0, + # dataset future resolved to None. + return + + described = first if isinstance(first, Describe) else None + first_passthrough = None if described is not None else first + + explicit = (setpoints is not None or measured is not None) + if explicit and described is not None: + raise PlanError("Both run() args and Describe given; pick one.") + if not explicit and described is None: + raise PlanError( + "Plan has no schema. Provide either run(setpoints=..., " + "measured=...) args or yield Describe(...) as the first " + "message of the plan. Lazy schema discovery is not supported." 
+ + if explicit: + descr = Descriptor(setpoints=setpoints or (), + measured=measured or (), + shapes=shapes) + else: + descr = Descriptor(setpoints=described.setpoints, + measured=described.measured, + shapes=described.shapes) + + yield OpenRun(name=name, exp=exp, descriptor=descr, write_period=write_period) + try: + if first_passthrough is not None: + yield first_passthrough + yield from inner + finally: + yield CloseRun() + return wrap + +Properties: + +- Plans without an explicit schema (neither `run()` args nor `Describe`) + fail fast with a clear error the first time the wrapped plan is advanced + (the checks run inside the generator, before any message reaches hardware). +- Empty plans (zero yielded messages) are valid and produce no run. +- Plans composable via `yield from` — an outer plan can wrap sub-plans + without re-opening runs, since only the outermost `run(...)` decorator + yields `OpenRun`/`CloseRun`. + +## 8. Schema Declaration + +Every run has a `Descriptor` declared **before** execution begins. It carries: + +- `setpoints: tuple[ParameterBase, ...]` — parameters the plan sweeps via `Set`. +- `measured: tuple[ParameterBase, ...]` — parameters the plan reads via `Read`. +- `shapes: Shapes | None` — optional per-measured-param shape hints. + +Two modes only: + +1. **Explicit `run(...)` args** — for callers (e.g., the convenience layer) + that know everything about the run. + + ```python + plan = run( + name="g1_vs_g2", + setpoints=(g1, g2), + measured=(current,), + shapes=(11, 11), + )(scan_inner_outer(LinSweep(g1, 0, 1, 11), LinSweep(g2, 0, 1, 11), [current])) + ``` + +2. **`Describe` as the first yielded message** — for plan-builders whose + shape is known to the builder but not to the caller. The `run(...)` + decorator consumes the `Describe` to construct the descriptor. + + ```python + def scan_inner_outer(outer, inner, measured) -> Plan: + yield Describe( + setpoints=(outer.param, inner.param), + measured=tuple(measured), + shapes=(outer.num_points, inner.num_points), + ) + for v_outer in outer.get_setpoints(): + ...
+ ``` + +The two modes are mutually exclusive — passing both raises `PlanError`. +Providing neither also raises `PlanError`. Schema is never inferred from +observed `Set`/`Read` calls (this was considered and dropped — see +revision history). + +**Validation at runtime:** + +- A `Set(p, v)` whose `p.register_name` isn't in `descriptor.setpoints` + is allowed (intermediate setpoints, warm-up, etc. — these execute but + aren't written to the dataset). +- A `Read((p1, p2, …))` of a param not in + `descriptor.setpoints | descriptor.measured` raises (setpoint reads are + permitted so plan-builders can honor `get_after_set` — see §10). +- An `Emit(overrides={p: v})` whose `p` isn't in + `descriptor.setpoints | descriptor.measured` raises. + +These rules give the SQLite sink a fixed, declared schema before the first +row is written, removing the lifecycle-coordination problem that lazy mode +introduced. + +## 9. Cancellation & Cleanup Contract + +### 9.1 Single mechanism + +All cancellation sources funnel into one path: engine throws `CancelRequested` +into the plan generator at the next yield point (re-checked between `.send()` +and dispatch — see §5.2). The plan's `try/finally` runs. Cleanup messages are +dispatched. `RunStopped(reason=…)` is delivered to sinks. Different sources +differ only in the `reason` string. + +| Source | Trigger | +|---|---| +| User explicit | `handle.cancel()` | +| KeyboardInterrupt | Ctrl-C while in `handle.wait()` | +| Engine shutdown | `engine.shutdown()` cancels all live runs | +| Plan-internal | Plan raises `BreakConditionInterrupt` | +| Process termination | `atexit` calls `shutdown(wait=True)` with a deadline | + +### 9.1a Shutdown deadline (escape valve) + +`engine.shutdown(wait=True, timeout=30.0)` carries a deadline (default 30 s). +On expiry, the engine logs loudly and returns; in-flight cleanups continue +on the engine thread until they finish or the process exits. `atexit` +invokes `shutdown(wait=True, timeout=30.0)` — the deadline prevents +interpreter shutdown from hanging on a stuck driver.
+ +This is the only way the "cleanup always runs" rule has an escape: not by +skipping cleanup, but by detaching the caller after a bounded wait. + +### 9.2 Latency + +Cancel takes effect at the next yield point. Bounded by: + +| Currently dispatching | Cancel latency | +|---|---| +| `Set` (no ramp) | ≈ instrument I/O time | +| `Set` with `step`/`inter_delay` ramp | full ramp duration | +| `Read` | slowest `get` | +| `Sleep(s)` | ~100 ms (engine chunks sleep with cancel-flag checks) | +| `Call(fn)` | `fn`'s runtime | +| Cleanup messages | same rules; second cancel ignored | + +There is **no `immediate=True` mode**. Cleanup always runs. If cleanup hangs, +the only escape is killing the process — same as today. + +### 9.3 Plan-author rules + +1. **Cleanup goes in `try/finally`.** The `finally` block can yield messages; + they're dispatched normally. +2. **If you catch `CancelRequested`, re-raise.** Swallowing makes the plan + un-cancellable. +3. **Cleanup must not hang or raise.** Slow cleanup is the user's call but + counts against cancel latency. Raising in cleanup masks the original + exception. + +### 9.4 Canonical safe-bias example + +```python +def biased_sweep(bias, current, targets, *, max_step=0.1, settle=10e-3): + def safe_ramp(target): + current_val = bias.cache.get() if bias.cache.valid else 0.0 + n = max(1, int(abs(target - current_val) / max_step) + 1) + for step in np.linspace(current_val, target, n): + yield Set(bias, step) + yield Sleep(0.02) + + yield from safe_ramp(targets[0]) + try: + for v in targets: + yield Set(bias, v) + yield Sleep(settle) + yield Read((current,)) + yield Emit() + finally: + yield from safe_ramp(0.0) +``` + +Cancel mid-sweep: `CancelRequested` is thrown at the current yield, the inner +`finally` runs `safe_ramp(0.0)`, run is reported stopped only after the ramp +completes. + +### 9.5 Composition + +`yield from` propagates exceptions naturally. Nested plans' `finally` blocks +run in reverse-nested order on cancel. 
No engine support needed beyond +Python's generator protocol. + +### 9.6 Ctrl-C handling + +```python +def wait(self, timeout=None) -> RunResult: + try: + return self._future.result(timeout=timeout) + except KeyboardInterrupt: + if self._first_interrupt: + self._first_interrupt = False + self.engine.cancel(self) + return self._future.result() + raise # second Ctrl-C escapes +``` + +Matches today's `catch_interrupts`: first Ctrl-C cancels gracefully; second +escapes (cleanup may still run on engine thread). + +### 9.7 RunResult + +```python +@dataclass(frozen=True) +class RunResult: + run_id: UUID + reason: Literal[...] + error: BaseException | None + started_at: float + stopped_at: float + cancel_latency: float | None # surfaces slow cleanups + n_rows_emitted: int +``` + +## 10. Parameter Contract + +The engine demands almost nothing from `ParameterBase`: + +| Engine action | Method called | +|---|---| +| `Set(p, v)` | `p.set(v)` | +| `Read((p,))` | `p.get()` | +| State cache key | `p.register_name` (canonical identity — see below) | + +Everything else (`validators`, `step`, `inter_delay`, `scale`, `cache`, +`snapshot`, `setpoints`, etc.) is internal to `set`/`get` or used only by the +sink, and is reused unchanged. + +### Canonical parameter identity + +The engine identifies parameters by `register_name`, not by object identity. +Rationale: `DataSaver.add_result` validates by name and rejects unknown +names, so two distinct `Parameter` objects sharing a name would pass engine +identity checks but fail at the sink. Failing fast at submission is better. + +At `engine.submit(plan)`: +- The descriptor's `setpoints` and `measured` are checked for duplicate + `register_name` — duplicates raise `PlanError`. +- The state cache uses `register_name` keys for both reads and emits. +- Two `Parameter` objects in the same descriptor with the same name → error. 
+ +Sink-side reuse (default `SqliteSink` via `DataSaver`): + +- `p.full_name`, `p.label`, `p.unit` for registration +- `p.shapes`, `p.setpoints` for `ParameterWithSetpoints` +- `p.snapshot()` for run snapshot (taken on `RunStarted` / `meas.run().__enter__()`) +- All existing array fan-out logic + +### Thread-safety contract — preserves today's behavior + +The engine reuses today's `underlying_instrument` grouping for parallel reads: + +- `Read((p1, p2, …))` partitions params by `p.underlying_instrument`. Params + sharing an underlying instrument are read sequentially; params on + *different* underlying instruments are read in parallel via the engine's + thread pool. +- This matches the existing behavior of `ThreadPoolParamsCaller` exactly. + Drivers safe under today's `use_threads=True` are safe under the new + engine; drivers unsafe under it remain unsafe. +- Cross-instrument coupling on shared resources (e.g., a shared VISA bus + between two `Instrument`s) is still the driver author's problem. Workaround: + configure the engine's `read_pool_size=1` or set `underlying_instrument` to + return the shared resource. + +### `get_after_set`: plan-builder responsibility + +`AbstractSweep.get_after_set` is honored by plan-builders, not by the engine: + +```python +# Inside scan_1d / scan_inner_outer: +yield Set(sweep.param, v) +yield Sleep(sweep.delay) +if sweep.get_after_set: + yield Read((sweep.param,)) # overwrites state[register_name] +yield Read(tuple(measured)) +yield Emit() +``` + +The engine has no special case. The plan-builder uses the existing +`AbstractSweep` API and emits the appropriate `Read` message. Appendix A.1 +and the tracer's `scan_1d` both honor this contract. + +### Cancellation during set/get + +Not interruptible in v1. Cancel waits for the current `set`/`get` to return. +Driver authors who care could opt in to a cancel token in a future revision; +out of scope now. 
+ +### Parameter type coverage + +All standard parameter types work without modification: + +- `Parameter`, `ManualParameter`, `DelegateParameter` +- `ParameterWithSetpoints`, `MultiParameter`, `ArrayParameter` +- `GroupParameter`, `Function` +- Custom `ParameterBase` subclasses with standard `get_raw` / `set_raw` + +Callable measurements (today's `param_meas` accepting bare functions): the +convenience layer wraps them in a one-off `Parameter` so the engine sees only +`ParameterBase`. Plan vocabulary stays typed. + +### Live data access from main thread + +`handle.dataset.cache.data()` is **eventually consistent** in v1. Multiple +threads read/write the dataset and its cache; there's no read lock. For +deterministic live data, attach a sink instead: + +```python +class MyLiveSink: + critical = False + def __call__(self, event): + if isinstance(event, RowEmitted): + my_thread_safe_buffer.append(event.snapshot) +``` + +A thread-safe `LiveSnapshot` API may be added in a future revision. + +## 11. User-Facing API + +Three tiers. 
+ +### Tier 1: convenience surface (95% of users) + +```python +import qcodes as qc + +# N-d sweep, replaces do0d/do1d/do2d/dond +ds = qc.scan( + LinSweep(g1, 0, 1, 11, delay=0.01), + LinSweep(g2, 0, 1, 11, delay=0.01), + measure=[current], + name="g1g2", +) + +# Single shot +ds = qc.measure(temperature, current, name="snapshot") + +# Adaptive +ds = qc.scan_adaptive( + knob=g1, signal=current, + bounds=(-2.0, 0.0), + loss_goal=0.01, max_points=200, +) + +# Non-blocking — return a handle +handle = qc.scan(..., wait=False) +handle.dataset.cache.data() # peek partial data +qc.live_plot(handle) # attach live plotter +handle.cancel() +handle.wait() # confirm cleanup completed +``` + +Some kwargs map directly to today's `dond` and ship in v1: + +- `name`, `exp`, `write_period` +- `show_progress`, `use_threads` +- `break_condition` +- `wait` (default `True`), `engine` (default `qc.default_engine()`) + +Other `dond` features need explicit design and are **not** v1 of the +convenience layer. Users who need them keep using today's `dond`: + +- `enter_actions` / `exit_actions` — designable, but their interaction with + the `run()` decorator's `try/finally` needs thought. +- `before_inner_actions` / `after_inner_actions` — tied to loop structure; + natural fit as plan-builder args once `scan_inner_outer` is finalized. +- `flush_columns` — engine policy or a new `Checkpoint` message; deferred. +- `additional_setpoints` — needs a dedicated design (registered but not + swept; today a `Read` at start would express this). +- Grouped measurements / multi-dataset outputs — deferred with multi-stream. +- `live_plot=True` — needs a `LiveMatplotlibSink`; deferred to live-viz + workstream. + +`dond` / `do0d` / `do1d` / `do2d` remain in `qcodes.dataset.dond` unchanged +during v1. They are not yet shims over the engine; a future migration step +may convert them. 
+ +### Tier 2: engine surface + +```python +eng = qc.default_engine() +# or +eng = qc.MeasurementEngine(name="rig_b", sinks=[my_custom_sink]) + +handle = eng.submit(my_plan, name="run1") +handle.subscribe(my_callback) # add sink for this run +handle.pause() +handle.resume() +handle.cancel() +status = handle.status +result = handle.future.result() +``` + +### Tier 3: plan-builder authoring (library authors) + +```python +def find_pinchoff(gate, drain_i, *, threshold=1e-9, ...) -> Plan: + """Reusable plan-builder. No OpenRun — caller wraps with run(...).""" + v = 0.0 + while v >= -2.0: + yield Set(gate, v) + yield Sleep(20e-3) + r = yield Read((drain_i,)) + yield Emit() + if r[drain_i] < threshold: + return v + v -= 0.005 + +# Used via convenience: +ds = qc.run_plan(find_pinchoff(g1, current), name="pinchoff") +``` + +### Default engine + +`qc.default_engine()` lazy-instantiates a process-wide singleton. No +interaction with `Station` — the engine doesn't own instruments, it executes +plans against parameters. Multi-rig users instantiate explicit engines. + +## 12. Testing Strategy + +Three test levels. + +### L1: message-stream tests (no engine) + +Drive a plan generator manually; assert the message sequence. 
+
+```python
+def drive_plan(plan, *, on_read=None, on_call=None, on_set=None) -> list[Msg]:
+    out = []
+    send_value = None
+    try:
+        msg = next(plan)
+        while True:
+            out.append(msg)
+            match msg:
+                case Set(p, v):
+                    if on_set: on_set(p, v)
+                    send_value = None
+                case Read(params): send_value = on_read(params) if on_read else {}
+                case Call(fn): send_value = on_call(fn) if on_call else None
+                case _: send_value = None
+            msg = plan.send(send_value)
+    except StopIteration:
+        pass
+    return out
+
+def test_bisect_converges():
+    gate = Parameter("gate")
+    drain_i = Parameter("drain_i")
+    state = {"gate": 0.0}  # mirrors the plan's Set messages
+    msgs = drive_plan(
+        bisect_transition(gate, drain_i, lo=-2, hi=0, threshold=1e-9, tol=0.01),
+        on_set=lambda p, v: state.update(gate=v),
+        # simulated device: transition at gate = -1.0
+        on_read=lambda _: {drain_i: 1e-6 if state["gate"] > -1.0 else 1e-12},
+    )
+    last_set = [m for m in msgs if isinstance(m, Set)][-1]
+    assert abs(last_set.value - (-1.0)) < 0.02
+```
+
+Exercises plan logic in isolation. No engine, no sinks, no instruments.
+
+### L2: engine-driven tests (with `MemorySink`)
+
+```python
+class MemorySink:
+    def __init__(self): self.events = []
+    def __call__(self, event): self.events.append(event)
+
+def test_scan_writes_correct_rows():
+    g = Parameter("g", initial_value=0.0, set_cmd=None)
+    i = Parameter("i", get_cmd=lambda: g.cache.get() ** 2)
+    sink = MemorySink()
+    eng = MeasurementEngine(sinks=[sink])
+    plan = run(name="t")(scan_inner_outer(LinSweep(g, 0, 1, 5), [i]))
+    eng.submit(plan).wait()
+    eng.shutdown()
+
+    rows = [e for e in sink.events if isinstance(e, RowEmitted)]
+    assert len(rows) == 5
+    assert rows[-1].snapshot[i] == pytest.approx(1.0)
+```
+
+Tests engine + plan + parameter integration without a database.
+
+### L3: end-to-end (with `SqliteSink` and instrument simulators)
+
+Existing pattern; unchanged.
+
+### Time
+
+Plan-builders accept their delays as parameters; tests pass ~0. For tests
+that need to assert on timestamps or `cancel_latency`, an opt-in `FakeClock`
+abstraction can be passed to the engine.
+ +### Layout + +``` +tests/ + engine/ # submit, queue, cancel, shutdown, schema discovery + plans/ # message vocabulary, run() decorator, composition + sinks/ # memory, sqlite, tee + builders/ # individual plan-builders, level 1 + integration/ # end-to-end + cancel safety +``` + +Today's `tests/dataset/test_dond_*.py` keep working unchanged because `dond` +becomes a shim. + +## 13. Decisions Log + +Decisions superseded by rev2 are struck through. New rev2 decisions are +marked `★`. + +| # | Decision | +|---|---| +| 1 | Seven-message vocabulary plus `Describe` (no `Emit(stream=...)` in v1 — ★ rev2 removed) | +| 2 | Single-threaded engine per instance | +| 3 | Plans are generators; `.send()` for adaptivity | +| 4 | DataSink = callable; one publisher thread per engine | +| 5 | Default SQLite sink wraps existing `DataSaver` (schema lifecycle is the real work — ★ rev2) | +| 6 | Bounded queue, block on overflow, never drop `RowEmitted`/`Run*` | +| 7 | `OpenRun`/`CloseRun` injected by `run(...)` decorator, not in builders | +| 8 | New parallel API; defer migration concerns | +| 9 | Driver thread-safety opt-in (no required changes) | +| 10 | Designed on own merits; no specific framework as model | +| 11 | ~~Schema discovery: explicit args > `Describe` > lazy~~ → **★ rev2: lazy mode dropped. 
Schema is mandatory via `run()` args or `Describe`** | +| 12 | Public API: `qc.scan` / `qc.measure` / `qc.run_plan` + engine for advanced | +| 13 | Default engine = lazy global singleton; multi-engine via explicit instantiation | +| 14 | Concurrent submits to one engine = FIFO queue | +| 15 | Engine surface on params: `set` + `get` only | +| 16 | Sink reuses existing `DataSaver` introspection | +| 17 | Per-param serialization guaranteed; cross-param parallelism follows today's `underlying_instrument` grouping (★ rev2 clarified) | +| 18 | Cancel during `set`/`get` not interruptible in v1 | +| 19 | Callables-as-measurements wrapped at convenience layer | +| 20 | ~~Snapshot on first `Emit` in lazy mode~~ → **★ rev2: snapshot always taken on `OpenRun` (`meas.run().__enter__()`), unconditionally** | +| 21 | Single cancel mechanism: `it.throw(CancelRequested)` | +| 22 | Cleanup runs in plan `try/finally`; engine dispatches yielded messages | +| 23 | Cancel-during-cleanup ignored (in_cleanup flag) | +| 24 | No `immediate=True` mode | +| 25 | First Ctrl-C = cancel; second = re-raise | +| 26 | Cancel latency unbounded (driver-dependent); not a v1 target | +| 27 | Engine `shutdown()` cancels all live runs; `atexit` invokes it | +| 28 | Queued-but-not-started runs cancel without iterating plan | +| 29 | `BreakConditionInterrupt` reuses cancel mechanism | +| 30 | `RunResult.cancel_latency` exposed | +| 31 | Three test levels: message-stream, engine-driven, instrument | +| 32 | `drive_plan(...)` test helper | +| 33 | `MemorySink` as standard test sink | +| 34 | Plan-builders parametrize delays for fast tests; `FakeClock` opt-in | +| 35 | Plan vocabulary module is engine-independent | +| 36 | New test directories alongside existing dond tests | +| ★37 | Cancel flag re-checked after `it.send()` and before `_dispatch` | +| ★38 | Sink criticality: critical sinks (SQLite default) abort run on failure; non-critical sinks (plotters) only log | +| ★39 | Critical sinks run before 
non-critical sinks per event | +| ★40 | `handle.dataset` resolves after SQLite sink processes `RunStarted`; `handle.future` resolves only after **all** sinks process `RunStopped` (data is durably committed when `wait()` returns) | +| ★41 | Engine identifies parameters by `register_name`, not object identity; duplicate names rejected at submit | +| ★42 | `get_after_set` is a plan-builder concern (yields explicit `Read` after `Set`); engine has no special case | +| ★43 | `engine.shutdown(wait=True, timeout=30.0)` has a deadline; `atexit` uses it | +| ★44 | Empty plans (zero yielded messages) produce no run, no events; `RunResult(reason="completed", n_rows_emitted=0)` | +| ★45 | `dataset.cache.data()` is documented as eventually-consistent; deterministic live data goes through a sink | +| ★46 | Convenience layer v1 ships a reduced kwarg set (see §11); `dond` is NOT yet a shim | + +## 14. Open Questions / Future Work + +- **Streaming acquisition ergonomics.** F2/F3 cases (chunked Alazar-style) + work via `Call` + `Emit(overrides=...)` but the boilerplate is real. If + driver authors hit this often, introduce a `Stream` message or sugar. +- **Multi-stream within one run.** Deliberately removed from v1 vocabulary + (no `Emit(stream=...)`); needs per-stream schema design plus SqliteSink + mapping. Re-introduce when a concrete use case justifies it. +- **Lazy schema discovery.** Considered and dropped (see rev2). May + reconsider if usage patterns show consistent friction with mandatory + `Describe` for ad-hoc plans. +- **Derived/metadata writes from plans.** `Annotate(...)` message vs + per-stream `Emit`. Defer. +- **Cancel-aware long sets.** Optional cancel token in `ParameterBase.set` + for drivers that opt in. +- **FakeClock implementation details.** Engine indirection through a `Clock` + abstraction. +- **Queue inspection / priority.** Beyond simple FIFO + cancel, no design. +- **Crash recovery / replay.** Out of scope for v1. 
+- **Out-of-process measurement service.** Could layer on top of this + architecture (sink protocol is naturally serializable) but explicitly out + of scope. +- **Static lint** for plan-builders that swallow `CancelRequested`. +- **Thread-safe live `dataset.cache` view.** v1 documents + `cache.data()` as eventually-consistent; a proper `LiveSnapshot` wrapper + with appropriate locking can be added later. +- **`enter_actions` / `exit_actions` / inner-loop actions.** Need design + for how user-provided callables interact with the `run()` decorator's + `try/finally` semantics. + +## 15. Out of Scope + +Documented to prevent scope creep: + +- async/await user-facing API +- Driver migration to async or to a new base class +- Multi-process execution +- Remote / network measurement service +- Crash recovery, replay, or partial-run resume +- Adaptive-only specialized engine +- Replacing `Measurement` / `dond` / `DataSaver` (they stay alongside; not + yet shims) +- A queue UI or scheduler beyond FIFO + cancel +- Modeling after any specific external framework +- Lazy schema discovery (considered, dropped in rev2) +- Multi-stream `Emit(stream=...)` (deferred from v1 vocabulary) +- Cancel-during-`set`/`get` interruption (driver-level cancel tokens) +- 2D-mechanical-extension claim: `scan_inner_outer` for v1 is a fresh + design, not a mechanical lift from `do_nd` (real semantics around + `set_before_sweep`, inner actions, `flush_columns`, `additional_setpoints` + need explicit treatment in the plan-builder) + +## Appendix A: Plan-Builder Worked Examples + +### A.1 N-d scan (NOT a drop-in replacement for dond/do2d) + +This is a fresh design for v1, not a mechanical lift. Several `dond` +features (`set_before_sweep`, `flush_columns`, `before/after_inner_actions`, +`additional_setpoints`, `enter_actions`, `exit_actions`) need explicit +treatment and are not in this minimal builder. 
+ +```python +def scan_inner_outer(outer, inner, measured) -> Plan: + yield Describe( + setpoints=(outer.param, inner.param), + measured=tuple(measured), + shapes=(outer.num_points, inner.num_points), + ) + for v_outer in outer.get_setpoints(): + yield Set(outer.param, v_outer) + yield Sleep(outer.delay) + if outer.get_after_set: + yield Read((outer.param,)) + for v_inner in inner.get_setpoints(): + yield Set(inner.param, v_inner) + yield Sleep(inner.delay) + if inner.get_after_set: + yield Read((inner.param,)) + yield Read(tuple(measured)) + yield Emit() +``` + +### A.2 Pinchoff (stop-at-condition) + +```python +def find_pinchoff(gate, drain_i, *, threshold, settle, step) -> Plan: + yield Describe(setpoints=(gate,), measured=(drain_i,)) + v = 0.0 + while True: + yield Set(gate, v) + yield Sleep(settle) + r = yield Read((drain_i,)) + yield Emit() + if r[drain_i] < threshold: + break + v += step +``` + +### A.3 Adaptive (with python-adaptive) + +```python +def adaptive_scan(knob, signal, bounds, *, loss_goal, settle): + yield Describe(setpoints=(knob,), measured=(signal,)) + learner = Learner1D(function=None, bounds=bounds) + while learner.loss() > loss_goal: + (x,), _ = learner.ask(1) + yield Set(knob, x) + yield Sleep(settle) + r = yield Read((signal,)) + yield Emit() + learner.tell(x, float(r[signal])) +``` + +### A.4 Tracking a 2D feature + +```python +def track_resonance(B, f, signal, B_setpoints, *, f_window, f_n, f0_initial, settle): + yield Describe(setpoints=(B, f), measured=(signal,), + shapes=(len(B_setpoints), f_n)) + f_center = f0_initial + for b in B_setpoints: + yield Set(B, b) + yield Sleep(settle) + f_grid = np.linspace(f_center - f_window/2, f_center + f_window/2, f_n) + sweep_results = [] + for f_val in f_grid: + yield Set(f, f_val) + yield Sleep(settle) + r = yield Read((signal,)) + yield Emit() + sweep_results.append(r[signal]) + f_center = f_grid[int(np.argmin(sweep_results))] +``` + +### A.5 Safe biased sweep with ramp-down + +```python +def 
biased_sweep(bias, current, targets, *, max_step=0.1, settle=10e-3): + yield Describe(setpoints=(bias,), measured=(current,)) + + def safe_ramp(target): + cv = bias.cache.get() if bias.cache.valid else 0.0 + n = max(1, int(abs(target - cv) / max_step) + 1) + for step in np.linspace(cv, target, n): + yield Set(bias, step) + yield Sleep(0.02) + + yield from safe_ramp(targets[0]) + try: + for v in targets: + yield Set(bias, v) + yield Sleep(settle) + yield Read((current,)) + yield Emit() + finally: + yield from safe_ramp(0.0) +``` + +## Appendix B: Glossary + +- **Plan** — generator of `Msg` objects describing a measurement. +- **Plan-builder** — function returning a plan. +- **Message** — one of `Set`, `Read`, `Sleep`, `Call`, `Emit`, `OpenRun`, + `CloseRun`, `Describe`. +- **Engine** — `MeasurementEngine`, owns a worker thread and dispatches plans. +- **Sink** — callable accepting `Event` objects. +- **Event** — one of `RunStarted`, `RowEmitted`, `RunStopped`. +- **Run** — one execution of a plan, identified by a UUID, optionally + associated with a `DataSet`. +- **Descriptor** — declared schema of a run (setpoints, measured, shapes). +- **Convenience layer** — `qc.scan`, `qc.measure`, `qc.run_plan`, etc. +- **Default engine** — process-wide lazy singleton accessed via + `qc.default_engine()`. diff --git a/src/qcodes/measure_v2/__init__.py b/src/qcodes/measure_v2/__init__.py new file mode 100644 index 000000000000..7c71c1a2e22d --- /dev/null +++ b/src/qcodes/measure_v2/__init__.py @@ -0,0 +1,68 @@ +"""Experimental parallel measurement API for QCoDeS. + +See ``files/async-measurement-architecture.md`` in the design session for the +full architecture proposal. This package is **experimental and unstable**; +the public surface may change between releases. + +Tracer-bullet scope (current): plan message vocabulary, ``run(...)`` +decorator, ``scan_1d`` plan-builder, and unit-test helpers. Engine, +sinks, and convenience layer are not yet implemented. 
+""" + +from qcodes.measure_v2.convenience import ( + default_engine, + reset_default_engine, + scan, +) +from qcodes.measure_v2.decorators import run +from qcodes.measure_v2.engine import MeasurementEngine, RunHandle, RunStatus +from qcodes.measure_v2.events import ( + Descriptor, + Event, + RowEmitted, + RunResult, + RunStarted, + RunStopped, +) +from qcodes.measure_v2.exceptions import CancelRequested, PlanError +from qcodes.measure_v2.messages import ( + CloseRun, + Emit, + Msg, + OpenRun, + Read, + Set, + Sleep, +) +from qcodes.measure_v2.plans import scan_1d +from qcodes.measure_v2.sinks import DataSink, MemorySink, SqliteSink, is_critical + +__all__ = [ + "CancelRequested", + "CloseRun", + "DataSink", + "Descriptor", + "Emit", + "Event", + "MeasurementEngine", + "MemorySink", + "Msg", + "OpenRun", + "PlanError", + "Read", + "RowEmitted", + "RunHandle", + "RunResult", + "RunStarted", + "RunStatus", + "RunStopped", + "Set", + "Sleep", + "SqliteSink", + "default_engine", + "is_critical", + "reset_default_engine", + "run", + "scan", + "scan_1d", +] diff --git a/src/qcodes/measure_v2/convenience.py b/src/qcodes/measure_v2/convenience.py new file mode 100644 index 000000000000..b944e60468a3 --- /dev/null +++ b/src/qcodes/measure_v2/convenience.py @@ -0,0 +1,121 @@ +"""User-facing convenience surface for ``measure_v2``. + +The 95% case: ``qc.measure_v2.scan(LinSweep(g, 0, 1, 11), measure=[i])`` +returns a :py:class:`~qcodes.dataset.data_set_protocol.DataSetProtocol`. + +In tracer scope, only single-sweep (1D) scans are supported. Multi-sweep +scans will be added once ``scan_inner_outer`` is implemented. 
+""" + +from __future__ import annotations + +import threading +from typing import TYPE_CHECKING + +from qcodes.measure_v2.decorators import run +from qcodes.measure_v2.engine import MeasurementEngine, RunHandle +from qcodes.measure_v2.plans import scan_1d +from qcodes.measure_v2.sinks import SqliteSink + +if TYPE_CHECKING: + from collections.abc import Sequence + + from qcodes.dataset.data_set_protocol import DataSetProtocol + from qcodes.dataset.dond.sweeps import AbstractSweep + from qcodes.dataset.experiment_container import Experiment + from qcodes.parameters import ParameterBase + + +_default_engine: MeasurementEngine | None = None +_default_engine_lock = threading.Lock() + + +def default_engine() -> MeasurementEngine: + """Return the process-wide default :py:class:`MeasurementEngine`. + + Lazily instantiated on first use with a default + :py:class:`SqliteSink` that writes to the database configured via + ``qc.config["core"]["db_location"]``. Subsequent calls return the + same instance. + + For tests, prefer constructing an explicit engine rather than relying + on the default — the default's sink references the current + ``db_location`` at construction time, which won't pick up later + config changes. + """ + global _default_engine + with _default_engine_lock: + if _default_engine is None: + _default_engine = MeasurementEngine(sinks=[SqliteSink()]) + return _default_engine + + +def reset_default_engine() -> None: + """Shut down and drop the cached default engine. + + Primarily for tests that need to recreate the engine after changing + ``db_location`` or sink configuration. 
+ """ + global _default_engine + with _default_engine_lock: + if _default_engine is not None: + _default_engine.shutdown(wait=True, timeout=5.0) + _default_engine = None + + +def scan( + *sweeps: AbstractSweep, + measure: Sequence[ParameterBase], + wait: bool = True, + name: str = "", + exp: Experiment | None = None, + engine: MeasurementEngine | None = None, +) -> DataSetProtocol | RunHandle | None: + """Run a scan. + + Tracer scope: one sweep only. The sweep parameter is set across its + setpoints; ``measure`` parameters are read at each point; a row is + emitted per point. On exit (success, error, or cancel), the swept + parameter is set back to 0.0 (the ``scan_1d`` cleanup contract). + + Args: + *sweeps: Sweeps to perform. Currently exactly one sweep is required. + measure: Parameters to read at each setpoint. + wait: If ``True`` (default), block until the run completes and + return the resulting dataset. If ``False``, return the + :py:class:`RunHandle` immediately for non-blocking workflows. + name: Dataset name. + exp: Experiment to attach the dataset to. If ``None``, the sink + creates a default experiment. + engine: Engine to submit on. Defaults to :py:func:`default_engine`. + + Returns: + - If ``wait=True``: the resulting dataset (or ``None`` if no sink + provided one). + - If ``wait=False``: a :py:class:`RunHandle` for the running submission. + + """ + if len(sweeps) != 1: + raise NotImplementedError( + "measure_v2.scan currently supports exactly one sweep " + f"(got {len(sweeps)}). Multi-dimensional scans are planned for v1." 
+ ) + + sweep = sweeps[0] + eng = engine if engine is not None else default_engine() + + setpoints = (sweep.param,) + measured = tuple(measure) + + plan = run( + name=name, + exp=exp, + setpoints=setpoints, + measured=measured, + )(scan_1d(sweep, measured)) + + handle = eng.submit(plan) + if not wait: + return handle + handle.wait() + return handle.dataset.result() diff --git a/src/qcodes/measure_v2/decorators.py b/src/qcodes/measure_v2/decorators.py new file mode 100644 index 000000000000..dfbe6fc9ea41 --- /dev/null +++ b/src/qcodes/measure_v2/decorators.py @@ -0,0 +1,133 @@ +"""The ``run(...)`` decorator. + +Wraps a plan-builder so that the resulting plan opens and closes a run +around the inner messages. Plan-builders themselves never yield +``OpenRun``/``CloseRun``; this is the only place those messages originate. + +Tracer scope: explicit-args mode only. ``Describe`` first-message support +is deferred to v1. The schema (setpoints, measured params, optional shapes) +must be provided as kwargs to ``run(...)`` — failing to provide them raises +:py:class:`~qcodes.measure_v2.exceptions.PlanError` at decoration time. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from qcodes.measure_v2.exceptions import PlanError +from qcodes.measure_v2.messages import CloseRun, Msg, OpenRun + +if TYPE_CHECKING: + from collections.abc import Callable, Generator + + from qcodes.dataset.descriptions.versioning.rundescribertypes import Shapes + from qcodes.dataset.experiment_container import Experiment + from qcodes.parameters import ParameterBase + + +Plan = "Generator[Msg, Any, None]" # alias for documentation + + +def run( + *, + name: str = "", + exp: Experiment | None = None, + setpoints: tuple[ParameterBase, ...] | None = None, + measured: tuple[ParameterBase, ...] 
| None = None, + shapes: Shapes | None = None, + write_period: float | None = None, +) -> Callable[[Generator[Msg, Any, None]], Generator[Msg, Any, None]]: + """Wrap a plan to open and close a run around it. + + Args: + name: Run name passed to the dataset. + exp: Optional :py:class:`~qcodes.dataset.experiment_container.Experiment` + the run belongs to. + setpoints: Tuple of parameters the plan sweeps via ``Set``. Required + in tracer scope. + measured: Tuple of parameters the plan reads via ``Read``. Required + in tracer scope. + shapes: Optional per-measured-param shape hints, in the form + ``{register_name: (n0, n1, ...)}``. + write_period: Optional override for the dataset write period. + + Returns: + A decorator that wraps a plan-generator and yields ``OpenRun`` at + the start and ``CloseRun`` at the end (success, error, or cancel). + + Raises: + PlanError: If neither ``setpoints`` nor ``measured`` are provided. + Lazy schema discovery is not supported in v1. + + """ + + def wrap( + inner: Generator[Msg, Any, None], + ) -> Generator[Msg, Any, None]: + return _decorated( + inner, + name=name, + exp=exp, + setpoints=setpoints, + measured=measured, + shapes=shapes, + write_period=write_period, + ) + + return wrap + + +def _decorated( + inner: Generator[Msg, Any, None], + *, + name: str, + exp: Experiment | None, + setpoints: tuple[ParameterBase, ...] | None, + measured: tuple[ParameterBase, ...] | None, + shapes: Shapes | None, + write_period: float | None, +) -> Generator[Msg, Any, None]: + if setpoints is None and measured is None: + raise PlanError( + "run(...) requires explicit setpoints=... and/or measured=... " + "kwargs in v1 (lazy schema discovery is not supported). " + "Pass tuples of ParameterBase instances." 
+ ) + + setpoints = setpoints or () + measured = measured or () + + _check_no_duplicate_register_names(setpoints, measured) + + yield OpenRun( + name=name, + setpoint_params=setpoints, + measured_params=measured, + exp=exp, + shapes=shapes, + write_period=write_period, + ) + try: + # ``yield from`` is critical here: it transparently forwards + # ``.send()`` and ``.throw()`` to the inner generator. A manual + # ``inner.send(...) / yield msg`` loop would NOT propagate + # ``CancelRequested`` to the inner plan's ``finally`` blocks. + yield from inner + finally: + yield CloseRun() + + +def _check_no_duplicate_register_names( + setpoints: tuple[ParameterBase, ...], + measured: tuple[ParameterBase, ...], +) -> None: + seen: dict[str, ParameterBase] = {} + for p in (*setpoints, *measured): + name = p.register_name + if name in seen and seen[name] is not p: + raise PlanError( + f"Two distinct parameters share register_name {name!r}: " + f"{seen[name]} and {p}. Engine identity is canonicalized " + f"by register_name; duplicates are rejected at submission." + ) + seen[name] = p diff --git a/src/qcodes/measure_v2/engine.py b/src/qcodes/measure_v2/engine.py new file mode 100644 index 000000000000..99adf930afaf --- /dev/null +++ b/src/qcodes/measure_v2/engine.py @@ -0,0 +1,662 @@ +"""The measurement engine. + +A :py:class:`MeasurementEngine` owns a dedicated worker thread that +iterates plan generators and dispatches their messages, and a publisher +thread that fans out events to sinks. The user-facing API is small: + +- ``engine.submit(plan)`` returns a :py:class:`RunHandle`. +- ``handle.wait()`` blocks until the run completes. +- ``handle.cancel()`` requests graceful cancellation (plan's ``finally`` + runs). + +Tracer-bullet scope (current): + +- One run at a time (concurrent ``submit`` raises ``RuntimeError``). +- Sequential reads (no thread pool grouping by underlying instrument). 
+- Engine identifies parameters by ``ParameterBase`` object identity; + ``register_name`` uniqueness is checked at submit (via the descriptor + in ``OpenRun``). +- ``handle.dataset`` is a :py:class:`Future` but resolves to ``None`` — + populated by the SQLite sink in a later layer. +""" + +from __future__ import annotations + +import logging +import queue +import threading +import time +from concurrent.futures import Future +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Literal +from uuid import UUID, uuid4 + +from qcodes.measure_v2.events import ( + Descriptor, + RowEmitted, + RunResult, + RunStarted, + RunStopped, +) +from qcodes.measure_v2.exceptions import CancelRequested, PlanError +from qcodes.measure_v2.messages import ( + CloseRun, + Emit, + OpenRun, + Read, + Set, + Sleep, +) +from qcodes.measure_v2.sinks import is_critical +from qcodes.measure_v2.sinks.memory import MemorySink + +if TYPE_CHECKING: + from collections.abc import Generator, Sequence + + from qcodes.measure_v2.events import Event, RunStopReason + from qcodes.measure_v2.messages import Msg + from qcodes.measure_v2.sinks import DataSink + from qcodes.parameters import ParameterBase + + +_LOG = logging.getLogger(__name__) + +# Sentinels used to signal thread shutdown via the internal queues. +_SUBMIT_SHUTDOWN = object() +_PUBLISH_SHUTDOWN = object() + + +# ---------------------------------------------------------------------------- +# Public types +# ---------------------------------------------------------------------------- + + +RunStatus = Literal["queued", "running", "cancelling", "done", "error"] + + +class RunHandle: + """User-facing handle for a submitted run. + + The handle is returned immediately from :py:meth:`MeasurementEngine.submit` + and exposes futures for the dataset (resolved when a SQLite sink opens it) + and the run result (resolved when the run completes). 
+ """ + + def __init__( + self, + run_id: UUID, + cancel_event: threading.Event, + future: Future[RunResult], + dataset_future: Future[Any], + ) -> None: + self.run_id = run_id + self._cancel_event = cancel_event + self._cancel_reason_box: list[str] = [] + self.future = future + self.dataset = dataset_future + + def cancel(self, reason: str = "user") -> None: + """Request graceful cancellation. + + Sets the cancel flag; the engine throws + :py:class:`CancelRequested` into the plan at the next yield point. + The plan's ``try/finally`` cleanup runs before the run is reported + stopped. + """ + if not self._cancel_reason_box: + self._cancel_reason_box.append(reason) + self._cancel_event.set() + + def wait(self, timeout: float | None = None) -> RunResult: + """Block until the run completes; return the :py:class:`RunResult`.""" + return self.future.result(timeout=timeout) + + @property + def status(self) -> RunStatus: + if self.future.done(): + return "error" if self.future.exception() is not None else "done" + if self._cancel_event.is_set(): + return "cancelling" + return "running" + + +# ---------------------------------------------------------------------------- +# Internal submission state +# ---------------------------------------------------------------------------- + + +@dataclass +class _Submission: + run_id: UUID + plan: Generator[Msg, Any, None] + cancel_event: threading.Event + cancel_reason_box: list[str] + future: Future[RunResult] + dataset_future: Future[Any] + descriptor: Descriptor | None = None + state: dict[ParameterBase, Any] = field(default_factory=dict) + n_rows: int = 0 + _next_seq: int = 0 + started_at: float = 0.0 + + def next_seq(self) -> int: + s = self._next_seq + self._next_seq += 1 + return s + + +# ---------------------------------------------------------------------------- +# The engine +# ---------------------------------------------------------------------------- + + +class MeasurementEngine: + """Drives plans on a dedicated worker 
thread, publishes events to sinks. + + Args: + name: Engine name (for logging). + sinks: Sequence of sinks the engine publishes events to. Defaults + to a single :py:class:`MemorySink` — useful for tests; real + usage typically passes a :py:class:`SqliteSink`. + queue_maxsize: Bounded capacity of the publish queue. When full, + the engine thread blocks on event publication (back-pressure). + + """ + + def __init__( + self, + *, + name: str = "default", + sinks: Sequence[DataSink] | None = None, + queue_maxsize: int = 1024, + ) -> None: + self.name = name + # Sinks: critical first (so SQLite-style sinks process events before + # observers); within each criticality bucket, preserve registration order. + provided = list(sinks) if sinks is not None else [MemorySink()] + self._sinks: list[DataSink] = sorted( + provided, key=lambda s: 0 if is_critical(s) else 1 + ) + + self._submit_queue: queue.Queue[_Submission | object] = queue.Queue() + self._publish_queue: queue.Queue[Event | object] = queue.Queue( + maxsize=queue_maxsize + ) + + # Submissions live here until the publisher has finalized them (i.e., + # delivered RunStopped to all sinks). _current_sub is only cleared by + # the publisher, so handle.wait() returning is the signal that data + # has been durably written to all sinks. 
+ self._current_sub: _Submission | None = None + self._subs_by_id: dict[UUID, _Submission] = {} + self._current_lock = threading.Lock() + self._shutdown = threading.Event() + + self._engine_thread = threading.Thread( + target=self._engine_loop, + name=f"measure_v2-engine-{name}", + daemon=True, + ) + self._publisher_thread = threading.Thread( + target=self._publisher_loop, + name=f"measure_v2-publisher-{name}", + daemon=True, + ) + self._engine_thread.start() + self._publisher_thread.start() + + # ------------------------------------------------------------------ + # Public API + # ------------------------------------------------------------------ + + def submit( + self, + plan: Generator[Msg, Any, None], + *, + name: str = "", + ) -> RunHandle: + """Submit a plan for execution. + + Returns immediately with a :py:class:`RunHandle`. The plan is + executed on the engine's worker thread. In tracer scope, concurrent + submission raises ``RuntimeError`` (no queue yet). The ``name`` + kwarg is accepted for API symmetry with v1 but currently ignored. + + Raises: + RuntimeError: If a run is already in progress. + + """ + del name # accepted for API symmetry; v1 will use it + if self._shutdown.is_set(): + raise RuntimeError(f"Engine {self.name!r} is shut down") + + with self._current_lock: + if self._current_sub is not None: + raise RuntimeError( + f"Engine {self.name!r} is already running a plan. " + "Tracer-scope engines run one plan at a time; queueing is deferred to v1." 
+ ) + + run_id = uuid4() + cancel_event = threading.Event() + future: Future[RunResult] = Future() + dataset_future: Future[Any] = Future() + sub = _Submission( + run_id=run_id, + plan=plan, + cancel_event=cancel_event, + cancel_reason_box=[], + future=future, + dataset_future=dataset_future, + ) + handle = RunHandle( + run_id=run_id, + cancel_event=cancel_event, + future=future, + dataset_future=dataset_future, + ) + sub.cancel_reason_box = handle._cancel_reason_box # share reason box + with self._current_lock: + self._current_sub = sub + self._subs_by_id[run_id] = sub + self._submit_queue.put(sub) + return handle + + def shutdown(self, *, wait: bool = True, timeout: float = 30.0) -> None: + """Stop the engine. + + Cancels any in-flight run and waits up to ``timeout`` seconds for + the engine and publisher threads to exit. After the deadline: + logs and returns; cleanup may continue running in the background. + """ + if self._shutdown.is_set(): + return + self._shutdown.set() + + # Cancel any in-flight run. + with self._current_lock: + sub = self._current_sub + if sub is not None: + if not sub.cancel_reason_box: + sub.cancel_reason_box.append("engine_shutdown") + sub.cancel_event.set() + + # Signal both threads to exit after their current work. + self._submit_queue.put(_SUBMIT_SHUTDOWN) + + if wait: + self._engine_thread.join(timeout=timeout) + if self._engine_thread.is_alive(): + _LOG.warning( + "Engine thread did not exit within %.1fs; abandoning.", timeout + ) + # Publisher thread is signaled by the engine thread after it exits. 
+ remaining = max( + 0.1, timeout - (timeout if self._engine_thread.is_alive() else 0) + ) + self._publisher_thread.join(timeout=remaining) + if self._publisher_thread.is_alive(): + _LOG.warning( + "Publisher thread did not exit within %.1fs; abandoning.", + timeout, + ) + + # ------------------------------------------------------------------ + # Engine thread + # ------------------------------------------------------------------ + + def _engine_loop(self) -> None: + try: + while True: + item = self._submit_queue.get() + if item is _SUBMIT_SHUTDOWN: + break + sub = item # type: ignore[assignment] + assert isinstance(sub, _Submission) + try: + self._run_one_plan(sub) + except BaseException: + _LOG.exception("Unhandled error driving plan %s", sub.run_id) + finally: + self._publish_queue.put(_PUBLISH_SHUTDOWN) + + def _run_one_plan(self, sub: _Submission) -> None: + it = iter(sub.plan) + in_cleanup = False + send_value: Any = None + reason: RunStopReason = "completed" + error: BaseException | None = None + sub.started_at = time.time() + cancel_request_time: float | None = None + + # ``pending_msg`` lets us inject a message (e.g., the first message + # returned by a cleanup throw) into the next dispatch iteration + # without re-entering the get-next-message phase. + pending_msg: Msg | None = None + + try: + while True: + # Phase 1: obtain the next message. + if pending_msg is not None: + msg = pending_msg + pending_msg = None + else: + cancel_now = sub.cancel_event.is_set() and not in_cleanup + if cancel_now: + in_cleanup = True + reason = "cancelled" + cancel_request_time = time.time() + try: + msg = it.throw(CancelRequested(self._reason(sub))) + except (StopIteration, CancelRequested): + break + else: + try: + msg = it.send(send_value) + except StopIteration: + break + except CancelRequested: + if reason == "completed": + reason = "interrupted" + break + + # Re-check cancel between send and dispatch (critique fix). 
+ if sub.cancel_event.is_set() and not in_cleanup: + in_cleanup = True + reason = "cancelled" + cancel_request_time = time.time() + try: + msg = it.throw(CancelRequested(self._reason(sub))) + except (StopIteration, CancelRequested): + break + + # Phase 2: dispatch the message. + try: + send_value = self._dispatch(msg, sub) + except BaseException as exc: + if not in_cleanup: + error = exc + reason = "error" + in_cleanup = True + # Inject the error into the plan so its finally runs. + try: + pending_msg = it.throw(type(exc), exc) + send_value = None + except (StopIteration, CancelRequested): + break + finally: + # Engine guarantees the generator is closed. + try: + it.close() + except BaseException: + _LOG.exception("Error closing plan %s", sub.run_id) + stopped_at = time.time() + cancel_latency = ( + stopped_at - cancel_request_time + if cancel_request_time is not None + else None + ) + self._publish_run_stopped( + sub, reason, error, sub.started_at, stopped_at, cancel_latency + ) + self._complete_submission(sub, reason, error, stopped_at, cancel_latency) + + @staticmethod + def _reason(sub: _Submission) -> str: + return sub.cancel_reason_box[0] if sub.cancel_reason_box else "cancel" + + # ------------------------------------------------------------------ + # Dispatch + # ------------------------------------------------------------------ + + def _dispatch(self, msg: Msg, sub: _Submission) -> Any: + # Note: cancellable_sleep is the only place we honor cancellation + # within a message dispatch. All other dispatches are atomic. + match msg: + case Set(param, value): + param.set(value) + sub.state[param] = value + return None + case Read(params): + results: dict[ParameterBase, Any] = {} + # Sequential reads — tracer scope. v1 will group by underlying_instrument. 
+ for p in params: + results[p] = p.get() + sub.state.update(results) + return results + case Sleep(seconds): + self._cancellable_sleep(seconds, sub) + return None + case Emit(overrides): + self._handle_emit(sub, overrides) + return None + case OpenRun() as o: + self._handle_open_run(sub, o) + return None + case CloseRun(): + # Sink receives RunStopped via _publish_run_stopped at end of run; + # CloseRun on its own is a no-op for the engine. + return None + case _: + raise TypeError(f"Unknown plan message: {msg!r}") + + def _handle_open_run(self, sub: _Submission, msg: OpenRun) -> None: + # Build the descriptor and validate. + descriptor = Descriptor( + setpoints=msg.setpoint_params, + measured=msg.measured_params, + shapes=msg.shapes, + ) + self._check_no_duplicate_register_names(descriptor) + sub.descriptor = descriptor + self._publish( + RunStarted( + run_id=sub.run_id, + name=msg.name, + descriptor=descriptor, + exp=msg.exp, + write_period=msg.write_period, + started_at=sub.started_at, + ) + ) + + def _handle_emit( + self, sub: _Submission, overrides: dict[ParameterBase, Any] | Any + ) -> None: + descriptor = sub.descriptor + if descriptor is None: + raise PlanError("Emit yielded before OpenRun") + + declared: set[ParameterBase] = set(descriptor.setpoints) | set( + descriptor.measured + ) + + # Validate overrides reference only declared params. + for p in overrides: + if p not in declared: + raise PlanError( + f"Emit overrides parameter {p.register_name!r} " + "not declared in the run descriptor" + ) + + # Build the snapshot row. + snapshot: dict[ParameterBase, Any] = {} + for p in descriptor.setpoints: + if p in overrides: + snapshot[p] = overrides[p] + elif p in sub.state: + snapshot[p] = sub.state[p] + else: + raise PlanError( + f"Emit before setpoint {p.register_name!r} was set; " + "engine state has no value for it." 
+ ) + for p in descriptor.measured: + if p in overrides: + snapshot[p] = overrides[p] + elif p in sub.state: + snapshot[p] = sub.state[p] + else: + raise PlanError( + f"Emit before measured parameter {p.register_name!r} " + "was read; engine state has no value for it." + ) + + seq = sub.next_seq() + sub.n_rows += 1 + self._publish(RowEmitted(run_id=sub.run_id, snapshot=snapshot, seq=seq)) + + def _cancellable_sleep(self, seconds: float, sub: _Submission) -> None: + if seconds <= 0: + return + deadline = time.monotonic() + seconds + # Chunked checks: ~100ms granularity, so cancel during a long sleep + # is bounded by that chunk. + while True: + remaining = deadline - time.monotonic() + if remaining <= 0: + return + if sub.cancel_event.is_set(): + return # Let the main loop pick up the cancel on next iteration. + time.sleep(min(remaining, 0.1)) + + @staticmethod + def _check_no_duplicate_register_names(descriptor: Descriptor) -> None: + seen: dict[str, ParameterBase] = {} + for p in (*descriptor.setpoints, *descriptor.measured): + name = p.register_name + if name in seen and seen[name] is not p: + raise PlanError( + f"Two distinct parameters share register_name {name!r}: " + f"{seen[name]} and {p}." + ) + seen[name] = p + + # ------------------------------------------------------------------ + # Publish & sink lifecycle + # ------------------------------------------------------------------ + + def _publish(self, event: Event) -> None: + # Blocks if the publish queue is full (back-pressure). Acceptable + # default for DAQ — better than dropping data. + self._publish_queue.put(event) + + def _publish_run_stopped( + self, + sub: _Submission, + reason: RunStopReason, + error: BaseException | None, + started_at: float, + stopped_at: float, + cancel_latency: float | None, + ) -> None: + # Always emit RunStopped, even if OpenRun was never published (e.g., + # the plan errored before yielding OpenRun). Sinks must handle this + # gracefully. 
+ self._publish_queue.put( + RunStopped( + run_id=sub.run_id, + reason=reason, + error=error, + started_at=started_at, + stopped_at=stopped_at, + cancel_latency=cancel_latency, + n_rows_emitted=sub.n_rows, + ) + ) + + def _complete_submission( + self, + sub: _Submission, + reason: RunStopReason, + error: BaseException | None, + stopped_at: float, + cancel_latency: float | None, + ) -> None: + # No-op: the publisher thread now owns final-state transition so + # that handle.wait() returning is a guarantee that all sinks have + # processed RunStopped. See _finalize_after_run_stopped. + del sub, reason, error, stopped_at, cancel_latency + + def _finalize_after_run_stopped(self, event: RunStopped) -> None: + """Called by the publisher after RunStopped has been dispatched. + + Constructs the :py:class:`RunResult`, resolves the dataset future + (to ``None`` if no sink claimed it), completes the run future, + and releases the engine slot. + """ + with self._current_lock: + sub = self._subs_by_id.pop(event.run_id, None) + if ( + self._current_sub is not None + and self._current_sub.run_id == event.run_id + ): + self._current_sub = None + if sub is None: + return # already finalized (defensive) + if not sub.dataset_future.done(): + sub.dataset_future.set_result(None) + if not sub.future.done(): + sub.future.set_result( + RunResult( + run_id=event.run_id, + reason=event.reason, + error=event.error, + started_at=event.started_at, + stopped_at=event.stopped_at, + cancel_latency=event.cancel_latency, + n_rows_emitted=event.n_rows_emitted, + ) + ) + + def _publisher_loop(self) -> None: + while True: + item = self._publish_queue.get() + if item is _PUBLISH_SHUTDOWN: + break + event = item # type: ignore[assignment] + for sink in self._sinks: + try: + sink(event) # type: ignore[arg-type] + except BaseException: + if is_critical(sink): + _LOG.exception( + "Critical sink %r raised on %s; " + "run integrity may be compromised.", + sink, + type(event).__name__, + ) + # Tracer scope: 
log only. v1 will escalate to abort the run. + else: + _LOG.exception( + "Non-critical sink %r raised on %s; continuing.", + sink, + type(event).__name__, + ) + # After RunStarted is delivered, ask any dataset-providing sink + # (e.g., SqliteSink) for the dataset and resolve handle.dataset. + if isinstance(event, RunStarted): + self._resolve_dataset_for(event.run_id) + # Finalize after all sinks have seen RunStopped — this is what + # makes handle.wait() return only when data is durable. + if isinstance(event, RunStopped): + self._finalize_after_run_stopped(event) + + def _resolve_dataset_for(self, run_id: UUID) -> None: + """Resolve ``handle.dataset`` from the first sink that has one.""" + with self._current_lock: + sub = self._subs_by_id.get(run_id) + if sub is None or sub.dataset_future.done(): + return + for sink in self._sinks: + provider = getattr(sink, "dataset_for", None) + if provider is None: + continue + try: + ds = provider(run_id) + except BaseException: + _LOG.exception("Sink %r raised in dataset_for; ignoring.", sink) + continue + if ds is not None: + sub.dataset_future.set_result(ds) + return diff --git a/src/qcodes/measure_v2/events.py b/src/qcodes/measure_v2/events.py new file mode 100644 index 000000000000..4020cbf64f22 --- /dev/null +++ b/src/qcodes/measure_v2/events.py @@ -0,0 +1,115 @@ +"""Event vocabulary published by the engine to sinks. + +Sinks receive instances of these dataclasses via their ``__call__``. The +event stream for a single run is always: + + RunStarted -> RowEmitted* -> RunStopped + +with exactly one ``RunStarted`` and exactly one ``RunStopped`` per +``run_id``. The publisher thread guarantees ordering. + +See ``files/async-measurement-architecture.md`` §6 for the full sink and +event protocol. 
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Literal + +if TYPE_CHECKING: + from collections.abc import Mapping + from uuid import UUID + + from qcodes.dataset.descriptions.versioning.rundescribertypes import Shapes + from qcodes.dataset.experiment_container import Experiment + from qcodes.parameters import ParameterBase + + +RunStopReason = Literal[ + "completed", + "cancelled", + "interrupted", + "error", + "engine_shutdown", + "cancelled_before_start", +] + + +@dataclass(frozen=True) +class Descriptor: + """Schema declaration for a run. + + Built by the ``run(...)`` decorator from either explicit kwargs or a + ``Describe`` first message in the plan. Attached to ``RunStarted`` so + sinks can register the dataset before any rows arrive. + """ + + setpoints: tuple[ParameterBase, ...] + measured: tuple[ParameterBase, ...] + shapes: Shapes | None = None + + +@dataclass(frozen=True) +class RunStarted: + """A run has been opened. Sinks should set up dataset state here.""" + + run_id: UUID + name: str + descriptor: Descriptor + exp: Experiment | None + write_period: float | None + started_at: float + + +@dataclass(frozen=True) +class RowEmitted: + """A single row of measurement data, snapshotted from the engine state. + + ``snapshot`` is keyed by :py:class:`~qcodes.parameters.ParameterBase` + objects from the descriptor. Each value is whatever ``param.get()`` + returned for measured params, or the last ``Set`` value for setpoints. + Array-valued measurements are stored as ndarrays — the SQLite sink + fans them out into multiple dataset rows via existing + ``DataSaver.add_result`` logic. + """ + + run_id: UUID + snapshot: Mapping[ParameterBase, Any] + seq: int + + +@dataclass(frozen=True) +class RunStopped: + """The run is over. 
Sinks should finalize/close dataset state.""" + + run_id: UUID + reason: RunStopReason + error: BaseException | None + started_at: float + stopped_at: float + # Tracer scope: cancel_latency and n_rows_emitted are present in the + # event but may be left None / 0 by the v0 engine. + cancel_latency: float | None = None + n_rows_emitted: int = 0 + + +Event = RunStarted | RowEmitted | RunStopped +"""Union of all event types delivered to sinks.""" + + +@dataclass(frozen=True) +class RunResult: + """Final outcome of a run, returned to the user via ``RunHandle``. + + A condensed view of the run's lifecycle, suitable for assertions in + tests and for synchronous return values from ``qc.measure_v2.scan``. + """ + + run_id: UUID + reason: RunStopReason + error: BaseException | None + started_at: float + stopped_at: float + cancel_latency: float | None = None + n_rows_emitted: int = 0 diff --git a/src/qcodes/measure_v2/exceptions.py b/src/qcodes/measure_v2/exceptions.py new file mode 100644 index 000000000000..3b3dcb462b2c --- /dev/null +++ b/src/qcodes/measure_v2/exceptions.py @@ -0,0 +1,40 @@ +"""Exceptions used by the measure_v2 plan/engine API. + +Plan authors only need to know about :py:class:`CancelRequested`: catch it +only if you need to distinguish cancel from other errors, and always +re-raise. Letting it propagate naturally through ``try/finally`` is the +normal pattern. +""" + +from __future__ import annotations + + +class CancelRequested(BaseException): + """Thrown into a plan generator by the engine to request cancellation. + + Inherits from :py:class:`BaseException` (not :py:class:`Exception`) so + that broad ``except Exception:`` clauses in user plans do not + accidentally swallow cancellation. Plans that need to distinguish + cancellation from other errors should catch this explicitly and always + re-raise. + + Attributes: + reason: A short string describing why cancellation was requested + (e.g., ``"user"``, ``"engine_shutdown"``, ``"keyboard_interrupt"``). 
+ + """ + + def __init__(self, reason: str = "cancel") -> None: + super().__init__(reason) + self.reason = reason + + +class PlanError(Exception): + """Raised when a plan is malformed or its schema is invalid. + + Examples: + - A plan decorated with ``run(...)`` neither passes explicit + ``setpoints``/``measured`` args nor yields ``Describe`` first. + - The descriptor contains two parameters sharing a ``register_name``. + + """ diff --git a/src/qcodes/measure_v2/messages.py b/src/qcodes/measure_v2/messages.py new file mode 100644 index 000000000000..8b7170202279 --- /dev/null +++ b/src/qcodes/measure_v2/messages.py @@ -0,0 +1,112 @@ +"""Plan message vocabulary. + +Plans are Python generators that yield instances of these frozen dataclasses. +A measurement engine iterates the generator, dispatching each message to +instruments and returning results to the plan via :py:meth:`generator.send`. + +The vocabulary is deliberately small. The tracer-bullet scope is six message +types; ``Call`` and ``Describe`` are deferred to v1. + +See the design document at +``files/async-measurement-architecture.md`` (in the design session +workspace) for the full rationale. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from collections.abc import Mapping + + from qcodes.dataset.descriptions.versioning.rundescribertypes import Shapes + from qcodes.dataset.experiment_container import Experiment + from qcodes.parameters import ParameterBase + + +@dataclass(frozen=True) +class Set: + """Set a single parameter to a value. + + The engine calls ``param.set(value)`` and records the value in its + per-run state cache under ``param.register_name``. + """ + + param: ParameterBase + value: Any + + +@dataclass(frozen=True) +class Read: + """Read one or more parameters. 

    The engine calls ``param.get()`` for each parameter and returns a
    ``dict[ParameterBase, Any]`` to the plan via ``generator.send``.
    The tracer engine reads sequentially; v1 plans to parallelize reads
    grouped by ``underlying_instrument``.

    NOTE(review): stated intent only — the tracer engine does not yet
    reject reads of parameters missing from the run descriptor's
    ``measured`` tuple; only ``Emit`` is validated against the descriptor.
    """

    params: tuple[ParameterBase, ...]


@dataclass(frozen=True)
class Sleep:
    """Sleep for a duration.

    The engine implements this as a cancellable sleep (chunked checks of the
    cancel flag every ~100 ms) so a long sleep does not delay cancellation.
    """

    seconds: float


@dataclass(frozen=True)
class Emit:
    """Emit one dataset row for the current run.

    The row is built from the engine's per-run state cache (last value seen
    for each declared parameter via :py:class:`Set` or :py:class:`Read`),
    overlaid with any ``overrides``. The completed row is forwarded to all
    sinks as a :py:class:`~qcodes.measure_v2.events.RowEmitted` event.

    ``overrides`` may only reference parameters that are already declared in
    the run descriptor; providing an undeclared parameter raises before the
    row is published. There is no lazy schema registration.
    """

    overrides: Mapping[ParameterBase, Any] = field(default_factory=dict)


@dataclass(frozen=True)
class OpenRun:
    """Open a run.

    Emitted by the ``run(...)`` decorator at the start of plan execution.
    The descriptor declares the run's schema (setpoint and measured
    parameters, plus optional shapes). The sink uses it to register the
    dataset; the engine uses it to validate ``Emit`` messages
    (``Set``/``Read`` validation is deferred to v1).
    """

    name: str
    setpoint_params: tuple[ParameterBase, ...]
    measured_params: tuple[ParameterBase, ...]
    exp: Experiment | None = None
    shapes: Shapes | None = None
    write_period: float | None = None


@dataclass(frozen=True)
class CloseRun:
    """Close the current run.
+ + Emitted by the ``run(...)`` decorator at the end of plan execution + (success, error, or cancel). The sink finalizes the dataset. + """ + + +Msg = Set | Read | Sleep | Emit | OpenRun | CloseRun +"""Union of all plan message types. A plan is a ``Generator[Msg, Any, None]``.""" diff --git a/src/qcodes/measure_v2/plans/__init__.py b/src/qcodes/measure_v2/plans/__init__.py new file mode 100644 index 000000000000..07043b6df2a8 --- /dev/null +++ b/src/qcodes/measure_v2/plans/__init__.py @@ -0,0 +1,10 @@ +"""Plan-builders for common scan patterns. + +Plan-builders are functions that return plan generators. They never yield +``OpenRun``/``CloseRun`` themselves — wrap with :py:func:`qcodes.measure_v2.run` +to open the run. +""" + +from qcodes.measure_v2.plans.scan import scan_1d + +__all__ = ["scan_1d"] diff --git a/src/qcodes/measure_v2/plans/scan.py b/src/qcodes/measure_v2/plans/scan.py new file mode 100644 index 000000000000..f1d4d7761ef0 --- /dev/null +++ b/src/qcodes/measure_v2/plans/scan.py @@ -0,0 +1,53 @@ +"""Scan plan-builders.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from qcodes.measure_v2.messages import Emit, Msg, Read, Set, Sleep + +if TYPE_CHECKING: + from collections.abc import Generator, Sequence + + from qcodes.dataset.dond.sweeps import AbstractSweep + from qcodes.parameters import ParameterBase + + +def scan_1d( + sweep: AbstractSweep, + measured: Sequence[ParameterBase], + *, + ramp_down_value: float = 0.0, +) -> Generator[Msg, Any, None]: + """Sweep one parameter, reading measured parameters at each setpoint. + + Yields one ``Set``/(optional ``Sleep``)/``Read``/``Emit`` cycle per + setpoint. In its ``finally`` block, **always** yields a final ``Set`` + that drives the swept parameter to ``ramp_down_value`` (default 0.0). + This guarantee is the basis of the tracer's cancel-safety contract: + a cancelled scan returns the swept parameter to a known state. 
+ + The plan-builder does **not** yield ``OpenRun``/``CloseRun``; wrap with + :py:func:`qcodes.measure_v2.run` to open a run for the dataset. + + Args: + sweep: The :py:class:`~qcodes.dataset.dond.sweeps.AbstractSweep` + describing the swept parameter and its setpoint values. + measured: Parameters to read at each setpoint. + ramp_down_value: Value to set the swept parameter to on exit + (success, error, or cancel). Defaults to 0.0. + + Yields: + Plan messages. + + """ + try: + for v in sweep.get_setpoints(): + yield Set(sweep.param, float(v)) + if sweep.delay > 0: + yield Sleep(sweep.delay) + yield Read(tuple(measured)) + yield Emit() + finally: + # Mandatory cleanup: leave the swept parameter at a known value. + yield Set(sweep.param, ramp_down_value) diff --git a/src/qcodes/measure_v2/sinks/__init__.py b/src/qcodes/measure_v2/sinks/__init__.py new file mode 100644 index 000000000000..8cc94251687e --- /dev/null +++ b/src/qcodes/measure_v2/sinks/__init__.py @@ -0,0 +1,16 @@ +"""Sinks consume events from the engine's publisher thread. + +A sink is any callable that accepts a single :py:class:`Event`. Sinks may +optionally declare a ``critical`` attribute (default ``False``): critical +sinks abort the run on failure, non-critical sinks merely log. + +The default storage sink is :py:class:`SqliteSink`. The simplest sink is +:py:class:`MemorySink`, which records events into a list — useful for +tests and for in-memory consumption. +""" + +from qcodes.measure_v2.sinks.memory import MemorySink +from qcodes.measure_v2.sinks.protocol import DataSink, is_critical +from qcodes.measure_v2.sinks.sqlite import SqliteSink + +__all__ = ["DataSink", "MemorySink", "SqliteSink", "is_critical"] diff --git a/src/qcodes/measure_v2/sinks/memory.py b/src/qcodes/measure_v2/sinks/memory.py new file mode 100644 index 000000000000..c8770cdfa5b1 --- /dev/null +++ b/src/qcodes/measure_v2/sinks/memory.py @@ -0,0 +1,58 @@ +"""In-memory sink: records every event into a list. 
+ +Useful for testing plan-builders and the engine without a database, and +for users who want to consume the event stream programmatically (e.g., +into a custom analysis pipeline) without going through the SQLite sink. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from qcodes.measure_v2.events import RowEmitted, RunStarted, RunStopped + +if TYPE_CHECKING: + from qcodes.measure_v2.events import Event + + +class MemorySink: + """A sink that records all events into a list. + + Non-critical: an exception in a downstream sink does not affect the + in-memory record. The recorded list is the authoritative log of what + the engine emitted. + + Thread-safety: this sink is invoked from the engine's publisher thread, + sequentially per event. Reads of ``events`` from other threads after + the run has completed are safe; concurrent reads during a running + measurement may see a partial list (it's a plain Python list). + """ + + critical: bool = False + + def __init__(self) -> None: + self.events: list[Event] = [] + + def __call__(self, event: Event) -> None: + self.events.append(event) + + # --- Convenience accessors used heavily by tests --- + + @property + def rows(self) -> list[RowEmitted]: + """All :py:class:`RowEmitted` events, in arrival order.""" + return [e for e in self.events if isinstance(e, RowEmitted)] + + @property + def starts(self) -> list[RunStarted]: + """All :py:class:`RunStarted` events, in arrival order.""" + return [e for e in self.events if isinstance(e, RunStarted)] + + @property + def stops(self) -> list[RunStopped]: + """All :py:class:`RunStopped` events, in arrival order.""" + return [e for e in self.events if isinstance(e, RunStopped)] + + def clear(self) -> None: + """Drop all recorded events (e.g., between runs in a test).""" + self.events.clear() diff --git a/src/qcodes/measure_v2/sinks/protocol.py b/src/qcodes/measure_v2/sinks/protocol.py new file mode 100644 index 000000000000..4f0244fe4ac7 --- /dev/null +++ 
b/src/qcodes/measure_v2/sinks/protocol.py @@ -0,0 +1,42 @@ +"""Sink protocol and the criticality helper. + +A sink is any callable that accepts a single :py:class:`Event`. There is +no required base class. Sinks MAY declare a ``critical`` attribute +(class- or instance-level) to opt into "abort run on failure" semantics; +sinks without the attribute default to non-critical (failures are logged +but the run continues). + +The engine inspects ``critical`` via :py:func:`is_critical`, which uses +``getattr`` with a default — so plain functions work as sinks without any +boilerplate. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Protocol, runtime_checkable + +if TYPE_CHECKING: + from qcodes.measure_v2.events import Event + + +@runtime_checkable +class DataSink(Protocol): + """A callable that consumes events. + + Implementations may be plain functions or classes implementing + ``__call__``. Class-based sinks may declare ``critical: bool`` to + opt into "abort run on failure" semantics. + """ + + def __call__(self, event: Event, /) -> None: ... + + +def is_critical(sink: DataSink) -> bool: + """Return whether a sink declares itself as critical. + + Returns ``getattr(sink, 'critical', False)``. Critical sinks abort the + run if they raise on ``RunStarted``; their failures during the run + are propagated to ``RunResult.error`` while still allowing other + sinks to finish. + """ + return bool(getattr(sink, "critical", False)) diff --git a/src/qcodes/measure_v2/sinks/sqlite.py b/src/qcodes/measure_v2/sinks/sqlite.py new file mode 100644 index 000000000000..bb36639b8058 --- /dev/null +++ b/src/qcodes/measure_v2/sinks/sqlite.py @@ -0,0 +1,165 @@ +"""Default storage sink: writes the event stream to a SQLite dataset. + +Wraps the existing :py:class:`~qcodes.dataset.measurements.Measurement` / +:py:class:`~qcodes.dataset.measurements.DataSaver` plumbing. 
"""Default storage sink: writes the event stream to a SQLite dataset.

Wraps the existing :py:class:`~qcodes.dataset.measurements.Measurement` /
:py:class:`~qcodes.dataset.measurements.DataSaver` plumbing. The bulk of
the work is the schema-registration lifecycle:

- On ``RunStarted``: build a ``Measurement``, register each setpoint and
  measured parameter (with setpoint dependencies for measured params),
  enter the ``Runner`` context manager (which is where the station
  snapshot is taken and the ``DataSaver`` is created).
- On ``RowEmitted``: forward the row via ``DataSaver.add_result``. All
  array fan-out, type coercion, and write-batching logic stays in
  ``DataSaver``.
- On ``RunStopped``: exit the ``Runner`` context manager (success or
  error path); the dataset's completion timestamp is set.

**Threading.** SQLite connections are bound to the thread that created
them (``check_same_thread=True``). Since this sink runs on the engine's
publisher thread, it opens its own connection lazily on first use and
creates experiments against it. Datasets opened by this sink can be
read back from other threads via their in-memory caches, or re-loaded
via ``load_by_id`` on the consuming thread.

The sink also implements :py:meth:`dataset_for` so the engine's publisher
can resolve ``RunHandle.dataset`` to the created dataset right after
``RunStarted`` is processed.
"""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any

import qcodes as qc
from qcodes.dataset.experiment_container import load_or_create_experiment
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.sqlite.database import connect
from qcodes.measure_v2.events import RowEmitted, RunStarted, RunStopped

if TYPE_CHECKING:
    from uuid import UUID

    from qcodes.dataset.data_set_protocol import DataSetProtocol
    from qcodes.dataset.measurements import DataSaver
    from qcodes.dataset.sqlite.connection import AtomicConnection
    from qcodes.measure_v2.events import Event


_LOG = logging.getLogger(__name__)


class SqliteSink:
    """Critical sink that writes the event stream to a QCoDeS SQLite dataset.

    Tracer-bullet scope: handles 1D scans of scalar-valued parameters.
    Array-valued parameters work transparently through ``DataSaver`` but
    are not yet covered by tests.

    Args:
        experiment_name: Name of the experiment all runs from this sink
            belong to. The experiment is loaded or created on the publisher
            thread on first ``RunStarted``.
        sample_name: Sample name for the experiment.

    """

    # Opts into "abort run on failure" semantics (see sinks/protocol.py):
    # if durable storage breaks, the run must not continue silently.
    critical: bool = True

    def __init__(
        self,
        *,
        experiment_name: str = "measure_v2",
        sample_name: str = "default",
    ) -> None:
        self._experiment_name = experiment_name
        self._sample_name = sample_name
        # Opened lazily on the publisher thread; SQLite connections are
        # thread-bound, so it must never be created on the caller's thread.
        self._conn: AtomicConnection | None = None
        # Per-run state, keyed by run_id. Runners are typed ``Any`` —
        # presumably to avoid importing the Runner type at runtime; TODO
        # confirm and tighten the annotation if a cheap import exists.
        self._runners: dict[UUID, Any] = {}
        self._savers: dict[UUID, DataSaver] = {}
        self._datasets: dict[UUID, DataSetProtocol] = {}

    def __call__(self, event: Event) -> None:
        """Dispatch one event to the matching lifecycle handler.

        Event types other than RunStarted/RowEmitted/RunStopped are
        silently ignored.
        """
        if isinstance(event, RunStarted):
            self._open(event)
        elif isinstance(event, RowEmitted):
            self._add_row(event)
        elif isinstance(event, RunStopped):
            self._close(event)

    def dataset_for(self, run_id: UUID) -> DataSetProtocol | None:
        """Return the dataset associated with ``run_id``, if one was opened."""
        return self._datasets.get(run_id)

    # ------------------------------------------------------------------
    # Internal lifecycle
    # ------------------------------------------------------------------

    def _ensure_connection(self) -> AtomicConnection:
        """Open a SQLite connection on the current (publisher) thread.

        SQLite connections are thread-bound, so this connection is only
        usable from the same thread that calls this method first.
        """
        if self._conn is None:
            db_path = qc.config["core"]["db_location"]
            self._conn = connect(db_path)
        return self._conn

    def _open(self, event: RunStarted) -> None:
        """Create/load the experiment, register the schema, enter the Runner."""
        descriptor = event.descriptor
        conn = self._ensure_connection()

        # Load or create the experiment on the publisher thread so its
        # connection is usable here. Note: if event.exp was provided from
        # the main thread, we ignore its identity and use ours; this is a
        # tracer limitation.
        exp_name = self._experiment_name
        sample_name = self._sample_name
        if event.exp is not None:
            exp_name = event.exp.name
            sample_name = event.exp.sample_name
        exp = load_or_create_experiment(
            experiment_name=exp_name,
            sample_name=sample_name,
            conn=conn,
        )

        meas = Measurement(name=event.name or "results", exp=exp)
        if event.write_period is not None:
            meas.write_period = event.write_period

        # Register setpoints first; measured params reference them.
        for p in descriptor.setpoints:
            meas.register_parameter(p)
        for p in descriptor.measured:
            meas.register_parameter(p, setpoints=descriptor.setpoints)

        # meas.run() returns a Runner (the actual context manager).
        # Runner.__enter__ returns the DataSaver; Runner.__exit__ commits.
        # The context manager protocol is driven by hand because enter and
        # exit happen in different events, not in one lexical scope.
        runner = meas.run()
        saver: DataSaver = runner.__enter__()
        self._runners[event.run_id] = runner
        self._savers[event.run_id] = saver
        self._datasets[event.run_id] = saver.dataset

    def _add_row(self, event: RowEmitted) -> None:
        """Forward one emitted row to the run's ``DataSaver``."""
        saver = self._savers.get(event.run_id)
        if saver is None:
            # Row for a run we never opened (or already closed): drop it
            # rather than crash the publisher thread.
            _LOG.warning("RowEmitted for unknown run %s; dropping.", event.run_id)
            return
        # snapshot is {ParameterBase: value} → tuples for add_result.
        saver.add_result(*event.snapshot.items())

    def _close(self, event: RunStopped) -> None:
        """Exit the Runner context, committing (or error-closing) the dataset."""
        runner = self._runners.pop(event.run_id, None)
        self._savers.pop(event.run_id, None)
        # Keep the dataset reference around so handle.dataset.result() stays valid.
        if runner is None:
            # Duplicate or unmatched RunStopped: nothing was opened, so
            # there is nothing to commit.
            return
        # Synthesize the (type, value, traceback) triple __exit__ expects;
        # no traceback object is available for an error carried in an event.
        exc_type = type(event.error) if event.error is not None else None
        try:
            runner.__exit__(exc_type, event.error, None)
        except BaseException:
            # Log with traceback, then re-raise. NOTE(review): presumably
            # the publisher treats this as a critical-sink failure and
            # surfaces it on the run result — confirm against the engine.
            _LOG.exception("Error closing dataset for run %s", event.run_id)
            raise
def drive_plan(
    plan: Generator[Msg, Any, None],
    *,
    on_read: Callable[[tuple[ParameterBase, ...]], dict[ParameterBase, Any]]
    | None = None,
    cancel_after: int | None = None,
    cancel_reason: str = "test",
) -> DrivePlanResult:
    """Run a plan to exhaustion, faking the engine's side of the protocol.

    A tiny in-memory stand-in for the engine, intended for unit-testing
    plan-builders. Every message the plan yields is recorded in order.
    ``Read`` messages are answered either by ``on_read`` (called with the
    message's params tuple, returning a param→value dict) or, by default,
    with ``0.0`` for every requested param. Setting ``cancel_after``
    simulates engine-side cancellation: once that many messages have been
    recorded, :py:class:`~qcodes.measure_v2.exceptions.CancelRequested`
    is thrown into the plan, whose ``finally`` blocks then run — any
    messages they yield are recorded too.

    Args:
        plan: The plan generator to drive.
        on_read: Optional synthesizer for ``Read`` responses; defaults to
            mapping every requested param to ``0.0``.
        cancel_after: Message count at which to throw ``CancelRequested``
            into the plan (``None`` disables cancellation).
        cancel_reason: Reason string attached to the thrown
            ``CancelRequested``.

    Returns:
        A :py:class:`DrivePlanResult` holding the recorded messages and
        the cancel state. Well-behaved plans either run to completion
        (``cancelled=False``) or re-raise the ``CancelRequested`` after
        their cleanup (``cancelled=True``).

    """
    outcome = DrivePlanResult()
    log = outcome.messages

    def _response_for(message: Msg) -> Any:
        # Only Read messages receive a non-None send value.
        if not isinstance(message, Read):
            return None
        if on_read is None:
            return dict.fromkeys(message.params, 0.0)
        return on_read(message.params)

    try:
        message = next(plan)
        while True:
            log.append(message)
            if cancel_after is not None and len(log) == cancel_after:
                outcome.cancelled = True
                message = plan.throw(CancelRequested(cancel_reason))
            else:
                message = plan.send(_response_for(message))
    except StopIteration:
        # Plan finished (normally, or cleanly after a throw).
        pass
    except CancelRequested:
        # Plan re-raised after its finally; this is the well-behaved path.
        outcome.cancelled = True
    return outcome
Engine-level submission with ``MemorySink`` produces the expected events. +""" + +from __future__ import annotations + +import time + +import pytest + +import qcodes as qc +from qcodes import measure_v2 as mv2 +from qcodes.dataset.dond.sweeps import LinSweep +from qcodes.measure_v2 import ( + MeasurementEngine, + MemorySink, + RowEmitted, + SqliteSink, +) + + +@pytest.fixture +def fresh_engine(empty_db): + """Acceptance engine: SqliteSink + isolated db, separate from default.""" + del empty_db + sink = SqliteSink(experiment_name="acceptance", sample_name="tracer") + eng = MeasurementEngine(sinks=[sink]) + try: + yield eng + finally: + eng.shutdown(wait=True, timeout=5.0) + + +def _make_params(): + g = qc.Parameter("g", initial_value=0.0, set_cmd=None, get_cmd=None) + i = qc.Parameter("i", get_cmd=lambda: g.cache.get() ** 2) + return g, i + + +def test_acceptance_blocking_scan_returns_populated_dataset(fresh_engine) -> None: + """Acceptance #1: ``scan(..., wait=True)`` returns a real dataset.""" + g, i = _make_params() + + ds = mv2.scan( + LinSweep(g, 0.0, 1.0, 11), + measure=[i], + name="acceptance-1", + engine=fresh_engine, + ) + + assert ds is not None + # Verify shape via the cache (publisher-thread connection → cache is the + # canonical way to read on the main thread). 
+ data = ds.cache.data() + assert "i" in data + assert len(data["i"]["g"]) == 11 + assert len(data["i"]["i"]) == 11 + + +def test_acceptance_nonblocking_cancel_preserves_partial_data( + fresh_engine, +) -> None: + """Acceptance #2: cancel mid-scan; finally ramps g to 0; partial data persisted.""" + g, i = _make_params() + + handle = mv2.scan( + LinSweep(g, 0.0, 1.0, 1001, delay=0.01), + measure=[i], + wait=False, + name="acceptance-2", + engine=fresh_engine, + ) + assert isinstance(handle, mv2.RunHandle) + + time.sleep(0.1) # let some rows accumulate + handle.cancel() + result = handle.wait(timeout=5.0) + + assert result.reason == "cancelled" + # Cleanup contract: scan_1d's finally ramped g back to 0 + assert g.cache.get() == pytest.approx(0.0) + + # Partial dataset is preserved and committed + ds = handle.dataset.result(timeout=1.0) + assert ds is not None + assert ds.completed + assert len(ds.cache.data()["i"]["g"]) >= 1 + + +def test_acceptance_engine_with_memorysink() -> None: + """Acceptance #3: engine-level submission with MemorySink yields events.""" + g, i = _make_params() + sink = MemorySink() + eng = MeasurementEngine(sinks=[sink]) + try: + plan = mv2.run(name="acceptance-3", setpoints=(g,), measured=(i,))( + mv2.scan_1d(LinSweep(g, 0.0, 1.0, 11), [i]) + ) + result = eng.submit(plan).wait(timeout=10.0) + finally: + eng.shutdown(wait=True, timeout=5.0) + + assert result.reason == "completed" + rows = [e for e in sink.events if isinstance(e, RowEmitted)] + assert len(rows) == 11 + # Each row's snapshot has both g and i + assert all(g in r.snapshot and i in r.snapshot for r in rows) + # i = g**2 — first row is (0,0), last is (1,1) + assert rows[0].snapshot[g] == pytest.approx(0.0) + assert rows[-1].snapshot[g] == pytest.approx(1.0) + assert rows[-1].snapshot[i] == pytest.approx(1.0) diff --git a/tests/measure_v2/test_engine.py b/tests/measure_v2/test_engine.py new file mode 100644 index 000000000000..52ec716d5fa3 --- /dev/null +++ 
b/tests/measure_v2/test_engine.py @@ -0,0 +1,302 @@ +"""L2 tests for ``MeasurementEngine`` using ``MemorySink``. + +These tests run the full engine + publisher + plan pipeline on real +threads, but with software-only parameters and an in-memory sink (no +database). They validate the threading model, cancel semantics, and +event ordering — the things L1 tests can't reach. +""" + +from __future__ import annotations + +import time + +import pytest + +from qcodes.dataset.dond.sweeps import LinSweep +from qcodes.measure_v2 import ( + Emit, + MeasurementEngine, + MemorySink, + PlanError, + Read, + RowEmitted, + RunStarted, + RunStopped, + Set, + run, + scan_1d, +) +from qcodes.parameters import Parameter, ParameterBase + + +@pytest.fixture +def engine() -> MeasurementEngine: + sink = MemorySink() + eng = MeasurementEngine(sinks=[sink]) + # Stash the sink on the engine for test convenience. + eng._test_sink = sink # type: ignore[attr-defined] + try: + yield eng + finally: + eng.shutdown(wait=True, timeout=5.0) + + +def _make_params() -> tuple[ParameterBase, ParameterBase]: + g = Parameter("g", initial_value=0.0, set_cmd=None, get_cmd=None) + # i computes from g's cache, so we can assert dependent values + i = Parameter("i", get_cmd=lambda: g.cache.get() ** 2) + return g, i + + +# ---------------------------------------------------------------------------- +# Happy path +# ---------------------------------------------------------------------------- + + +def test_engine_executes_simple_1d_scan(engine: MeasurementEngine) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=2.0, num_points=5) + plan = run(name="t", setpoints=(g,), measured=(i,), shapes={"i": (5,)})( + scan_1d(sweep, [i]) + ) + + handle = engine.submit(plan) + result = handle.wait(timeout=10.0) + + assert result.reason == "completed" + assert result.n_rows_emitted == 5 + sink: MemorySink = engine._test_sink # type: ignore[attr-defined] + assert len(sink.rows) == 5 + assert len(sink.starts) == 
1 + assert len(sink.stops) == 1 + # i = g**2; sweep is 0, 0.5, 1.0, 1.5, 2.0 — so i is 0, 0.25, 1.0, 2.25, 4.0 + expected_i = [0.0, 0.25, 1.0, 2.25, 4.0] + actual_i = [r.snapshot[i] for r in sink.rows] + assert actual_i == pytest.approx(expected_i) + + +def test_event_ordering(engine: MeasurementEngine) -> None: + """RunStarted is always first; RunStopped is always last.""" + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=3) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + handle.wait(timeout=5.0) + + sink: MemorySink = engine._test_sink # type: ignore[attr-defined] + assert isinstance(sink.events[0], RunStarted) + assert isinstance(sink.events[-1], RunStopped) + # All middle events are RowEmitted + for ev in sink.events[1:-1]: + assert isinstance(ev, RowEmitted) + + +def test_setpoint_state_is_set_on_hardware(engine: MeasurementEngine) -> None: + """The engine actually calls param.set() — verify via cache.""" + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=3) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + engine.submit(plan).wait(timeout=5.0) + + # After the scan completes, scan_1d's finally sets g back to 0.0. + assert g.cache.get() == pytest.approx(0.0) + + +def test_handle_dataset_future_resolves(engine: MeasurementEngine) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=2) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + handle.wait(timeout=5.0) + # In tracer scope (no SqliteSink), the dataset future resolves to None. 
+ assert handle.dataset.result(timeout=1.0) is None + + +def test_handle_status_transitions(engine: MeasurementEngine) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=2) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + # Could be "running" briefly; either way, after wait we're done. + handle.wait(timeout=5.0) + assert handle.status == "done" + + +# ---------------------------------------------------------------------------- +# Cancellation +# ---------------------------------------------------------------------------- + + +def test_cancel_triggers_cleanup(engine: MeasurementEngine) -> None: + """Cancellation must run scan_1d's finally block — g.cache.get() == 0.0.""" + g, i = _make_params() + # Many points, with delay, so cancel lands mid-sweep. + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=10000, delay=0.01) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + time.sleep(0.05) # let the engine start + handle.cancel() + result = handle.wait(timeout=5.0) + + assert result.reason == "cancelled" + assert result.cancel_latency is not None + # Cleanup happened: g was ramped back to 0.0 + assert g.cache.get() == pytest.approx(0.0), ( + "scan_1d's finally block must run on cancel and reset g to 0.0" + ) + + +def test_cancel_emits_runstopped_with_cancelled_reason( + engine: MeasurementEngine, +) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=10000, delay=0.01) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + time.sleep(0.05) + handle.cancel() + handle.wait(timeout=5.0) + + sink: MemorySink = engine._test_sink # type: ignore[attr-defined] + assert len(sink.stops) == 1 + assert sink.stops[0].reason == "cancelled" + + +def test_cancel_during_sleep_unblocks_quickly( + engine: MeasurementEngine, +) -> None: + """A long Sleep must be cancellable 
within ~100ms (chunked sleep).""" + g, i = _make_params() + + # One point, long delay — engine will be in cancellable sleep most of the time. + sweep = LinSweep(g, start=0.0, stop=0.0, num_points=1, delay=5.0) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + time.sleep(0.1) + cancel_at = time.time() + handle.cancel() + handle.wait(timeout=2.0) + elapsed = time.time() - cancel_at + + assert elapsed < 1.0, ( + f"Cancel-during-sleep took {elapsed:.2f}s; should be sub-second." + ) + + +def test_keyboardinterrupt_via_cancelrequested_in_plan( + engine: MeasurementEngine, +) -> None: + """A plan raising CancelRequested itself ends the run as interrupted.""" + from qcodes.measure_v2 import CancelRequested # noqa: PLC0415 + + g, i = _make_params() + + def _self_aborting_plan(): + yield Set(g, 0.5) + yield Read((i,)) + yield Emit() + raise CancelRequested("plan_aborted") + + plan = run(setpoints=(g,), measured=(i,))(_self_aborting_plan()) + handle = engine.submit(plan) + result = handle.wait(timeout=5.0) + + assert result.reason == "interrupted" + + +# ---------------------------------------------------------------------------- +# Concurrency & lifecycle +# ---------------------------------------------------------------------------- + + +def test_concurrent_submit_raises(engine: MeasurementEngine) -> None: + """Tracer scope: second submit while busy raises (no queue yet).""" + g, i = _make_params() + sweep1 = LinSweep(g, start=0.0, stop=1.0, num_points=200, delay=0.01) + sweep2 = LinSweep(g, start=0.0, stop=1.0, num_points=10) + + plan1 = run(setpoints=(g,), measured=(i,))(scan_1d(sweep1, [i])) + handle1 = engine.submit(plan1) + + plan2 = run(setpoints=(g,), measured=(i,))(scan_1d(sweep2, [i])) + with pytest.raises(RuntimeError, match="already running"): + engine.submit(plan2) + + handle1.cancel() + handle1.wait(timeout=5.0) + + +def test_sequential_submits_work(engine: MeasurementEngine) -> None: + """After one run 
completes, a second submit should succeed.""" + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=3) + + plan1 = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + engine.submit(plan1).wait(timeout=5.0) + + plan2 = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + result = engine.submit(plan2).wait(timeout=5.0) + assert result.reason == "completed" + + +def test_shutdown_cancels_inflight_run(engine: MeasurementEngine) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=10000, delay=0.01) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine.submit(plan) + time.sleep(0.05) + engine.shutdown(wait=True, timeout=5.0) + result = handle.future.result(timeout=1.0) + assert result.reason in ("cancelled", "engine_shutdown") + # Cleanup still ran + assert g.cache.get() == pytest.approx(0.0) + + +# ---------------------------------------------------------------------------- +# Validation +# ---------------------------------------------------------------------------- + + +def test_emit_with_undeclared_param_raises(engine: MeasurementEngine) -> None: + """Emit overrides must reference declared params.""" + g, i = _make_params() + j = Parameter("j") # not declared! 
+ + def _bad_plan(): + yield Set(g, 0.0) + yield Read((i,)) + yield Emit(overrides={j: 99}) + + plan = run(setpoints=(g,), measured=(i,))(_bad_plan()) + handle = engine.submit(plan) + result = handle.wait(timeout=5.0) + + assert result.reason == "error" + assert isinstance(result.error, PlanError) + + +def test_read_returns_value_to_plan(engine: MeasurementEngine) -> None: + """The send-value path: a plan reads, computes, sets — engine drives it.""" + g, i = _make_params() + observed_values: list[float] = [] + + def _adaptive_plan(): + for v in (0.1, 0.2, 0.3): + yield Set(g, v) + r = yield Read((i,)) + observed_values.append(r[i]) + yield Emit() + + plan = run(setpoints=(g,), measured=(i,))(_adaptive_plan()) + engine.submit(plan).wait(timeout=5.0) + + # i = g**2; we set g to 0.1, 0.2, 0.3 + assert observed_values == pytest.approx([0.01, 0.04, 0.09]) diff --git a/tests/measure_v2/test_memory_sink.py b/tests/measure_v2/test_memory_sink.py new file mode 100644 index 000000000000..1769ff30b4d9 --- /dev/null +++ b/tests/measure_v2/test_memory_sink.py @@ -0,0 +1,122 @@ +"""Tests for the sink protocol and ``MemorySink``.""" + +from __future__ import annotations + +import time +from uuid import uuid4 + +import pytest + +from qcodes.measure_v2 import ( + DataSink, + Descriptor, + MemorySink, + RowEmitted, + RunStarted, + RunStopped, + is_critical, +) +from qcodes.parameters import Parameter + + +@pytest.fixture +def descriptor() -> Descriptor: + g = Parameter("g") + i = Parameter("i") + return Descriptor(setpoints=(g,), measured=(i,)) + + +def _started(descriptor: Descriptor) -> RunStarted: + return RunStarted( + run_id=uuid4(), + name="t", + descriptor=descriptor, + exp=None, + write_period=None, + started_at=time.time(), + ) + + +def _stopped(run_id) -> RunStopped: + return RunStopped( + run_id=run_id, + reason="completed", + error=None, + started_at=time.time(), + stopped_at=time.time(), + ) + + +def test_memory_sink_records_events_in_order(descriptor: Descriptor) -> 
None: + sink = MemorySink() + start = _started(descriptor) + row = RowEmitted(run_id=start.run_id, snapshot={}, seq=0) + stop = _stopped(start.run_id) + + sink(start) + sink(row) + sink(stop) + + assert sink.events == [start, row, stop] + + +def test_memory_sink_convenience_accessors(descriptor: Descriptor) -> None: + sink = MemorySink() + start = _started(descriptor) + sink(start) + sink(RowEmitted(run_id=start.run_id, snapshot={}, seq=0)) + sink(RowEmitted(run_id=start.run_id, snapshot={}, seq=1)) + sink(_stopped(start.run_id)) + + assert len(sink.rows) == 2 + assert len(sink.starts) == 1 + assert len(sink.stops) == 1 + assert sink.starts[0] is start + + +def test_memory_sink_clear(descriptor: Descriptor) -> None: + sink = MemorySink() + sink(_started(descriptor)) + sink.clear() + assert sink.events == [] + + +def test_memory_sink_is_non_critical_by_default() -> None: + sink = MemorySink() + assert is_critical(sink) is False + + +def test_is_critical_returns_false_for_plain_function() -> None: + """A plain callable used as a sink defaults to non-critical.""" + + def sink(event): + pass + + assert is_critical(sink) is False + + +def test_is_critical_reads_attribute() -> None: + """An object can opt into critical via an attribute.""" + + class _CriticalSink: + critical = True + + def __call__(self, event): + pass + + assert is_critical(_CriticalSink()) is True + + +def test_memory_sink_satisfies_datasink_protocol() -> None: + """Runtime protocol check: MemorySink is a DataSink.""" + sink = MemorySink() + assert isinstance(sink, DataSink) + + +def test_plain_callable_satisfies_datasink_protocol() -> None: + """Functions also satisfy the protocol (it's just a callable).""" + + def sink(event): + pass + + assert isinstance(sink, DataSink) diff --git a/tests/measure_v2/test_messages.py b/tests/measure_v2/test_messages.py new file mode 100644 index 000000000000..deb8e350d5e6 --- /dev/null +++ b/tests/measure_v2/test_messages.py @@ -0,0 +1,57 @@ +"""Smoke tests for the 
plan message vocabulary. + +Verifies that the dataclasses can be constructed and are hashable/frozen. +Plan-builder integration tests live in test_scan_1d_l1.py. +""" + +from __future__ import annotations + +import pytest + +from qcodes.measure_v2 import ( + Emit, + OpenRun, + Read, + Set, +) +from qcodes.parameters import Parameter + + +def test_messages_are_frozen() -> None: + g = Parameter("g") + s = Set(g, 0.5) + with pytest.raises(Exception): + s.value = 1.0 # type: ignore[misc] + + +def test_messages_are_hashable() -> None: + g = Parameter("g") + s = Set(g, 0.5) + # Frozen dataclasses with hashable fields are hashable. Useful for + # sets/dict keys (e.g., deduplicating expected messages in tests). + assert hash(s) == hash(Set(g, 0.5)) + + +def test_read_takes_tuple() -> None: + i = Parameter("i") + r = Read((i,)) + assert r.params == (i,) + + +def test_emit_default_overrides_empty() -> None: + e = Emit() + assert e.overrides == {} + + +def test_open_run_carries_descriptor() -> None: + g = Parameter("g") + i = Parameter("i") + o = OpenRun( + name="t", + setpoint_params=(g,), + measured_params=(i,), + shapes={i.register_name: (5,)}, + ) + assert o.setpoint_params == (g,) + assert o.measured_params == (i,) + assert o.shapes is not None diff --git a/tests/measure_v2/test_run_decorator.py b/tests/measure_v2/test_run_decorator.py new file mode 100644 index 000000000000..59010694d6bb --- /dev/null +++ b/tests/measure_v2/test_run_decorator.py @@ -0,0 +1,207 @@ +"""L1 tests for the ``run(...)`` decorator. + +The decorator owns run-lifecycle responsibility: it must yield ``OpenRun`` +at the start and ``CloseRun`` at the end (success, error, or cancel) of +the wrapped plan. It also validates the schema at decoration time. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import pytest + +from qcodes.measure_v2 import ( + CloseRun, + Emit, + Msg, + OpenRun, + PlanError, + Read, + Set, + run, +) +from qcodes.measure_v2.testing import drive_plan +from qcodes.parameters import Parameter + +if TYPE_CHECKING: + from collections.abc import Generator + + +def _trivial_plan(g, i) -> Generator[Msg, Any, None]: + yield Set(g, 1.0) + yield Read((i,)) + yield Emit() + + +def test_run_yields_openrun_first_and_closerun_last() -> None: + g = Parameter("g") + i = Parameter("i") + plan = run(name="t", setpoints=(g,), measured=(i,))(_trivial_plan(g, i)) + + result = drive_plan(plan) + + assert isinstance(result.messages[0], OpenRun) + assert isinstance(result.messages[-1], CloseRun) + assert result.messages[0].name == "t" + assert result.messages[0].setpoint_params == (g,) + assert result.messages[0].measured_params == (i,) + + +def test_run_preserves_inner_messages_in_order() -> None: + g = Parameter("g") + i = Parameter("i") + plan = run(setpoints=(g,), measured=(i,))(_trivial_plan(g, i)) + + result = drive_plan(plan) + + # OpenRun + (Set + Read + Emit) + CloseRun = 5 messages + assert len(result.messages) == 5 + assert isinstance(result.messages[1], Set) + assert isinstance(result.messages[2], Read) + assert isinstance(result.messages[3], Emit) + + +def test_run_without_schema_raises_plan_error() -> None: + g = Parameter("g") + i = Parameter("i") + plan = run(name="t")(_trivial_plan(g, i)) + with pytest.raises(PlanError, match="explicit setpoints"): + drive_plan(plan) + + +def test_run_duplicate_register_names_raises() -> None: + g1 = Parameter("dup") + g2 = Parameter("dup") + i = Parameter("i") + plan = run(setpoints=(g1, g2), measured=(i,))(_trivial_plan(g1, i)) + with pytest.raises(PlanError, match="register_name"): + drive_plan(plan) + + +def test_run_empty_inner_plan_emits_only_lifecycle() -> None: + """Empty plans yield only the OpenRun + CloseRun 
bookends. + + (Prior design noted that empty plans produced no events; that was + incompatible with correct exception forwarding through the decorator. + Current contract: every run() execution produces an OpenRun and a + CloseRun, even with no data.) + """ + + def _empty() -> Generator[Msg, Any, None]: + if False: + yield # pragma: no cover + + g = Parameter("g") + i = Parameter("i") + plan = run(setpoints=(g,), measured=(i,))(_empty()) + + result = drive_plan(plan) + + assert len(result.messages) == 2 + assert isinstance(result.messages[0], OpenRun) + assert isinstance(result.messages[-1], CloseRun) + assert not result.cancelled + + +def test_run_cancel_propagates_to_inner_plan_finally() -> None: + """REGRESSION: the decorator must forward CancelRequested to inner. + + A naive decorator that manually iterates ``inner.send(...) / yield msg`` + will run its own finally on cancel but won't throw into the inner + plan — so the inner plan's finally never runs and cleanup is silently + skipped. ``yield from`` is what gives us correct propagation. 
+ """ + g = Parameter("g") + i = Parameter("i") + inner_finally_ran = [] + + def _inner() -> Generator[Msg, Any, None]: + try: + for _ in range(1000): + yield Set(g, 1.0) + yield Read((i,)) + yield Emit() + finally: + inner_finally_ran.append(True) + yield Set(g, 0.0) # cleanup message + + plan = run(setpoints=(g,), measured=(i,))(_inner()) + result = drive_plan(plan, cancel_after=4) + + assert inner_finally_ran == [True], ( + "Inner plan's finally must run when the decorator is cancelled" + ) + # The cleanup Set(g, 0.0) must appear in the message stream + cleanup_sets = [ + m + for m in result.messages + if isinstance(m, Set) and m.value == 0.0 and m.param is g + ] + assert len(cleanup_sets) == 1 + assert isinstance(result.messages[-1], CloseRun) + + +def test_run_closerun_emitted_even_on_cancel() -> None: + """CloseRun MUST appear in the message stream after a cancel.""" + g = Parameter("g") + i = Parameter("i") + + def _long_plan() -> Generator[Msg, Any, None]: + for v in range(1000): + yield Set(g, float(v)) + yield Read((i,)) + yield Emit() + + plan = run(setpoints=(g,), measured=(i,))(_long_plan()) + + result = drive_plan(plan, cancel_after=5) + + assert result.cancelled + assert isinstance(result.messages[-1], CloseRun) + + +def test_run_composition_via_yield_from_single_run() -> None: + """A plan that yield-froms an inner plan still produces ONE run.""" + g = Parameter("g") + i = Parameter("i") + + def _inner() -> Generator[Msg, Any, None]: + yield Set(g, 0.0) + yield Read((i,)) + yield Emit() + + def _outer() -> Generator[Msg, Any, None]: + yield Set(g, 1.0) + yield from _inner() + yield Set(g, 2.0) + yield Read((i,)) + yield Emit() + + plan = run(setpoints=(g,), measured=(i,))(_outer()) + result = drive_plan(plan) + + opens = [m for m in result.messages if isinstance(m, OpenRun)] + closes = [m for m in result.messages if isinstance(m, CloseRun)] + assert len(opens) == 1 + assert len(closes) == 1 + + +def test_run_passes_send_values_through_decorator() -> 
None: + """The .send() value (Read result) must reach the inner plan.""" + g = Parameter("g") + i = Parameter("i") + seen: list[Any] = [] + + def _adaptive() -> Generator[Msg, Any, None]: + for _ in range(3): + yield Set(g, 0.0) + r = yield Read((i,)) + seen.append(r) + yield Emit() + + plan = run(setpoints=(g,), measured=(i,))(_adaptive()) + + drive_plan(plan, on_read=lambda params: {p: 7.0 for p in params}) + + assert seen == [{i: 7.0}, {i: 7.0}, {i: 7.0}] diff --git a/tests/measure_v2/test_scan_1d_l1.py b/tests/measure_v2/test_scan_1d_l1.py new file mode 100644 index 000000000000..edc802409365 --- /dev/null +++ b/tests/measure_v2/test_scan_1d_l1.py @@ -0,0 +1,136 @@ +"""L1 tests for the ``scan_1d`` plan-builder. + +These tests exercise the plan-builder in isolation from any engine, using +the ``drive_plan`` helper. The contract under test: + +- Yields exactly ``num_points`` of (Set, [Sleep], Read, Emit) cycles. +- ALWAYS yields ``Set(sweep.param, 0.0)`` in its ``finally`` block — this + is the cancel-safety guarantee that acceptance criterion #2 relies on. 
+""" + +from __future__ import annotations + +import numpy as np +import pytest + +from qcodes.dataset.dond.sweeps import LinSweep +from qcodes.measure_v2 import Emit, Read, Set, Sleep, run, scan_1d +from qcodes.measure_v2.testing import drive_plan +from qcodes.parameters import Parameter + + +def test_scan_1d_yields_expected_message_count() -> None: + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=5) + + result = drive_plan(scan_1d(sweep, [i])) + + sets = [m for m in result.messages if isinstance(m, Set)] + reads = [m for m in result.messages if isinstance(m, Read)] + emits = [m for m in result.messages if isinstance(m, Emit)] + # 5 sweep Sets + 1 cleanup Set in finally + assert len(sets) == 6 + assert len(reads) == 5 + assert len(emits) == 5 + + +def test_scan_1d_set_values_match_linspace() -> None: + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=-1.0, stop=1.0, num_points=11) + + result = drive_plan(scan_1d(sweep, [i])) + + sets = [m for m in result.messages if isinstance(m, Set) and m.param is g] + sweep_values = [s.value for s in sets[:-1]] # last is cleanup + expected = np.linspace(-1.0, 1.0, 11) + assert sweep_values == pytest.approx(list(expected)) + + +def test_scan_1d_cleanup_set_is_zero() -> None: + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=0.0, stop=5.0, num_points=3) + + result = drive_plan(scan_1d(sweep, [i])) + + last_set = next(m for m in reversed(result.messages) if isinstance(m, Set)) + assert last_set.param is g + assert last_set.value == 0.0 + + +def test_scan_1d_no_sleep_when_delay_zero() -> None: + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=3, delay=0.0) + + result = drive_plan(scan_1d(sweep, [i])) + + assert not any(isinstance(m, Sleep) for m in result.messages) + + +def test_scan_1d_sleeps_when_delay_positive() -> None: + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=0.0, 
stop=1.0, num_points=3, delay=0.05) + + result = drive_plan(scan_1d(sweep, [i])) + + sleeps = [m for m in result.messages if isinstance(m, Sleep)] + assert len(sleeps) == 3 + assert all(s.seconds == pytest.approx(0.05) for s in sleeps) + + + def test_scan_1d_cleanup_runs_on_cancel() -> None: + """THE acceptance contract: even a mid-sweep cancel must ramp to 0.0.""" + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=1001) + + # Cancel after two dispatched messages (cancel_after=2 → after the first + # point's [Set(0.0), Read], i.e. partway through the sweep loop). + result = drive_plan(scan_1d(sweep, [i]), cancel_after=2) + + assert result.cancelled + last_set = next(m for m in reversed(result.messages) if isinstance(m, Set)) + assert last_set.param is g + assert last_set.value == 0.0, ( + "scan_1d MUST yield Set(sweep.param, 0.0) in its finally. " + "This is the contract acceptance criterion #2 depends on." + ) + + + def test_scan_1d_multiple_measured_params_one_read_msg() -> None: + """Multiple measured params are batched into one Read message per point.""" + g = Parameter("g") + i = Parameter("i") + j = Parameter("j") + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=3) + + result = drive_plan(scan_1d(sweep, [i, j])) + + reads = [m for m in result.messages if isinstance(m, Read)] + assert len(reads) == 3 + assert all(r.params == (i, j) for r in reads) + + + def test_scan_1d_under_run_decorator_emits_lifecycle() -> None: + """End-to-end L1: run() + scan_1d together produces a valid run.""" + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=5) + + plan = run(name="t", setpoints=(g,), measured=(i,), shapes={"i": (5,)})( + scan_1d(sweep, [i]) + ) + + result = drive_plan(plan) + + # OpenRun + 5*(Set + Read + Emit) + cleanup Set + CloseRun + assert len(result.messages) == 1 + 5 * 3 + 1 + 1 + # Last message must always be CloseRun (decorator's finally) + from qcodes.measure_v2 import 
CloseRun, OpenRun # noqa: PLC0415 + + assert isinstance(result.messages[0], OpenRun) + assert isinstance(result.messages[-1], CloseRun) diff --git a/tests/measure_v2/test_sqlite_sink.py b/tests/measure_v2/test_sqlite_sink.py new file mode 100644 index 000000000000..67d4f277a24e --- /dev/null +++ b/tests/measure_v2/test_sqlite_sink.py @@ -0,0 +1,97 @@ +"""L3 tests for the SQLite sink and engine end-to-end with persistence.""" + +from __future__ import annotations + +import time + +import pytest + +from qcodes.dataset.dond.sweeps import LinSweep +from qcodes.measure_v2 import ( + MeasurementEngine, + SqliteSink, + run, + scan_1d, +) +from qcodes.parameters import Parameter, ParameterBase + + +@pytest.fixture +def engine_with_sqlite(empty_db) -> MeasurementEngine: + del empty_db # fixture activated for side effects + sink = SqliteSink(experiment_name="measure_v2_test", sample_name="tracer") + eng = MeasurementEngine(sinks=[sink]) + try: + yield eng + finally: + eng.shutdown(wait=True, timeout=5.0) + + +def _make_params() -> tuple[ParameterBase, ParameterBase]: + g = Parameter("g", initial_value=0.0, set_cmd=None, get_cmd=None) + i = Parameter("i", get_cmd=lambda: g.cache.get() ** 2) + return g, i + + +def test_sqlite_sink_persists_rows( + engine_with_sqlite: MeasurementEngine, +) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=2.0, num_points=5) + plan = run(name="persisted", setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine_with_sqlite.submit(plan) + result = handle.wait(timeout=10.0) + + assert result.reason == "completed" + assert result.n_rows_emitted == 5 + + dataset = handle.dataset.result(timeout=1.0) + assert dataset is not None + assert dataset.completed + + # Read via the in-memory cache (thread-safe-ish) since the dataset's + # connection is bound to the publisher thread. 
+ cache_data = dataset.cache.data() + # cache.data() returns {measured_param_name: {param_name: ndarray}} + assert "i" in cache_data + measured = cache_data["i"] + g_vals = list(measured["g"]) + i_vals = list(measured["i"]) + assert g_vals == pytest.approx([0.0, 0.5, 1.0, 1.5, 2.0]) + assert i_vals == pytest.approx([0.0, 0.25, 1.0, 2.25, 4.0]) + + +def test_sqlite_sink_handle_dataset_resolves( + engine_with_sqlite: MeasurementEngine, +) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=3) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine_with_sqlite.submit(plan) + # The dataset future should resolve once RunStarted is processed. + dataset = handle.dataset.result(timeout=5.0) + assert dataset is not None + handle.wait(timeout=5.0) + + +def test_sqlite_sink_cancel_finalizes_partial_dataset( + engine_with_sqlite: MeasurementEngine, +) -> None: + g, i = _make_params() + sweep = LinSweep(g, start=0.0, stop=1.0, num_points=10000, delay=0.01) + plan = run(setpoints=(g,), measured=(i,))(scan_1d(sweep, [i])) + + handle = engine_with_sqlite.submit(plan) + time.sleep(0.05) + handle.cancel() + result = handle.wait(timeout=5.0) + + assert result.reason == "cancelled" + dataset = handle.dataset.result(timeout=1.0) + assert dataset is not None + assert dataset.completed # exit ran successfully despite cancel + cache_data = dataset.cache.data() + # Some rows were written before cancel + assert len(cache_data["i"]["g"]) >= 1 From c09ad896daf469735bfa7396721f56fccf8b9fdb Mon Sep 17 00:00:00 2001 From: "Jens H. 
Nielsen" Date: Tue, 12 May 2026 09:59:40 +0200 Subject: [PATCH 2/5] Rename newsfragment to PR 8116 Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- docs/changes/newsfragments/{+measure_v2_tracer.new => 8116.new} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/changes/newsfragments/{+measure_v2_tracer.new => 8116.new} (100%) diff --git a/docs/changes/newsfragments/+measure_v2_tracer.new b/docs/changes/newsfragments/8116.new similarity index 100% rename from docs/changes/newsfragments/+measure_v2_tracer.new rename to docs/changes/newsfragments/8116.new From 4a6c5b9bc29751b5189ed8b798c4f91a7fd76d18 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Tue, 12 May 2026 10:07:59 +0200 Subject: [PATCH 3/5] Add scan_nd for N-dimensional sweeps in measure_v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces scan_nd(*sweeps, measured=[...]) for nested multi-dimensional scans, the highest-value gap vs dond/do2d/doNd. Sweeps are outermost-first ordered; at each innermost setpoint the plan reads all measured parameters and emits one dataset row. Per-sweep delay and get_after_set are honored. Cleanup contract extends scan_1d's: on exit (success, error, or cancel) every swept parameter is set to 0.0, in outer-to-inner order, with the engine dispatching each cleanup Set before publishing RunStopped. scan_1d is refactored as a thin delegate to scan_nd (single-sweep case). get_after_set support — deferred from the tracer bullet — is now honored in both, inserting a Read of the swept parameter immediately after Set so the dataset row reflects the actual instrument readback. The convenience entrypoint qc.measure_v2.scan(*sweeps, measure=[...]) now accepts any number of sweeps and dispatches accordingly. The previous NotImplementedError for multi-sweep usage is removed. 20 new tests across L1 (message-stream), L2 (engine + MemorySink), and L3 (engine + SqliteSink) levels. 
All 70 measure_v2 tests pass. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../newsfragments/+measure_v2_scan_nd.new | 10 + src/qcodes/measure_v2/__init__.py | 3 +- src/qcodes/measure_v2/convenience.py | 35 +-- src/qcodes/measure_v2/plans/__init__.py | 4 +- src/qcodes/measure_v2/plans/scan.py | 101 ++++++-- tests/measure_v2/test_scan_nd.py | 225 ++++++++++++++++++ tests/measure_v2/test_scan_nd_l1.py | 204 ++++++++++++++++ 7 files changed, 541 insertions(+), 41 deletions(-) create mode 100644 docs/changes/newsfragments/+measure_v2_scan_nd.new create mode 100644 tests/measure_v2/test_scan_nd.py create mode 100644 tests/measure_v2/test_scan_nd_l1.py diff --git a/docs/changes/newsfragments/+measure_v2_scan_nd.new b/docs/changes/newsfragments/+measure_v2_scan_nd.new new file mode 100644 index 000000000000..296db4d033b3 --- /dev/null +++ b/docs/changes/newsfragments/+measure_v2_scan_nd.new @@ -0,0 +1,10 @@ +Added ``scan_nd`` to ``qcodes.measure_v2`` for nested multi-dimensional +sweeps. Sweeps are passed outermost-first +(``scan_nd(outer, middle, inner, measured=[...])``); the plan-builder reads +every ``measured`` parameter at each innermost setpoint and emits one +dataset row per innermost point. Per-sweep ``delay`` and ``get_after_set`` +are honored. On exit (success, error, or cancel), every swept parameter +is set to ``0.0`` — extending the ``scan_1d`` cancel-safety contract to +N dimensions. The convenience entrypoint ``qc.measure_v2.scan(*sweeps, +measure=[...])`` now accepts any number of sweeps and dispatches to +``scan_1d`` or ``scan_nd`` accordingly. 
diff --git a/src/qcodes/measure_v2/__init__.py b/src/qcodes/measure_v2/__init__.py index 7c71c1a2e22d..687cebb48240 100644 --- a/src/qcodes/measure_v2/__init__.py +++ b/src/qcodes/measure_v2/__init__.py @@ -34,7 +34,7 @@ Set, Sleep, ) -from qcodes.measure_v2.plans import scan_1d +from qcodes.measure_v2.plans import scan_1d, scan_nd from qcodes.measure_v2.sinks import DataSink, MemorySink, SqliteSink, is_critical __all__ = [ @@ -65,4 +65,5 @@ "run", "scan", "scan_1d", + "scan_nd", ] diff --git a/src/qcodes/measure_v2/convenience.py b/src/qcodes/measure_v2/convenience.py index b944e60468a3..9f8a8388d317 100644 --- a/src/qcodes/measure_v2/convenience.py +++ b/src/qcodes/measure_v2/convenience.py @@ -14,7 +14,7 @@ from qcodes.measure_v2.decorators import run from qcodes.measure_v2.engine import MeasurementEngine, RunHandle -from qcodes.measure_v2.plans import scan_1d +from qcodes.measure_v2.plans import scan_1d, scan_nd from qcodes.measure_v2.sinks import SqliteSink if TYPE_CHECKING: @@ -71,16 +71,18 @@ def scan( exp: Experiment | None = None, engine: MeasurementEngine | None = None, ) -> DataSetProtocol | RunHandle | None: - """Run a scan. + """Run a 1D or N-D scan. - Tracer scope: one sweep only. The sweep parameter is set across its - setpoints; ``measure`` parameters are read at each point; a row is - emitted per point. On exit (success, error, or cancel), the swept - parameter is set back to 0.0 (the ``scan_1d`` cleanup contract). + For a single sweep, delegates to :py:func:`scan_1d`. For two or more + sweeps, delegates to :py:func:`scan_nd` (outermost-first ordering). + Each ``sweeps[k]`` is set across its setpoints; ``measure`` parameters + are read at each innermost point; a row is emitted per innermost point. + On exit (success, error, or cancel), every swept parameter is set + back to ``0.0`` — the ``scan_nd`` cleanup contract. Args: - *sweeps: Sweeps to perform. Currently exactly one sweep is required. - measure: Parameters to read at each setpoint. 
+ *sweeps: One or more sweeps to perform, outermost-first. + measure: Parameters to read at each innermost setpoint. wait: If ``True`` (default), block until the run completes and return the resulting dataset. If ``False``, return the :py:class:`RunHandle` immediately for non-blocking workflows. @@ -95,24 +97,25 @@ def scan( - If ``wait=False``: a :py:class:`RunHandle` for the running submission. """ - if len(sweeps) != 1: - raise NotImplementedError( - "measure_v2.scan currently supports exactly one sweep " - f"(got {len(sweeps)}). Multi-dimensional scans are planned for v1." - ) + if len(sweeps) == 0: + raise ValueError("scan requires at least one sweep") - sweep = sweeps[0] eng = engine if engine is not None else default_engine() - setpoints = (sweep.param,) + setpoints = tuple(s.param for s in sweeps) measured = tuple(measure) + if len(sweeps) == 1: + inner_plan = scan_1d(sweeps[0], measured) + else: + inner_plan = scan_nd(*sweeps, measured=measured) + plan = run( name=name, exp=exp, setpoints=setpoints, measured=measured, - )(scan_1d(sweep, measured)) + )(inner_plan) handle = eng.submit(plan) if not wait: diff --git a/src/qcodes/measure_v2/plans/__init__.py b/src/qcodes/measure_v2/plans/__init__.py index 07043b6df2a8..9e5c18bd426f 100644 --- a/src/qcodes/measure_v2/plans/__init__.py +++ b/src/qcodes/measure_v2/plans/__init__.py @@ -5,6 +5,6 @@ to open the run. """ -from qcodes.measure_v2.plans.scan import scan_1d +from qcodes.measure_v2.plans.scan import scan_1d, scan_nd -__all__ = ["scan_1d"] +__all__ = ["scan_1d", "scan_nd"] diff --git a/src/qcodes/measure_v2/plans/scan.py b/src/qcodes/measure_v2/plans/scan.py index f1d4d7761ef0..5ed90683ba5e 100644 --- a/src/qcodes/measure_v2/plans/scan.py +++ b/src/qcodes/measure_v2/plans/scan.py @@ -21,33 +21,90 @@ def scan_1d( ) -> Generator[Msg, Any, None]: """Sweep one parameter, reading measured parameters at each setpoint. - Yields one ``Set``/(optional ``Sleep``)/``Read``/``Emit`` cycle per - setpoint. 
In its ``finally`` block, **always** yields a final ``Set`` - that drives the swept parameter to ``ramp_down_value`` (default 0.0). - This guarantee is the basis of the tracer's cancel-safety contract: - a cancelled scan returns the swept parameter to a known state. + Thin wrapper around :py:func:`scan_nd` for the 1D case. See + :py:func:`scan_nd` for the full contract; the only difference is that + ``scan_1d`` takes the sweep as a positional argument (matching the + single-sweep idiom) instead of as a variadic. - The plan-builder does **not** yield ``OpenRun``/``CloseRun``; wrap with - :py:func:`qcodes.measure_v2.run` to open a run for the dataset. + The mandatory ramp-to-``ramp_down_value`` cleanup on exit (success, + error, or cancel) is the architecture's safety guarantee — see DESIGN.md. + """ + yield from scan_nd(sweep, measured=measured, ramp_down_value=ramp_down_value) + + +def scan_nd( + *sweeps: AbstractSweep, + measured: Sequence[ParameterBase], + ramp_down_value: float = 0.0, +) -> Generator[Msg, Any, None]: + """Nested sweep over one or more parameters. + + Sweep ordering is outermost-first: ``scan_nd(outer, inner, measured=[i])`` + drives ``outer`` once per ``inner`` cycle. At each innermost point the + plan reads every measured parameter and emits one dataset row. + + Per-sweep behavior at every setpoint: + + 1. ``Set(sweep.param, value)`` — drive the instrument + 2. ``Sleep(sweep.delay)`` if ``sweep.delay > 0`` + 3. ``Read((sweep.param,))`` if ``sweep.get_after_set`` — readback wins + over the requested value in the dataset row. + + On the innermost sweep, after the optional readback: + + 4. ``Read(measured)`` — single batched read of all measured parameters + 5. ``Emit()`` — produce one row from the engine's state cache + + Mandatory cleanup (``finally``): every swept parameter is set to + ``ramp_down_value`` (default ``0.0``), in outer→inner order. 
This is + the architecture's cancel-safety contract: on cancel (or error), the + instrument state is restored to a known value before the run is + reported stopped. Args: - sweep: The :py:class:`~qcodes.dataset.dond.sweeps.AbstractSweep` - describing the swept parameter and its setpoint values. - measured: Parameters to read at each setpoint. - ramp_down_value: Value to set the swept parameter to on exit - (success, error, or cancel). Defaults to 0.0. + *sweeps: One or more :py:class:`~qcodes.dataset.dond.sweeps.AbstractSweep` + instances. The first argument is the outermost loop; the last + is the innermost. + measured: Parameters to read at each innermost setpoint. + ramp_down_value: Value each swept parameter is set to on exit. - Yields: - Plan messages. + Raises: + ValueError: If no sweeps are provided. """ + if not sweeps: + raise ValueError("scan_nd requires at least one sweep") + + measured_tuple = tuple(measured) try: - for v in sweep.get_setpoints(): - yield Set(sweep.param, float(v)) - if sweep.delay > 0: - yield Sleep(sweep.delay) - yield Read(tuple(measured)) - yield Emit() + yield from _scan_recursive(sweeps, measured_tuple, depth=0) finally: - # Mandatory cleanup: leave the swept parameter at a known value. - yield Set(sweep.param, ramp_down_value) + # Ramp every swept parameter to the safe value, outer-to-inner. + # If the plan is being cancelled, these Sets are dispatched in + # order by the engine before RunStopped is published. 
+ for sweep in sweeps: + yield Set(sweep.param, ramp_down_value) + + +def _scan_recursive( + sweeps: tuple[AbstractSweep, ...], + measured: tuple[ParameterBase, ...], + *, + depth: int, +) -> Generator[Msg, Any, None]: + sweep = sweeps[depth] + is_innermost = depth == len(sweeps) - 1 + for v in sweep.get_setpoints(): + yield Set(sweep.param, float(v)) + if sweep.delay > 0: + yield Sleep(sweep.delay) + if sweep.get_after_set: + # Readback overwrites state[sweep.param] with the actual + # instrument value, so the dataset row reflects reality. + yield Read((sweep.param,)) + + if is_innermost: + yield Read(measured) + yield Emit() + else: + yield from _scan_recursive(sweeps, measured, depth=depth + 1) diff --git a/tests/measure_v2/test_scan_nd.py b/tests/measure_v2/test_scan_nd.py new file mode 100644 index 000000000000..6c064a542531 --- /dev/null +++ b/tests/measure_v2/test_scan_nd.py @@ -0,0 +1,225 @@ +"""L2 / L3 tests for ``scan_nd`` through the engine + sinks.""" + +from __future__ import annotations + +import time + +import pytest + +from qcodes.dataset.dond.sweeps import LinSweep +from qcodes.measure_v2 import ( + MeasurementEngine, + MemorySink, + RowEmitted, + RunStarted, + RunStopped, + SqliteSink, + run, + scan, + scan_nd, +) +from qcodes.parameters import Parameter, ParameterBase + + +def _make_params() -> tuple[ParameterBase, ParameterBase, ParameterBase]: + g = Parameter("g", initial_value=0.0, set_cmd=None, get_cmd=None) + h = Parameter("h", initial_value=0.0, set_cmd=None, get_cmd=None) + # i depends on both g and h + i = Parameter("i", get_cmd=lambda: g.cache.get() ** 2 + h.cache.get()) + return g, h, i + + +# ---------------------------------------------------------------------------- +# L2 — MemorySink +# ---------------------------------------------------------------------------- + + +@pytest.fixture +def memory_engine() -> MeasurementEngine: + sink = MemorySink() + eng = MeasurementEngine(sinks=[sink]) + eng._test_sink = sink # type: 
ignore[attr-defined] + try: + yield eng + finally: + eng.shutdown(wait=True, timeout=5.0) + + +def test_scan_nd_2d_emits_n_times_m_rows(memory_engine: MeasurementEngine) -> None: + g, h, i = _make_params() + outer = LinSweep(g, 0.0, 1.0, 3) + inner = LinSweep(h, 0.0, 1.0, 4) + plan = run(setpoints=(g, h), measured=(i,))(scan_nd(outer, inner, measured=[i])) + + handle = memory_engine.submit(plan) + result = handle.wait(timeout=10.0) + + assert result.reason == "completed" + assert result.n_rows_emitted == 3 * 4 + sink: MemorySink = memory_engine._test_sink # type: ignore[attr-defined] + rows = [e for e in sink.events if isinstance(e, RowEmitted)] + assert len(rows) == 3 * 4 + + +def test_scan_nd_2d_snapshot_contains_both_setpoints( + memory_engine: MeasurementEngine, +) -> None: + g, h, i = _make_params() + outer = LinSweep(g, 0.0, 1.0, 3) + inner = LinSweep(h, 0.0, 2.0, 3) + plan = run(setpoints=(g, h), measured=(i,))(scan_nd(outer, inner, measured=[i])) + + memory_engine.submit(plan).wait(timeout=10.0) + + sink: MemorySink = memory_engine._test_sink # type: ignore[attr-defined] + rows = [e for e in sink.events if isinstance(e, RowEmitted)] + # Every row's snapshot includes g, h, and i + for row in rows: + assert g in row.snapshot + assert h in row.snapshot + assert i in row.snapshot + # First row: g=0, h=0, i=0 + # Last row: g=1, h=2, i=1+2=3 + assert rows[0].snapshot[g] == pytest.approx(0.0) + assert rows[0].snapshot[h] == pytest.approx(0.0) + assert rows[-1].snapshot[g] == pytest.approx(1.0) + assert rows[-1].snapshot[h] == pytest.approx(2.0) + assert rows[-1].snapshot[i] == pytest.approx(1.0 + 2.0) + + +def test_scan_nd_3d_emits_product_of_lengths(memory_engine: MeasurementEngine) -> None: + g, h, _ = _make_params() + k = Parameter("k", initial_value=0.0, set_cmd=None, get_cmd=None) + i = Parameter("i", get_cmd=lambda: g.cache.get() + h.cache.get() + k.cache.get()) + s1 = LinSweep(g, 0.0, 1.0, 2) + s2 = LinSweep(h, 0.0, 1.0, 3) + s3 = LinSweep(k, 0.0, 1.0, 4) + 
plan = run(setpoints=(g, h, k), measured=(i,))(scan_nd(s1, s2, s3, measured=[i])) + + memory_engine.submit(plan).wait(timeout=10.0) + + sink: MemorySink = memory_engine._test_sink # type: ignore[attr-defined] + assert sum(1 for e in sink.events if isinstance(e, RowEmitted)) == 2 * 3 * 4 + + + def test_scan_nd_cancel_ramps_all_sweep_params_to_zero( + memory_engine: MeasurementEngine, + ) -> None: + g, h, i = _make_params() + outer = LinSweep(g, 0.0, 1.0, 100, delay=0.005) + inner = LinSweep(h, 0.0, 1.0, 100, delay=0.005) + plan = run(setpoints=(g, h), measured=(i,))(scan_nd(outer, inner, measured=[i])) + + handle = memory_engine.submit(plan) + time.sleep(0.1) + handle.cancel() + result = handle.wait(timeout=5.0) + + assert result.reason == "cancelled" + # Both swept params must be back at 0.0 + assert g.cache.get() == pytest.approx(0.0) + assert h.cache.get() == pytest.approx(0.0) + + + def test_scan_nd_get_after_set_uses_readback_in_snapshot( + memory_engine: MeasurementEngine, + ) -> None: + """A param whose readback differs from the requested value shows the readback in rows.""" + # The simulated "instrument" applies 2x to every requested set value, so + # with get_after_set=True the readback (2 * setpoint) — not the requested + # setpoint — must be what ends up in the emitted dataset rows. + state = {"v": 0.0} + + def _set(val): + state["v"] = val * 2.0 # the "instrument" applies 2x + + def _get(): + return state["v"] + + g = Parameter("g", set_cmd=_set, get_cmd=_get) + i = Parameter("i", get_cmd=lambda: 0.0) + sweep = LinSweep(g, 1.0, 3.0, 3, get_after_set=True) + plan = run(setpoints=(g,), measured=(i,))(scan_nd(sweep, measured=[i])) + + memory_engine.submit(plan).wait(timeout=5.0) + + sink: MemorySink = memory_engine._test_sink # type: ignore[attr-defined] + rows = [e for e in sink.events if isinstance(e, RowEmitted)] + # The dataset rows reflect what the "instrument" actually has: 2*set_val. 
+ g_in_rows = [r.snapshot[g] for r in rows] + assert g_in_rows == pytest.approx([2.0, 4.0, 6.0]) + + +def test_scan_nd_event_ordering(memory_engine: MeasurementEngine) -> None: + g, h, i = _make_params() + outer = LinSweep(g, 0.0, 1.0, 2) + inner = LinSweep(h, 0.0, 1.0, 3) + plan = run(setpoints=(g, h), measured=(i,))(scan_nd(outer, inner, measured=[i])) + + memory_engine.submit(plan).wait(timeout=5.0) + + sink: MemorySink = memory_engine._test_sink # type: ignore[attr-defined] + assert isinstance(sink.events[0], RunStarted) + assert isinstance(sink.events[-1], RunStopped) + middle = sink.events[1:-1] + assert all(isinstance(e, RowEmitted) for e in middle) + + +# ---------------------------------------------------------------------------- +# L3 — SqliteSink (persistence) +# ---------------------------------------------------------------------------- + + +@pytest.fixture +def sqlite_engine(empty_db) -> MeasurementEngine: + del empty_db + sink = SqliteSink(experiment_name="measure_v2_2d_test", sample_name="tracer") + eng = MeasurementEngine(sinks=[sink]) + try: + yield eng + finally: + eng.shutdown(wait=True, timeout=5.0) + + +def test_scan_nd_2d_persists_to_sqlite(sqlite_engine: MeasurementEngine) -> None: + g, h, i = _make_params() + outer = LinSweep(g, 0.0, 1.0, 3) + inner = LinSweep(h, 0.0, 1.0, 4) + plan = run(setpoints=(g, h), measured=(i,))(scan_nd(outer, inner, measured=[i])) + + handle = sqlite_engine.submit(plan) + handle.wait(timeout=10.0) + + ds = handle.dataset.result(timeout=1.0) + assert ds is not None + assert ds.completed + cache = ds.cache.data() + assert len(cache["i"]["g"]) == 3 * 4 + assert len(cache["i"]["h"]) == 3 * 4 + assert len(cache["i"]["i"]) == 3 * 4 + + +def test_convenience_scan_dispatches_to_scan_nd( + sqlite_engine: MeasurementEngine, +) -> None: + """``qc.measure_v2.scan(sweep1, sweep2, measure=[...])`` runs scan_nd.""" + g, h, i = _make_params() + + ds = scan( + LinSweep(g, 0.0, 1.0, 3), + LinSweep(h, 0.0, 1.0, 4), + measure=[i], + 
name="conv-2d", + engine=sqlite_engine, + ) + assert ds is not None + cache = ds.cache.data() + assert len(cache["i"]["g"]) == 3 * 4 + + +def test_convenience_scan_with_zero_sweeps_raises() -> None: + """No-sweep scan is a usage error.""" + g, h, i = _make_params() + del g, h + with pytest.raises(ValueError, match="at least one sweep"): + scan(measure=[i]) diff --git a/tests/measure_v2/test_scan_nd_l1.py b/tests/measure_v2/test_scan_nd_l1.py new file mode 100644 index 000000000000..e6006abac3d5 --- /dev/null +++ b/tests/measure_v2/test_scan_nd_l1.py @@ -0,0 +1,204 @@ +"""L1 tests for ``scan_nd``.""" + +from __future__ import annotations + +import numpy as np +import pytest + +from qcodes.dataset.dond.sweeps import LinSweep +from qcodes.measure_v2 import Emit, Read, Set, Sleep, scan_nd +from qcodes.measure_v2.testing import drive_plan +from qcodes.parameters import Parameter + + +def test_scan_nd_requires_at_least_one_sweep() -> None: + with pytest.raises(ValueError, match="at least one sweep"): + list(scan_nd(measured=[])) + + +def test_scan_nd_1d_equivalent_to_scan_1d() -> None: + """A single-sweep scan_nd matches the structure of scan_1d.""" + from qcodes.measure_v2 import scan_1d # noqa: PLC0415 + + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, 0.0, 1.0, 5) + + a = drive_plan(scan_nd(sweep, measured=[i])) + b = drive_plan(scan_1d(sweep, [i])) + + # Same message types in the same positions (values may differ trivially). 
+ assert [type(m) for m in a.messages] == [type(m) for m in b.messages] + + +def test_scan_nd_2d_message_counts() -> None: + g = Parameter("g") + h = Parameter("h") + i = Parameter("i") + outer = LinSweep(g, 0.0, 1.0, 3) + inner = LinSweep(h, -1.0, 1.0, 4) + + result = drive_plan(scan_nd(outer, inner, measured=[i])) + + sets = [m for m in result.messages if isinstance(m, Set)] + reads = [m for m in result.messages if isinstance(m, Read)] + emits = [m for m in result.messages if isinstance(m, Emit)] + + # 3 outer Sets + 3*4 inner Sets + 2 cleanup Sets (g, h) + assert len(sets) == 3 + 3 * 4 + 2 + # One Read per innermost point + assert len(reads) == 3 * 4 + # One Emit per innermost point + assert len(emits) == 3 * 4 + + +def test_scan_nd_3d_message_counts() -> None: + a = Parameter("a") + b = Parameter("b") + c = Parameter("c") + i = Parameter("i") + s1 = LinSweep(a, 0.0, 1.0, 2) + s2 = LinSweep(b, 0.0, 1.0, 3) + s3 = LinSweep(c, 0.0, 1.0, 4) + + result = drive_plan(scan_nd(s1, s2, s3, measured=[i])) + + emits = [m for m in result.messages if isinstance(m, Emit)] + reads = [m for m in result.messages if isinstance(m, Read)] + assert len(emits) == 2 * 3 * 4 + assert len(reads) == 2 * 3 * 4 + + +def test_scan_nd_outer_set_appears_before_inner_sets() -> None: + """For each outer point, all inner points run before the next outer Set.""" + g = Parameter("g") # outer + h = Parameter("h") # inner + i = Parameter("i") + outer = LinSweep(g, 0.0, 1.0, 3) + inner = LinSweep(h, 0.0, 1.0, 4) + + result = drive_plan(scan_nd(outer, inner, measured=[i])) + + # Walk the messages: each outer Set should be followed by 4 inner Sets + # before the next outer Set. 
+ g_sets = [ + idx + for idx, m in enumerate(result.messages) + if isinstance(m, Set) and m.param is g + ] + # Three sweep Sets + one cleanup Set + assert len(g_sets) == 4 + # Between consecutive sweep Sets of g, h should be Set 4 times + for k in range(3): + between = result.messages[g_sets[k] + 1 : g_sets[k + 1]] + h_sets_between = [m for m in between if isinstance(m, Set) and m.param is h] + if k < 3 - 1: + # interior gap: 4 inner sets + assert len(h_sets_between) == 4 + # The last gap (after last outer sweep Set) contains the inner sweep + # plus the cleanup h Set — drop check; tested elsewhere. + + +def test_scan_nd_cleanup_ramps_all_sweep_params_to_zero() -> None: + g = Parameter("g") + h = Parameter("h") + i = Parameter("i") + outer = LinSweep(g, 0.0, 5.0, 4) + inner = LinSweep(h, 0.0, 5.0, 4) + + result = drive_plan(scan_nd(outer, inner, measured=[i])) + + # Last two Sets in the stream must be (g→0, h→0) in outer-to-inner order. + sets_at_zero = [m for m in result.messages if isinstance(m, Set) and m.value == 0.0] + # Among those, the LAST two are the cleanup. + cleanup = sets_at_zero[-2:] + assert cleanup[0].param is g + assert cleanup[1].param is h + + +def test_scan_nd_cleanup_runs_on_cancel() -> None: + g = Parameter("g") + h = Parameter("h") + i = Parameter("i") + outer = LinSweep(g, 0.0, 1.0, 100) + inner = LinSweep(h, 0.0, 1.0, 100) + + result = drive_plan(scan_nd(outer, inner, measured=[i]), cancel_after=5) + + assert result.cancelled + # The last messages must be the two cleanup Sets. 
+ last_two_sets = [m for m in result.messages if isinstance(m, Set)][-2:] + assert last_two_sets[0].param is g + assert last_two_sets[0].value == 0.0 + assert last_two_sets[1].param is h + assert last_two_sets[1].value == 0.0 + + +def test_scan_nd_sleeps_on_each_sweep_with_positive_delay() -> None: + g = Parameter("g") + h = Parameter("h") + i = Parameter("i") + # Outer has delay; inner has no delay + outer = LinSweep(g, 0.0, 1.0, 3, delay=0.05) + inner = LinSweep(h, 0.0, 1.0, 4, delay=0.0) + + result = drive_plan(scan_nd(outer, inner, measured=[i])) + + sleeps = [m for m in result.messages if isinstance(m, Sleep)] + # Sleep only after outer Set: 3 of them, each 0.05s + assert len(sleeps) == 3 + assert all(s.seconds == pytest.approx(0.05) for s in sleeps) + + +def test_scan_nd_get_after_set_inserts_readback() -> None: + """When sweep.get_after_set is True, scan_nd yields Read((param,)) after Set.""" + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, 0.0, 1.0, 3, get_after_set=True) + + result = drive_plan(scan_nd(sweep, measured=[i])) + + # Each sweep iteration: Set(g) → Read((g,)) → Read((i,)) → Emit + # So we have 3 Reads of (g,) and 3 Reads of (i,) — 6 total. + g_reads = [m for m in result.messages if isinstance(m, Read) and m.params == (g,)] + i_reads = [m for m in result.messages if isinstance(m, Read) and m.params == (i,)] + assert len(g_reads) == 3 + assert len(i_reads) == 3 + + +def test_scan_nd_get_after_set_off_no_readback() -> None: + g = Parameter("g") + i = Parameter("i") + sweep = LinSweep(g, 0.0, 1.0, 3, get_after_set=False) + + result = drive_plan(scan_nd(sweep, measured=[i])) + + # No standalone (g,) Read should appear. 
+ g_only_reads = [ + m for m in result.messages if isinstance(m, Read) and m.params == (g,) + ] + assert g_only_reads == [] + + +def test_scan_nd_set_values_are_correct() -> None: + """Verify the actual sweep values land in the Set messages.""" + g = Parameter("g") + h = Parameter("h") + i = Parameter("i") + outer = LinSweep(g, 0.0, 1.0, 3) + inner = LinSweep(h, -1.0, 1.0, 2) + + result = drive_plan(scan_nd(outer, inner, measured=[i])) + + # The last Set of each sweep param is the cleanup ramp to 0.0; + # everything before that is the sweep itself. + g_sets = [m for m in result.messages if isinstance(m, Set) and m.param is g] + h_sets = [m for m in result.messages if isinstance(m, Set) and m.param is h] + # 3 sweep points + 1 cleanup for g + g_sweep_values = [s.value for s in g_sets[:-1]] + # 3 outer * 2 inner = 6 sweep points + 1 cleanup for h + h_sweep_values = [s.value for s in h_sets[:-1]] + expected_g = list(np.linspace(0.0, 1.0, 3)) + expected_h = list(np.linspace(-1.0, 1.0, 2)) * 3 + assert g_sweep_values == pytest.approx(expected_g) + assert h_sweep_values == pytest.approx(expected_h) From 3593ccc726c0cb389480fb61d642884208da9982 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Tue, 12 May 2026 10:18:15 +0200 Subject: [PATCH 4/5] Add measure_v2 tutorial notebook Walks through qcodes.measure_v2 end-to-end with executable examples: 1. Setup (temp database, software-only parameters) 2. Simple 1D blocking scan via qc.measure_v2.scan() 3. Non-blocking submission with wait=False; peeking partial data 4. Cancellation with verified cleanup (g ramps back to 0) 5. 2D scan via scan_nd 6. Writing a custom plan-builder (bisection search) with try/finally 7. Custom sinks: MemorySink for inspection, plain-function sink for live progress 8. Unit-testing plan-builders without an engine (drive_plan helper) 9. Caveats and v1 backlog All cells executed; the cancel-safety guarantee and bisection convergence are demonstrated with real output values in the saved notebook. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../DataSet/measure_v2_tutorial.ipynb | 726 ++++++++++++++++++ 1 file changed, 726 insertions(+) create mode 100644 docs/examples/DataSet/measure_v2_tutorial.ipynb diff --git a/docs/examples/DataSet/measure_v2_tutorial.ipynb b/docs/examples/DataSet/measure_v2_tutorial.ipynb new file mode 100644 index 000000000000..d3f241dce579 --- /dev/null +++ b/docs/examples/DataSet/measure_v2_tutorial.ipynb @@ -0,0 +1,726 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "34c5e595", + "metadata": {}, + "source": [ + "# Tutorial: `qcodes.measure_v2` (experimental)\n", + "\n", + "This notebook walks through the experimental parallel measurement API in\n", + "QCoDeS, `qcodes.measure_v2`. It is a complement to (not a replacement for)\n", + "the existing `Measurement` / `dond` workflow.\n", + "\n", + "**What's new in `measure_v2`:**\n", + "\n", + "- **Plans are data, not for-loops.** A \"plan\" is a Python generator that\n", + " yields typed messages (`Set`, `Read`, `Sleep`, `Emit`, …). A\n", + " `MeasurementEngine` drives the plan on a dedicated worker thread.\n", + "- **Non-blocking submission.** `engine.submit(plan)` returns immediately\n", + " with a `RunHandle`. The notebook kernel stays responsive while the scan\n", + " runs.\n", + "- **Cancellation with guaranteed cleanup.** `handle.cancel()` runs the\n", + " plan's `try/finally` blocks, so user-defined cleanup (e.g., ramping a\n", + " gate back to 0) always executes — even on Ctrl-C.\n", + "- **Pluggable sinks.** A \"sink\" is just a callable that consumes events\n", + " (`RunStarted`, `RowEmitted`, `RunStopped`). The default sink writes to\n", + " the existing QCoDeS SQLite dataset; custom sinks can drive live plots,\n", + " network dashboards, ML feedback loops, etc.\n", + "\n", + "**Stability warning:** the `measure_v2` package is experimental. The API\n", + "may change in incompatible ways between releases. 
For production use,\n", + "stick with `Measurement` / `dond`.\n", + "\n", + "The architecture document lives at `src/qcodes/measure_v2/DESIGN.md`." + ] + }, + { + "cell_type": "markdown", + "id": "745cba7c", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "We'll use a temporary database file and a couple of software-only\n", + "parameters so the notebook is fully self-contained — no instruments\n", + "required." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "b8694eb6", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:29.173248Z", + "iopub.status.busy": "2026-05-12T08:13:29.173248Z", + "iopub.status.idle": "2026-05-12T08:13:31.214997Z", + "shell.execute_reply": "2026-05-12T08:13:31.214997Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Database: C:\\Users\\jenielse\\AppData\\Local\\Temp\\measure_v2_tutorial_s4_9ya6r\\tutorial.db\n" + ] + } + ], + "source": [ + "import os\n", + "import tempfile\n", + "import time\n", + "\n", + "import numpy as np\n", + "\n", + "import qcodes as qc\n", + "from qcodes import measure_v2 as mv2\n", + "from qcodes.dataset.dond.sweeps import LinSweep\n", + "from qcodes.dataset.sqlite.database import initialise_or_create_database_at\n", + "\n", + "# Temporary database for this notebook\n", + "tmpdir = tempfile.mkdtemp(prefix=\"measure_v2_tutorial_\")\n", + "db_path = os.path.join(tmpdir, \"tutorial.db\")\n", + "qc.config[\"core\"][\"db_location\"] = db_path\n", + "initialise_or_create_database_at(db_path)\n", + "print(f\"Database: {db_path}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "6e7ce2ea", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:31.217763Z", + "iopub.status.busy": "2026-05-12T08:13:31.217763Z", + "iopub.status.idle": "2026-05-12T08:13:31.224232Z", + "shell.execute_reply": "2026-05-12T08:13:31.224232Z" + } + }, + "outputs": [], + "source": [ + "# Software-only parameters. 
``i`` is a function of ``g`` and ``h``,\n", + "# mimicking an instrument whose measurement depends on the gate voltages.\n", + "g = qc.Parameter(\"g\", initial_value=0.0, set_cmd=None, get_cmd=None)\n", + "h = qc.Parameter(\"h\", initial_value=0.0, set_cmd=None, get_cmd=None)\n", + "i = qc.Parameter(\"i\", get_cmd=lambda: np.sin(g.cache.get()) * np.cos(h.cache.get()))" + ] + }, + { + "cell_type": "markdown", + "id": "f9d68876", + "metadata": {}, + "source": [ + "## 1. The simplest case: a blocking 1D scan\n", + "\n", + "`qc.measure_v2.scan(...)` mirrors the role of `do1d` / `do2d` / `dond`.\n", + "By default it blocks until the run completes and returns the resulting\n", + "dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cbda23bd", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:31.226999Z", + "iopub.status.busy": "2026-05-12T08:13:31.226999Z", + "iopub.status.idle": "2026-05-12T08:13:31.285725Z", + "shell.execute_reply": "2026-05-12T08:13:31.285725Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting experimental run with id: 1. \n", + "DataSet\n", + "{'i': {'i': array([ 0.00000000e+00, 3.09016994e-01, 5.87785252e-01, 8.09016994e-01,\n", + " 9.51056516e-01, 1.00000000e+00, 9.51056516e-01, 8.09016994e-01,\n", + " 5.87785252e-01, 3.09016994e-01, 1.22464680e-16, -3.09016994e-01,\n", + " -5.87785252e-01, -8.09016994e-01, -9.51056516e-01, -1.00000000e+00,\n", + " -9.51056516e-01, -8.09016994e-01, -5.87785252e-01, -3.09016994e-01,\n", + " -2.44929360e-16]), 'g': array([0. 
, 0.31415927, 0.62831853, 0.9424778 , 1.25663706,\n", + " 1.57079633, 1.88495559, 2.19911486, 2.51327412, 2.82743339,\n", + " 3.14159265, 3.45575192, 3.76991118, 4.08407045, 4.39822972,\n", + " 4.71238898, 5.02654825, 5.34070751, 5.65486678, 5.96902604,\n", + " 6.28318531])}}\n" + ] + } + ], + "source": [ + "ds = mv2.scan(\n", + " LinSweep(g, 0.0, 2 * np.pi, 21),\n", + " measure=[i],\n", + " name=\"1d_blocking\",\n", + ")\n", + "\n", + "# The returned object is a standard QCoDeS DataSet — works with all\n", + "# existing plotting and analysis tools.\n", + "print(type(ds).__name__)\n", + "print(ds.cache.data())" + ] + }, + { + "cell_type": "markdown", + "id": "7e15ac95", + "metadata": {}, + "source": [ + "## 2. Non-blocking submission with `wait=False`\n", + "\n", + "Pass `wait=False` to keep the kernel responsive while a long scan runs.\n", + "You get a `RunHandle` back instead of a dataset. The handle exposes\n", + "`wait()`, `cancel()`, `status`, and futures for the dataset and final\n", + "result." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ab6fb703", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:31.288492Z", + "iopub.status.busy": "2026-05-12T08:13:31.288492Z", + "iopub.status.idle": "2026-05-12T08:13:34.415084Z", + "shell.execute_reply": "2026-05-12T08:13:34.415084Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "status while running: running\n", + "Starting experimental run with id: 2. 
\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "rows so far: 32\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "final reason: completed, n_rows: 200\n" + ] + } + ], + "source": [ + "handle = mv2.scan(\n", + " LinSweep(g, 0.0, 2 * np.pi, 200, delay=0.01),\n", + " measure=[i],\n", + " name=\"1d_nonblocking\",\n", + " wait=False,\n", + ")\n", + "print(\"status while running:\", handle.status)\n", + "\n", + "# The notebook kernel can do other work here — e.g., peek partial data\n", + "# via the in-memory cache.\n", + "time.sleep(0.5)\n", + "partial_ds = handle.dataset.result(timeout=1.0)\n", + "print(f\"rows so far: {len(partial_ds.cache.data()['i']['g'])}\")\n", + "\n", + "# Then wait for it to complete.\n", + "result = handle.wait(timeout=30.0)\n", + "print(f\"final reason: {result.reason}, n_rows: {result.n_rows_emitted}\")" + ] + }, + { + "cell_type": "markdown", + "id": "0307497f", + "metadata": {}, + "source": [ + "## 3. Cancellation with safe cleanup\n", + "\n", + "The architecture's central safety guarantee: when you cancel a run,\n", + "the plan's `try/finally` blocks always run. For built-in plan-builders\n", + "like `scan_1d` and `scan_nd`, the `finally` block ramps every swept\n", + "parameter back to `0.0` before reporting the run stopped.\n", + "\n", + "This is verifiable: after a mid-flight cancel, the swept parameter's\n", + "cache shows `0.0`, not the value it was at when we cancelled." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8e2085c9", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.417855Z", + "iopub.status.busy": "2026-05-12T08:13:34.417855Z", + "iopub.status.idle": "2026-05-12T08:13:34.483887Z", + "shell.execute_reply": "2026-05-12T08:13:34.483887Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting experimental run with id: 3. 
\n", + "g during scan: 0.004\n", + "reason: cancelled\n", + "g after cancel: 0.000 (back to 0 — finally ran)\n", + "partial dataset rows: 2\n" + ] + } + ], + "source": [ + "# Start a long scan, drive g away from 0.\n", + "handle = mv2.scan(\n", + " LinSweep(g, 0.0, 2.0, 1000, delay=0.01),\n", + " measure=[i],\n", + " name=\"cancel_safety_demo\",\n", + " wait=False,\n", + ")\n", + "time.sleep(0.05)\n", + "\n", + "# Mid-flight: g is somewhere between 0 and 2.\n", + "print(f\"g during scan: {g.cache.get():.3f}\")\n", + "\n", + "# Cancel. The plan's finally block runs and ramps g back to 0.\n", + "handle.cancel()\n", + "result = handle.wait(timeout=5.0)\n", + "\n", + "print(f\"reason: {result.reason}\")\n", + "print(f\"g after cancel: {g.cache.get():.3f} (back to 0 — finally ran)\")\n", + "print(f\"partial dataset rows: {result.n_rows_emitted}\")" + ] + }, + { + "cell_type": "markdown", + "id": "3dcbd8ef", + "metadata": {}, + "source": [ + "## 4. N-dimensional scans with `scan_nd`\n", + "\n", + "For multi-dimensional scans, just pass more sweeps. Outermost-first\n", + "ordering: the first sweep is the slowest axis." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "9d5292f9", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.486159Z", + "iopub.status.busy": "2026-05-12T08:13:34.486159Z", + "iopub.status.idle": "2026-05-12T08:13:34.528788Z", + "shell.execute_reply": "2026-05-12T08:13:34.528788Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting experimental run with id: 4. 
\n", + "InterDependencies_(dependencies={ParamSpecBase('i', 'numeric', 'i', ''): (ParamSpecBase('g', 'numeric', 'g', ''), ParamSpecBase('h', 'numeric', 'h', ''))}, inferences={}, standalones=frozenset())\n", + "shape: 121 rows\n" + ] + } + ], + "source": [ + "ds = mv2.scan(\n", + " LinSweep(g, 0.0, 2 * np.pi, 11), # outer\n", + " LinSweep(h, 0.0, 2 * np.pi, 11), # inner\n", + " measure=[i],\n", + " name=\"2d_scan\",\n", + ")\n", + "print(ds.description.interdeps)\n", + "print(f\"shape: {len(ds.cache.data()['i']['g'])} rows\")" + ] + }, + { + "cell_type": "markdown", + "id": "4e582712", + "metadata": {}, + "source": [ + "## 5. Writing your own plan-builder\n", + "\n", + "A plan is just a Python generator that yields message dataclasses. You\n", + "write the loop; the engine handles threading, dispatch, cancellation,\n", + "and event publication.\n", + "\n", + "The contract:\n", + "- Yield `Set(param, value)`, `Sleep(seconds)`, `Read((param, ...))`, or\n", + " `Emit()` messages.\n", + "- Use `result = yield Read((...))` to get the read values back into your\n", + " plan — this is what enables adaptive scans.\n", + "- Put cleanup in a `try/finally` block. The engine guarantees the\n", + " `finally` runs on cancel.\n", + "\n", + "Example: a **bisection search** for the gate voltage where a signal\n", + "crosses a threshold. The plan decides each next setpoint based on the\n", + "previous reading. We use `cos(g)`, which crosses 0 at `g = π/2`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "3031081e", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.531566Z", + "iopub.status.busy": "2026-05-12T08:13:34.531566Z", + "iopub.status.idle": "2026-05-12T08:13:34.537130Z", + "shell.execute_reply": "2026-05-12T08:13:34.537130Z" + } + }, + "outputs": [], + "source": [ + "def bisect_search(gate, signal, *, lo, hi, threshold, tolerance):\n", + " \"\"\"Plan-builder: binary-search for the threshold crossing.\"\"\"\n", + " try:\n", + " while abs(hi - lo) > tolerance:\n", + " mid = 0.5 * (hi + lo)\n", + " yield mv2.Set(gate, mid)\n", + " yield mv2.Sleep(0.001)\n", + " r = yield mv2.Read((signal,))\n", + " yield mv2.Emit()\n", + " # cos is monotonically decreasing on [0, π]: shrink lo or hi\n", + " # to keep the threshold crossing bracketed.\n", + " if r[signal] < threshold:\n", + " hi = mid\n", + " else:\n", + " lo = mid\n", + " finally:\n", + " yield mv2.Set(gate, 0.0) # always return to safe value\n", + "\n", + "\n", + "# Engineer a signal that crosses 0 at g = π/2 (the canonical bisection target\n", + "# on [0, π] for cos).\n", + "target = qc.Parameter(\"target\", get_cmd=lambda: np.cos(g.cache.get()))" + ] + }, + { + "cell_type": "markdown", + "id": "2fb8a5ea", + "metadata": {}, + "source": [ + "Plan-builders that yield their own messages need to be wrapped with\n", + "the `run(...)` decorator to add the `OpenRun`/`CloseRun` lifecycle, then\n", + "submitted to an engine:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "69862857", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.539905Z", + "iopub.status.busy": "2026-05-12T08:13:34.539905Z", + "iopub.status.idle": "2026-05-12T08:13:34.726411Z", + "shell.execute_reply": "2026-05-12T08:13:34.726411Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting experimental run with id: 5. 
\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "crossing found near g = 1.5716 (expected: π/2 ≈ 1.5708)\n" + ] + } + ], + "source": [ + "plan = mv2.run(\n", + " name=\"bisect_demo\",\n", + " setpoints=(g,),\n", + " measured=(target,),\n", + ")(bisect_search(g, target, lo=0.0, hi=np.pi, threshold=0.0, tolerance=0.001))\n", + "\n", + "engine = mv2.default_engine()\n", + "handle = engine.submit(plan)\n", + "handle.wait(timeout=10.0)\n", + "\n", + "ds = handle.dataset.result()\n", + "data = ds.cache.data()\n", + "final_g = list(data[\"target\"][\"g\"])[-1]\n", + "print(f\"crossing found near g = {final_g:.4f} (expected: π/2 ≈ {np.pi / 2:.4f})\")" + ] + }, + { + "cell_type": "markdown", + "id": "96c9a64b", + "metadata": {}, + "source": [ + "## 6. Custom sinks for live data\n", + "\n", + "A \"sink\" is any callable that accepts an `Event`. Sinks run on the\n", + "engine's publisher thread; they're the natural place to attach live\n", + "plotting, network dashboards, ML feedback loops, etc.\n", + "\n", + "The simplest sink is `MemorySink`, which just records every event into\n", + "a list — perfect for inspecting what the engine actually emits." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f4e91121", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.729177Z", + "iopub.status.busy": "2026-05-12T08:13:34.729177Z", + "iopub.status.idle": "2026-05-12T08:13:34.800409Z", + "shell.execute_reply": "2026-05-12T08:13:34.800409Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting experimental run with id: 6. 
\n", + "total events: 7\n", + " RunStarted: 1\n", + " RowEmitted: 5\n", + " RunStopped: 1\n", + "\n", + "first emitted row's snapshot:\n", + "{'g': 0.0, 'i': np.float64(0.0)}\n" + ] + } + ], + "source": [ + "# Build an engine with both a SqliteSink AND a MemorySink.\n", + "sink = mv2.MemorySink()\n", + "engine = mv2.MeasurementEngine(\n", + " sinks=[mv2.SqliteSink(experiment_name=\"custom_sinks\"), sink],\n", + ")\n", + "try:\n", + " plan = mv2.run(setpoints=(g,), measured=(i,))(\n", + " mv2.scan_1d(LinSweep(g, 0.0, 1.0, 5), [i])\n", + " )\n", + " engine.submit(plan).wait(timeout=10.0)\n", + "finally:\n", + " engine.shutdown(wait=True, timeout=5.0)\n", + "\n", + "# Inspect the event stream.\n", + "print(f\"total events: {len(sink.events)}\")\n", + "print(f\" RunStarted: {len(sink.starts)}\")\n", + "print(f\" RowEmitted: {len(sink.rows)}\")\n", + "print(f\" RunStopped: {len(sink.stops)}\")\n", + "print()\n", + "print(\"first emitted row's snapshot:\")\n", + "print({p.name: v for p, v in sink.rows[0].snapshot.items()})" + ] + }, + { + "cell_type": "markdown", + "id": "5ce64761", + "metadata": {}, + "source": [ + "### Writing a custom sink\n", + "\n", + "A sink is just a callable. Want a real-time progress printer? One\n", + "function:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "693170d1", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.803176Z", + "iopub.status.busy": "2026-05-12T08:13:34.803176Z", + "iopub.status.idle": "2026-05-12T08:13:34.853552Z", + "shell.execute_reply": "2026-05-12T08:13:34.853552Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting experimental run with id: 7. 
\n", + " RunStarted: RunStarted(run_id=UUID('920d3fd3-2a38-46dd-bf8e-3f17257412b1'), name='', descriptor=Descriptor(setpoints=(,), measured=(,), shapes=None), exp=None, write_period=None, started_at=1778573614.8059433)\n", + " row #0\n", + " row #1\n", + " row #2\n", + " RunStopped: RunStopped(run_id=UUID('920d3fd3-2a38-46dd-bf8e-3f17257412b1'), reason='completed', error=None, started_at=1778573614.8059433, stopped_at=1778573614.8059433, cancel_latency=None, n_rows_emitted=3)\n" + ] + } + ], + "source": [ + "def progress_sink(event):\n", + " \"\"\"Print a status line for each event.\"\"\"\n", + " name = type(event).__name__\n", + " if isinstance(event, mv2.RowEmitted):\n", + " print(f\" row #{event.seq}\")\n", + " else:\n", + " print(f\" {name}: {event}\")\n", + "\n", + "\n", + "engine = mv2.MeasurementEngine(\n", + " sinks=[mv2.SqliteSink(experiment_name=\"custom_sinks\"), progress_sink],\n", + ")\n", + "try:\n", + " plan = mv2.run(setpoints=(g,), measured=(i,))(\n", + " mv2.scan_1d(LinSweep(g, 0.0, 1.0, 3), [i])\n", + " )\n", + " engine.submit(plan).wait(timeout=10.0)\n", + "finally:\n", + " engine.shutdown(wait=True, timeout=5.0)" + ] + }, + { + "cell_type": "markdown", + "id": "637b117c", + "metadata": {}, + "source": [ + "## 7. Unit-testing plan-builders without an engine\n", + "\n", + "Because plans are pure generators, you can unit-test them without\n", + "instantiating an engine, sinks, or instruments. The\n", + "`qcodes.measure_v2.testing.drive_plan` helper drives a plan generator,\n", + "synthesizing responses to `Read` messages and recording every yielded\n", + "message." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "812266d5", + "metadata": { + "execution": { + "iopub.execute_input": "2026-05-12T08:13:34.856324Z", + "iopub.status.busy": "2026-05-12T08:13:34.856324Z", + "iopub.status.idle": "2026-05-12T08:13:34.866541Z", + "shell.execute_reply": "2026-05-12T08:13:34.866541Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "number of bisection iterations: 8\n", + "plan converged near g = 0.9922 (expected: 1.0)\n" + ] + } + ], + "source": [ + "# drive_plan synthesizes responses to Read messages; we model a signal\n", + "# that's positive below g=1.0 and negative above. Bisection should\n", + "# converge near g=1.0.\n", + "fake_g = qc.Parameter(\"fake_g\")\n", + "fake_signal = qc.Parameter(\"fake_signal\")\n", + "\n", + "# We need to track the latest Set value because drive_plan's on_read\n", + "# callback gets parameters but not values. Use a closure.\n", + "state = {\"last_g\": 0.0}\n", + "\n", + "\n", + "def custom_drive():\n", + " plan = bisect_search(\n", + " fake_g,\n", + " fake_signal,\n", + " lo=0.0,\n", + " hi=2.0,\n", + " threshold=0.0,\n", + " tolerance=0.01,\n", + " )\n", + " msg = next(plan)\n", + " messages = [msg]\n", + " send_value = None\n", + " try:\n", + " while True:\n", + " if isinstance(msg, mv2.Set) and msg.param is fake_g:\n", + " state[\"last_g\"] = msg.value\n", + " if isinstance(msg, mv2.Read):\n", + " send_value = {\n", + " fake_signal: 1.0 if state[\"last_g\"] < 1.0 else -1.0,\n", + " }\n", + " else:\n", + " send_value = None\n", + " msg = plan.send(send_value)\n", + " messages.append(msg)\n", + " except StopIteration:\n", + " pass\n", + " return messages\n", + "\n", + "\n", + "messages = custom_drive()\n", + "sets = [m for m in messages if isinstance(m, mv2.Set)]\n", + "sweep_sets = sets[:-1] # last is the cleanup ramp-to-0\n", + "print(f\"number of bisection iterations: {len(sweep_sets)}\")\n", + "final_sweep_g = sweep_sets[-1].value\n", + 
"print(f\"plan converged near g = {final_sweep_g:.4f} (expected: 1.0)\")" + ] + }, + { + "cell_type": "markdown", + "id": "20b92599", + "metadata": {}, + "source": [ + "## 8. Things to watch out for\n", + "\n", + "- **Experimental status.** API may change in incompatible ways.\n", + "- **One run at a time per engine** in v1 — concurrent `submit()` raises\n", + " `RuntimeError`. Queuing is on the v1 backlog.\n", + "- **`Sleep` is cancellable** in ~100ms chunks; `Set`/`Read` are not\n", + " interruptible mid-call. A scan with 60-second per-point delays takes\n", + " up to 60s to cancel.\n", + "- **`dataset.cache.data()` is the safe way to read live data** from the\n", + " notebook thread. `dataset.get_parameter_data()` uses the connection\n", + " bound to the engine's publisher thread and will fail with a thread\n", + " mismatch from the main thread.\n", + "- **Things NOT in this release**: `enter_actions`/`exit_actions`,\n", + " inner-loop actions, `break_condition`, `additional_setpoints`,\n", + " `flush_columns`, `live_plot=True` sugar, queueing, pause/resume,\n", + " parallel reads via `underlying_instrument` grouping. They're all\n", + " on the v1 P0/P1 backlog and don't disturb the architectural foundation.\n", + "\n", + "## Further reading\n", + "\n", + "- Architecture document: `src/qcodes/measure_v2/DESIGN.md`\n", + "- Decisions log: same document, §13\n", + "- Tests: `tests/measure_v2/` — particularly `test_acceptance.py` for\n", + " end-to-end examples." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 175b20c58372bae1557e79f860221cf7f685475e Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Tue, 12 May 2026 10:44:58 +0200 Subject: [PATCH 5/5] Fix pyright errors and add type annotations to measure_v2 - Add @overload signatures to qc.measure_v2.scan() so wait=True returns DataSetProtocol | None and wait=False returns RunHandle. - Narrow Future[Any] -> Future[DataSetProtocol | None] for RunHandle.dataset and _Submission.dataset_future. Callers no longer need to cast to access .cache. - Type all test fixtures as Generator[T, None, None] (they yield, not return). - Move type-only application imports (e.g., ParameterBase) into TYPE_CHECKING blocks in test files. - Tutorial notebook: add type annotations throughout the code cells (function signatures, key variable annotations), and place type-only imports under TYPE_CHECKING. Notebook still executes end-to-end. pyright clean on src/qcodes/measure_v2 + tests/measure_v2 (0 errors). All 70 tests pass. prek hooks pass. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../DataSet/measure_v2_tutorial.ipynb | 247 ++++++++++-------- src/qcodes/measure_v2/convenience.py | 24 +- src/qcodes/measure_v2/engine.py | 9 +- tests/measure_v2/test_acceptance.py | 16 +- tests/measure_v2/test_engine.py | 6 +- tests/measure_v2/test_scan_nd.py | 8 +- tests/measure_v2/test_sqlite_sink.py | 6 +- 7 files changed, 191 insertions(+), 125 deletions(-) diff --git a/docs/examples/DataSet/measure_v2_tutorial.ipynb b/docs/examples/DataSet/measure_v2_tutorial.ipynb index d3f241dce579..120d1d8b47d0 100644 --- a/docs/examples/DataSet/measure_v2_tutorial.ipynb +++ b/docs/examples/DataSet/measure_v2_tutorial.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "34c5e595", + "id": "10600999", "metadata": {}, "source": [ "# Tutorial: `qcodes.measure_v2` (experimental)\n", @@ -36,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "745cba7c", + "id": "efc8cb21", "metadata": {}, "source": [ "## Setup\n", @@ -49,13 +49,13 @@ { "cell_type": "code", "execution_count": 1, - "id": "b8694eb6", + "id": "1a786f6f", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:29.173248Z", - "iopub.status.busy": "2026-05-12T08:13:29.173248Z", - "iopub.status.idle": "2026-05-12T08:13:31.214997Z", - "shell.execute_reply": "2026-05-12T08:13:31.214997Z" + "iopub.execute_input": "2026-05-12T08:44:00.958271Z", + "iopub.status.busy": "2026-05-12T08:44:00.958271Z", + "iopub.status.idle": "2026-05-12T08:44:02.671480Z", + "shell.execute_reply": "2026-05-12T08:44:02.671480Z" } }, "outputs": [ @@ -63,14 +63,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "Database: C:\\Users\\jenielse\\AppData\\Local\\Temp\\measure_v2_tutorial_s4_9ya6r\\tutorial.db\n" + "Database: C:\\Users\\jenielse\\AppData\\Local\\Temp\\measure_v2_tutorial_7j91qifa\\tutorial.db\n" ] } ], "source": [ + "from __future__ import annotations\n", + "\n", "import os\n", "import tempfile\n", "import time\n", + "from 
typing import TYPE_CHECKING\n", "\n", "import numpy as np\n", "\n", @@ -79,9 +82,16 @@ "from qcodes.dataset.dond.sweeps import LinSweep\n", "from qcodes.dataset.sqlite.database import initialise_or_create_database_at\n", "\n", + "if TYPE_CHECKING:\n", + " from collections.abc import Generator\n", + " from typing import Any\n", + "\n", + " from qcodes.measure_v2 import Msg\n", + " from qcodes.parameters import ParameterBase\n", + "\n", "# Temporary database for this notebook\n", - "tmpdir = tempfile.mkdtemp(prefix=\"measure_v2_tutorial_\")\n", - "db_path = os.path.join(tmpdir, \"tutorial.db\")\n", + "tmpdir: str = tempfile.mkdtemp(prefix=\"measure_v2_tutorial_\")\n", + "db_path: str = os.path.join(tmpdir, \"tutorial.db\")\n", "qc.config[\"core\"][\"db_location\"] = db_path\n", "initialise_or_create_database_at(db_path)\n", "print(f\"Database: {db_path}\")" @@ -90,27 +100,30 @@ { "cell_type": "code", "execution_count": 2, - "id": "6e7ce2ea", + "id": "2c933e1c", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:31.217763Z", - "iopub.status.busy": "2026-05-12T08:13:31.217763Z", - "iopub.status.idle": "2026-05-12T08:13:31.224232Z", - "shell.execute_reply": "2026-05-12T08:13:31.224232Z" + "iopub.execute_input": "2026-05-12T08:44:02.674248Z", + "iopub.status.busy": "2026-05-12T08:44:02.674248Z", + "iopub.status.idle": "2026-05-12T08:44:02.679838Z", + "shell.execute_reply": "2026-05-12T08:44:02.679838Z" } }, "outputs": [], "source": [ "# Software-only parameters. 
``i`` is a function of ``g`` and ``h``,\n", "# mimicking an instrument whose measurement depends on the gate voltages.\n", - "g = qc.Parameter(\"g\", initial_value=0.0, set_cmd=None, get_cmd=None)\n", - "h = qc.Parameter(\"h\", initial_value=0.0, set_cmd=None, get_cmd=None)\n", - "i = qc.Parameter(\"i\", get_cmd=lambda: np.sin(g.cache.get()) * np.cos(h.cache.get()))" + "g: ParameterBase = qc.Parameter(\"g\", initial_value=0.0, set_cmd=None, get_cmd=None)\n", + "h: ParameterBase = qc.Parameter(\"h\", initial_value=0.0, set_cmd=None, get_cmd=None)\n", + "i: ParameterBase = qc.Parameter(\n", + " \"i\",\n", + " get_cmd=lambda: np.sin(g.cache.get()) * np.cos(h.cache.get()),\n", + ")" ] }, { "cell_type": "markdown", - "id": "f9d68876", + "id": "f2207216", "metadata": {}, "source": [ "## 1. The simplest case: a blocking 1D scan\n", @@ -123,13 +136,13 @@ { "cell_type": "code", "execution_count": 3, - "id": "cbda23bd", + "id": "e17be332", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:31.226999Z", - "iopub.status.busy": "2026-05-12T08:13:31.226999Z", - "iopub.status.idle": "2026-05-12T08:13:31.285725Z", - "shell.execute_reply": "2026-05-12T08:13:31.285725Z" + "iopub.execute_input": "2026-05-12T08:44:02.682608Z", + "iopub.status.busy": "2026-05-12T08:44:02.682608Z", + "iopub.status.idle": "2026-05-12T08:44:02.751770Z", + "shell.execute_reply": "2026-05-12T08:44:02.750752Z" } }, "outputs": [ @@ -158,16 +171,14 @@ " measure=[i],\n", " name=\"1d_blocking\",\n", ")\n", - "\n", - "# The returned object is a standard QCoDeS DataSet — works with all\n", - "# existing plotting and analysis tools.\n", + "assert ds is not None # tells the type checker this isn't None\n", "print(type(ds).__name__)\n", "print(ds.cache.data())" ] }, { "cell_type": "markdown", - "id": "7e15ac95", + "id": "2e3b7580", "metadata": {}, "source": [ "## 2. 
Non-blocking submission with `wait=False`\n", @@ -181,13 +192,13 @@ { "cell_type": "code", "execution_count": 4, - "id": "ab6fb703", + "id": "3259550b", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:31.288492Z", - "iopub.status.busy": "2026-05-12T08:13:31.288492Z", - "iopub.status.idle": "2026-05-12T08:13:34.415084Z", - "shell.execute_reply": "2026-05-12T08:13:34.415084Z" + "iopub.execute_input": "2026-05-12T08:44:02.753768Z", + "iopub.status.busy": "2026-05-12T08:44:02.753768Z", + "iopub.status.idle": "2026-05-12T08:44:05.893899Z", + "shell.execute_reply": "2026-05-12T08:44:05.893899Z" } }, "outputs": [ @@ -215,7 +226,7 @@ } ], "source": [ - "handle = mv2.scan(\n", + "handle: mv2.RunHandle = mv2.scan(\n", " LinSweep(g, 0.0, 2 * np.pi, 200, delay=0.01),\n", " measure=[i],\n", " name=\"1d_nonblocking\",\n", @@ -227,16 +238,17 @@ "# via the in-memory cache.\n", "time.sleep(0.5)\n", "partial_ds = handle.dataset.result(timeout=1.0)\n", + "assert partial_ds is not None\n", "print(f\"rows so far: {len(partial_ds.cache.data()['i']['g'])}\")\n", "\n", "# Then wait for it to complete.\n", - "result = handle.wait(timeout=30.0)\n", + "result: mv2.RunResult = handle.wait(timeout=30.0)\n", "print(f\"final reason: {result.reason}, n_rows: {result.n_rows_emitted}\")" ] }, { "cell_type": "markdown", - "id": "0307497f", + "id": "be70de71", "metadata": {}, "source": [ "## 3. 
Cancellation with safe cleanup\n", @@ -253,13 +265,13 @@ { "cell_type": "code", "execution_count": 5, - "id": "8e2085c9", + "id": "28f98206", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:34.417855Z", - "iopub.status.busy": "2026-05-12T08:13:34.417855Z", - "iopub.status.idle": "2026-05-12T08:13:34.483887Z", - "shell.execute_reply": "2026-05-12T08:13:34.483887Z" + "iopub.execute_input": "2026-05-12T08:44:05.896664Z", + "iopub.status.busy": "2026-05-12T08:44:05.896664Z", + "iopub.status.idle": "2026-05-12T08:44:05.962424Z", + "shell.execute_reply": "2026-05-12T08:44:05.962424Z" } }, "outputs": [ @@ -268,10 +280,10 @@ "output_type": "stream", "text": [ "Starting experimental run with id: 3. \n", - "g during scan: 0.004\n", + "g during scan: 0.006\n", "reason: cancelled\n", "g after cancel: 0.000 (back to 0 — finally ran)\n", - "partial dataset rows: 2\n" + "partial dataset rows: 3\n" ] } ], @@ -299,7 +311,7 @@ }, { "cell_type": "markdown", - "id": "3dcbd8ef", + "id": "60d4282b", "metadata": {}, "source": [ "## 4. N-dimensional scans with `scan_nd`\n", @@ -311,13 +323,13 @@ { "cell_type": "code", "execution_count": 6, - "id": "9d5292f9", + "id": "4850e177", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:34.486159Z", - "iopub.status.busy": "2026-05-12T08:13:34.486159Z", - "iopub.status.idle": "2026-05-12T08:13:34.528788Z", - "shell.execute_reply": "2026-05-12T08:13:34.528788Z" + "iopub.execute_input": "2026-05-12T08:44:05.965188Z", + "iopub.status.busy": "2026-05-12T08:44:05.965188Z", + "iopub.status.idle": "2026-05-12T08:44:06.010714Z", + "shell.execute_reply": "2026-05-12T08:44:06.010714Z" } }, "outputs": [ @@ -338,13 +350,14 @@ " measure=[i],\n", " name=\"2d_scan\",\n", ")\n", + "assert ds is not None\n", "print(ds.description.interdeps)\n", "print(f\"shape: {len(ds.cache.data()['i']['g'])} rows\")" ] }, { "cell_type": "markdown", - "id": "4e582712", + "id": "a637dc13", "metadata": {}, "source": [ "## 5. 
Writing your own plan-builder\n", @@ -369,25 +382,33 @@ { "cell_type": "code", "execution_count": 7, - "id": "3031081e", + "id": "84cf647e", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:34.531566Z", - "iopub.status.busy": "2026-05-12T08:13:34.531566Z", - "iopub.status.idle": "2026-05-12T08:13:34.537130Z", - "shell.execute_reply": "2026-05-12T08:13:34.537130Z" + "iopub.execute_input": "2026-05-12T08:44:06.013479Z", + "iopub.status.busy": "2026-05-12T08:44:06.013479Z", + "iopub.status.idle": "2026-05-12T08:44:06.020117Z", + "shell.execute_reply": "2026-05-12T08:44:06.020117Z" } }, "outputs": [], "source": [ - "def bisect_search(gate, signal, *, lo, hi, threshold, tolerance):\n", + "def bisect_search(\n", + " gate: ParameterBase,\n", + " signal: ParameterBase,\n", + " *,\n", + " lo: float,\n", + " hi: float,\n", + " threshold: float,\n", + " tolerance: float,\n", + ") -> Generator[Msg, Any, None]:\n", " \"\"\"Plan-builder: binary-search for the threshold crossing.\"\"\"\n", " try:\n", " while abs(hi - lo) > tolerance:\n", " mid = 0.5 * (hi + lo)\n", " yield mv2.Set(gate, mid)\n", " yield mv2.Sleep(0.001)\n", - " r = yield mv2.Read((signal,))\n", + " r: dict[ParameterBase, Any] = yield mv2.Read((signal,))\n", " yield mv2.Emit()\n", " # cos is monotonically decreasing on [0, π]: shrink lo or hi\n", " # to keep the threshold crossing bracketed.\n", @@ -401,12 +422,12 @@ "\n", "# Engineer a signal that crosses 0 at g = π/2 (the canonical bisection target\n", "# on [0, π] for cos).\n", - "target = qc.Parameter(\"target\", get_cmd=lambda: np.cos(g.cache.get()))" + "target: ParameterBase = qc.Parameter(\"target\", get_cmd=lambda: np.cos(g.cache.get()))" ] }, { "cell_type": "markdown", - "id": "2fb8a5ea", + "id": "937603a2", "metadata": {}, "source": [ "Plan-builders that yield their own messages need to be wrapped with\n", @@ -417,13 +438,13 @@ { "cell_type": "code", "execution_count": 8, - "id": "69862857", + "id": "b680c83a", "metadata": { 
"execution": { - "iopub.execute_input": "2026-05-12T08:13:34.539905Z", - "iopub.status.busy": "2026-05-12T08:13:34.539905Z", - "iopub.status.idle": "2026-05-12T08:13:34.726411Z", - "shell.execute_reply": "2026-05-12T08:13:34.726411Z" + "iopub.execute_input": "2026-05-12T08:44:06.022884Z", + "iopub.status.busy": "2026-05-12T08:44:06.022884Z", + "iopub.status.idle": "2026-05-12T08:44:06.211441Z", + "shell.execute_reply": "2026-05-12T08:44:06.211441Z" } }, "outputs": [ @@ -449,19 +470,20 @@ " measured=(target,),\n", ")(bisect_search(g, target, lo=0.0, hi=np.pi, threshold=0.0, tolerance=0.001))\n", "\n", - "engine = mv2.default_engine()\n", + "engine: mv2.MeasurementEngine = mv2.default_engine()\n", "handle = engine.submit(plan)\n", "handle.wait(timeout=10.0)\n", "\n", "ds = handle.dataset.result()\n", + "assert ds is not None\n", "data = ds.cache.data()\n", - "final_g = list(data[\"target\"][\"g\"])[-1]\n", + "final_g: float = float(list(data[\"target\"][\"g\"])[-1])\n", "print(f\"crossing found near g = {final_g:.4f} (expected: π/2 ≈ {np.pi / 2:.4f})\")" ] }, { "cell_type": "markdown", - "id": "96c9a64b", + "id": "0748d259", "metadata": {}, "source": [ "## 6. 
Custom sinks for live data\n", @@ -477,13 +499,13 @@ { "cell_type": "code", "execution_count": 9, - "id": "f4e91121", + "id": "40acba28", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:34.729177Z", - "iopub.status.busy": "2026-05-12T08:13:34.729177Z", - "iopub.status.idle": "2026-05-12T08:13:34.800409Z", - "shell.execute_reply": "2026-05-12T08:13:34.800409Z" + "iopub.execute_input": "2026-05-12T08:44:06.214207Z", + "iopub.status.busy": "2026-05-12T08:44:06.214207Z", + "iopub.status.idle": "2026-05-12T08:44:06.296336Z", + "shell.execute_reply": "2026-05-12T08:44:06.296336Z" } }, "outputs": [ @@ -504,7 +526,7 @@ ], "source": [ "# Build an engine with both a SqliteSink AND a MemorySink.\n", - "sink = mv2.MemorySink()\n", + "sink: mv2.MemorySink = mv2.MemorySink()\n", "engine = mv2.MeasurementEngine(\n", " sinks=[mv2.SqliteSink(experiment_name=\"custom_sinks\"), sink],\n", ")\n", @@ -528,7 +550,7 @@ }, { "cell_type": "markdown", - "id": "5ce64761", + "id": "4107747d", "metadata": {}, "source": [ "### Writing a custom sink\n", @@ -540,13 +562,13 @@ { "cell_type": "code", "execution_count": 10, - "id": "693170d1", + "id": "e88fe076", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:34.803176Z", - "iopub.status.busy": "2026-05-12T08:13:34.803176Z", - "iopub.status.idle": "2026-05-12T08:13:34.853552Z", - "shell.execute_reply": "2026-05-12T08:13:34.853552Z" + "iopub.execute_input": "2026-05-12T08:44:06.299102Z", + "iopub.status.busy": "2026-05-12T08:44:06.299102Z", + "iopub.status.idle": "2026-05-12T08:44:06.366332Z", + "shell.execute_reply": "2026-05-12T08:44:06.366332Z" } }, "outputs": [ @@ -555,18 +577,18 @@ "output_type": "stream", "text": [ "Starting experimental run with id: 7. 
\n", - " RunStarted: RunStarted(run_id=UUID('920d3fd3-2a38-46dd-bf8e-3f17257412b1'), name='', descriptor=Descriptor(setpoints=(,), measured=(,), shapes=None), exp=None, write_period=None, started_at=1778573614.8059433)\n", + " RunStarted: RunStarted(run_id=UUID('2c727ca7-931c-4f59-a437-dcd8db54b8c6'), name='', descriptor=Descriptor(setpoints=(,), measured=(,), shapes=None), exp=None, write_period=None, started_at=1778575446.3018694)\n", " row #0\n", " row #1\n", " row #2\n", - " RunStopped: RunStopped(run_id=UUID('920d3fd3-2a38-46dd-bf8e-3f17257412b1'), reason='completed', error=None, started_at=1778573614.8059433, stopped_at=1778573614.8059433, cancel_latency=None, n_rows_emitted=3)\n" + " RunStopped: RunStopped(run_id=UUID('2c727ca7-931c-4f59-a437-dcd8db54b8c6'), reason='completed', error=None, started_at=1778575446.3018694, stopped_at=1778575446.3018694, cancel_latency=None, n_rows_emitted=3)\n" ] } ], "source": [ - "def progress_sink(event):\n", + "def progress_sink(event: mv2.Event) -> None:\n", " \"\"\"Print a status line for each event.\"\"\"\n", - " name = type(event).__name__\n", + " name: str = type(event).__name__\n", " if isinstance(event, mv2.RowEmitted):\n", " print(f\" row #{event.seq}\")\n", " else:\n", @@ -587,28 +609,31 @@ }, { "cell_type": "markdown", - "id": "637b117c", + "id": "18f6004d", "metadata": {}, "source": [ "## 7. Unit-testing plan-builders without an engine\n", "\n", "Because plans are pure generators, you can unit-test them without\n", - "instantiating an engine, sinks, or instruments. The\n", - "`qcodes.measure_v2.testing.drive_plan` helper drives a plan generator,\n", - "synthesizing responses to `Read` messages and recording every yielded\n", - "message." + "instantiating an engine, sinks, or instruments. 
We can drive the plan\n", + "generator manually, synthesizing responses to `Read` messages and\n", + "recording every yielded message.\n", + "\n", + "(The `qcodes.measure_v2.testing.drive_plan` helper does this for\n", + "trivial cases. Plans that need stateful response synthesis are easiest\n", + "to drive by hand, as shown here.)" ] }, { "cell_type": "code", "execution_count": 11, - "id": "812266d5", + "id": "73509602", "metadata": { "execution": { - "iopub.execute_input": "2026-05-12T08:13:34.856324Z", - "iopub.status.busy": "2026-05-12T08:13:34.856324Z", - "iopub.status.idle": "2026-05-12T08:13:34.866541Z", - "shell.execute_reply": "2026-05-12T08:13:34.866541Z" + "iopub.execute_input": "2026-05-12T08:44:06.369097Z", + "iopub.status.busy": "2026-05-12T08:44:06.369097Z", + "iopub.status.idle": "2026-05-12T08:44:06.376862Z", + "shell.execute_reply": "2026-05-12T08:44:06.376862Z" } }, "outputs": [ @@ -622,19 +647,17 @@ } ], "source": [ - "# drive_plan synthesizes responses to Read messages; we model a signal\n", - "# that's positive below g=1.0 and negative above. Bisection should\n", - "# converge near g=1.0.\n", - "fake_g = qc.Parameter(\"fake_g\")\n", - "fake_signal = qc.Parameter(\"fake_signal\")\n", + "# A fake parameter pair. We'll drive bisect_search against a signal\n", + "# whose sign flips at g = 1.0; bisection should converge near 1.0.\n", + "fake_g: ParameterBase = qc.Parameter(\"fake_g\")\n", + "fake_signal: ParameterBase = qc.Parameter(\"fake_signal\")\n", "\n", - "# We need to track the latest Set value because drive_plan's on_read\n", - "# callback gets parameters but not values. 
Use a closure.\n", - "state = {\"last_g\": 0.0}\n", + "state: dict[str, float] = {\"last_g\": 0.0}\n", "\n", "\n", - "def custom_drive():\n", - " plan = bisect_search(\n", + "def custom_drive() -> list[Msg]:\n", + " \"\"\"Drive bisect_search to completion, synthesizing Read responses.\"\"\"\n", + " plan: Generator[Msg, Any, None] = bisect_search(\n", " fake_g,\n", " fake_signal,\n", " lo=0.0,\n", @@ -642,13 +665,13 @@ " threshold=0.0,\n", " tolerance=0.01,\n", " )\n", - " msg = next(plan)\n", - " messages = [msg]\n", - " send_value = None\n", + " msg: Msg = next(plan)\n", + " messages: list[Msg] = [msg]\n", + " send_value: Any = None\n", " try:\n", " while True:\n", " if isinstance(msg, mv2.Set) and msg.param is fake_g:\n", - " state[\"last_g\"] = msg.value\n", + " state[\"last_g\"] = float(msg.value)\n", " if isinstance(msg, mv2.Read):\n", " send_value = {\n", " fake_signal: 1.0 if state[\"last_g\"] < 1.0 else -1.0,\n", @@ -662,17 +685,17 @@ " return messages\n", "\n", "\n", - "messages = custom_drive()\n", - "sets = [m for m in messages if isinstance(m, mv2.Set)]\n", - "sweep_sets = sets[:-1] # last is the cleanup ramp-to-0\n", + "messages: list[Msg] = custom_drive()\n", + "sets: list[mv2.Set] = [m for m in messages if isinstance(m, mv2.Set)]\n", + "sweep_sets: list[mv2.Set] = sets[:-1] # last is the cleanup ramp-to-0\n", "print(f\"number of bisection iterations: {len(sweep_sets)}\")\n", - "final_sweep_g = sweep_sets[-1].value\n", + "final_sweep_g: float = float(sweep_sets[-1].value)\n", "print(f\"plan converged near g = {final_sweep_g:.4f} (expected: 1.0)\")" ] }, { "cell_type": "markdown", - "id": "20b92599", + "id": "547236e8", "metadata": {}, "source": [ "## 8. 
Things to watch out for\n", diff --git a/src/qcodes/measure_v2/convenience.py b/src/qcodes/measure_v2/convenience.py index 9f8a8388d317..9b57634e528e 100644 --- a/src/qcodes/measure_v2/convenience.py +++ b/src/qcodes/measure_v2/convenience.py @@ -10,7 +10,7 @@ from __future__ import annotations import threading -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Literal, overload from qcodes.measure_v2.decorators import run from qcodes.measure_v2.engine import MeasurementEngine, RunHandle @@ -63,6 +63,28 @@ def reset_default_engine() -> None: _default_engine = None +@overload +def scan( + *sweeps: AbstractSweep, + measure: Sequence[ParameterBase], + wait: Literal[True] = ..., + name: str = ..., + exp: Experiment | None = ..., + engine: MeasurementEngine | None = ..., +) -> DataSetProtocol | None: ... + + +@overload +def scan( + *sweeps: AbstractSweep, + measure: Sequence[ParameterBase], + wait: Literal[False], + name: str = ..., + exp: Experiment | None = ..., + engine: MeasurementEngine | None = ..., +) -> RunHandle: ... 
+ + def scan( *sweeps: AbstractSweep, measure: Sequence[ParameterBase], diff --git a/src/qcodes/measure_v2/engine.py b/src/qcodes/measure_v2/engine.py index 99adf930afaf..aa5bbec37a96 100644 --- a/src/qcodes/measure_v2/engine.py +++ b/src/qcodes/measure_v2/engine.py @@ -53,6 +53,7 @@ if TYPE_CHECKING: from collections.abc import Generator, Sequence + from qcodes.dataset.data_set_protocol import DataSetProtocol from qcodes.measure_v2.events import Event, RunStopReason from qcodes.measure_v2.messages import Msg from qcodes.measure_v2.sinks import DataSink @@ -87,13 +88,13 @@ def __init__( run_id: UUID, cancel_event: threading.Event, future: Future[RunResult], - dataset_future: Future[Any], + dataset_future: Future[DataSetProtocol | None], ) -> None: self.run_id = run_id self._cancel_event = cancel_event self._cancel_reason_box: list[str] = [] self.future = future - self.dataset = dataset_future + self.dataset: Future[DataSetProtocol | None] = dataset_future def cancel(self, reason: str = "user") -> None: """Request graceful cancellation. 
@@ -132,7 +133,7 @@ class _Submission: cancel_event: threading.Event cancel_reason_box: list[str] future: Future[RunResult] - dataset_future: Future[Any] + dataset_future: Future[DataSetProtocol | None] descriptor: Descriptor | None = None state: dict[ParameterBase, Any] = field(default_factory=dict) n_rows: int = 0 @@ -240,7 +241,7 @@ def submit( run_id = uuid4() cancel_event = threading.Event() future: Future[RunResult] = Future() - dataset_future: Future[Any] = Future() + dataset_future: Future[DataSetProtocol | None] = Future() sub = _Submission( run_id=run_id, plan=plan, diff --git a/tests/measure_v2/test_acceptance.py b/tests/measure_v2/test_acceptance.py index 6f87c6f42c06..8a93493b4b3f 100644 --- a/tests/measure_v2/test_acceptance.py +++ b/tests/measure_v2/test_acceptance.py @@ -12,6 +12,7 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING import pytest @@ -25,9 +26,14 @@ SqliteSink, ) +if TYPE_CHECKING: + from collections.abc import Generator + + from qcodes.parameters import ParameterBase + @pytest.fixture -def fresh_engine(empty_db): +def fresh_engine(empty_db) -> Generator[MeasurementEngine, None, None]: """Acceptance engine: SqliteSink + isolated db, separate from default.""" del empty_db sink = SqliteSink(experiment_name="acceptance", sample_name="tracer") @@ -38,13 +44,15 @@ def fresh_engine(empty_db): eng.shutdown(wait=True, timeout=5.0) -def _make_params(): +def _make_params() -> tuple[ParameterBase, ParameterBase]: g = qc.Parameter("g", initial_value=0.0, set_cmd=None, get_cmd=None) i = qc.Parameter("i", get_cmd=lambda: g.cache.get() ** 2) return g, i -def test_acceptance_blocking_scan_returns_populated_dataset(fresh_engine) -> None: +def test_acceptance_blocking_scan_returns_populated_dataset( + fresh_engine: MeasurementEngine, +) -> None: """Acceptance #1: ``scan(..., wait=True)`` returns a real dataset.""" g, i = _make_params() @@ -65,7 +73,7 @@ def 
test_acceptance_blocking_scan_returns_populated_dataset(fresh_engine) -> Non def test_acceptance_nonblocking_cancel_preserves_partial_data( - fresh_engine, + fresh_engine: MeasurementEngine, ) -> None: """Acceptance #2: cancel mid-scan; finally ramps g to 0; partial data persisted.""" g, i = _make_params() diff --git a/tests/measure_v2/test_engine.py b/tests/measure_v2/test_engine.py index 52ec716d5fa3..9d4e25b90a8d 100644 --- a/tests/measure_v2/test_engine.py +++ b/tests/measure_v2/test_engine.py @@ -9,6 +9,7 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING import pytest @@ -28,9 +29,12 @@ ) from qcodes.parameters import Parameter, ParameterBase +if TYPE_CHECKING: + from collections.abc import Generator + @pytest.fixture -def engine() -> MeasurementEngine: +def engine() -> Generator[MeasurementEngine, None, None]: sink = MemorySink() eng = MeasurementEngine(sinks=[sink]) # Stash the sink on the engine for test convenience. diff --git a/tests/measure_v2/test_scan_nd.py b/tests/measure_v2/test_scan_nd.py index 6c064a542531..1921b3b20c3b 100644 --- a/tests/measure_v2/test_scan_nd.py +++ b/tests/measure_v2/test_scan_nd.py @@ -3,6 +3,7 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING import pytest @@ -20,6 +21,9 @@ ) from qcodes.parameters import Parameter, ParameterBase +if TYPE_CHECKING: + from collections.abc import Generator + def _make_params() -> tuple[ParameterBase, ParameterBase, ParameterBase]: g = Parameter("g", initial_value=0.0, set_cmd=None, get_cmd=None) @@ -35,7 +39,7 @@ def _make_params() -> tuple[ParameterBase, ParameterBase, ParameterBase]: @pytest.fixture -def memory_engine() -> MeasurementEngine: +def memory_engine() -> Generator[MeasurementEngine, None, None]: sink = MemorySink() eng = MeasurementEngine(sinks=[sink]) eng._test_sink = sink # type: ignore[attr-defined] @@ -171,7 +175,7 @@ def test_scan_nd_event_ordering(memory_engine: MeasurementEngine) -> None: @pytest.fixture 
-def sqlite_engine(empty_db) -> MeasurementEngine: +def sqlite_engine(empty_db) -> Generator[MeasurementEngine, None, None]: del empty_db sink = SqliteSink(experiment_name="measure_v2_2d_test", sample_name="tracer") eng = MeasurementEngine(sinks=[sink]) diff --git a/tests/measure_v2/test_sqlite_sink.py b/tests/measure_v2/test_sqlite_sink.py index 67d4f277a24e..5a0b98cd7f0d 100644 --- a/tests/measure_v2/test_sqlite_sink.py +++ b/tests/measure_v2/test_sqlite_sink.py @@ -3,6 +3,7 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING import pytest @@ -15,9 +16,12 @@ ) from qcodes.parameters import Parameter, ParameterBase +if TYPE_CHECKING: + from collections.abc import Generator + @pytest.fixture -def engine_with_sqlite(empty_db) -> MeasurementEngine: +def engine_with_sqlite(empty_db) -> Generator[MeasurementEngine, None, None]: del empty_db # fixture activated for side effects sink = SqliteSink(experiment_name="measure_v2_test", sample_name="tracer") eng = MeasurementEngine(sinks=[sink])