Skip to content

API Reference

QuartumSE - Quantum Measurement Optimization & Observability Platform

A vendor-neutral framework for running quantum experiments with: - Classical shadows for shot-efficient observable estimation - Rigorous error mitigation and confidence intervals - Full provenance tracking and reproducibility - Cross-platform backend support (IBM, AWS, and more)

License: Apache 2.0

ClassicalShadows

Bases: ABC

Abstract base class for classical shadows implementations.

Different versions (v0-v4) subclass this to provide specific algorithms.

Source code in src/quartumse/shadows/core.py
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
class ClassicalShadows(ABC):
    """
    Abstract base class for classical shadows implementations.

    Different versions (v0-v4) subclass this to provide specific algorithms.
    Subclasses implement measurement generation, snapshot reconstruction and
    observable estimation; this base class supplies generic helpers
    (multi-observable estimation, variance bounds, confidence intervals).
    """

    def __init__(self, config: Any):
        self.config = config
        # Populated by the measurement/reconstruction pipeline; None until then.
        self.shadow_data: np.ndarray | None = None
        self.measurement_bases: np.ndarray | None = None
        self.measurement_outcomes: np.ndarray | None = None

    @abstractmethod
    def generate_measurement_circuits(
        self, base_circuit: QuantumCircuit, num_shadows: int
    ) -> list[QuantumCircuit]:
        """
        Generate randomized measurement circuits for shadows protocol.

        Args:
            base_circuit: The state preparation circuit
            num_shadows: Number of random measurements

        Returns:
            List of circuits with randomized measurements appended
        """
        # Consistent with estimate_shadow_size_needed: raise instead of
        # silently returning None if a subclass delegates to super().
        raise NotImplementedError

    @abstractmethod
    def reconstruct_classical_shadow(
        self, measurement_outcomes: np.ndarray, measurement_bases: np.ndarray
    ) -> np.ndarray:
        """
        Reconstruct classical shadow snapshots from measurement data.

        Args:
            measurement_outcomes: Binary outcomes (0/1) for each measurement
            measurement_bases: Which basis was measured for each qubit

        Returns:
            Array of shadow snapshots (density matrix representations)
        """
        raise NotImplementedError

    @abstractmethod
    def estimate_observable(
        self, observable: Observable, shadow_data: np.ndarray | None = None
    ) -> ShadowEstimate:
        """
        Estimate expectation value of an observable using shadow data.

        Args:
            observable: The observable to estimate
            shadow_data: Pre-computed shadow snapshots (or use self.shadow_data)

        Returns:
            Estimate with confidence interval
        """
        raise NotImplementedError

    @abstractmethod
    def estimate_shadow_size_needed(self, observable: Observable, target_precision: float) -> int:
        """Estimate the number of shadows required for a desired precision."""
        raise NotImplementedError

    def estimate_multiple_observables(
        self, observables: list[Observable]
    ) -> dict[str, ShadowEstimate]:
        """
        Estimate multiple observables from the same shadow data.

        This is the key advantage: one shadow dataset, many observables.

        Raises:
            ValueError: If no shadow data has been reconstructed yet.
        """
        if self.shadow_data is None:
            # Fixed message: shadow_data is produced by reconstruction, not by
            # generating measurement circuits alone.
            raise ValueError(
                "No shadow data available. Run reconstruct_classical_shadow first."
            )

        results = {}
        for obs in observables:
            results[str(obs)] = self.estimate_observable(obs)

        return results

    def compute_variance_bound(self, observable: Observable, shadow_size: int) -> float:
        """
        Theoretical variance bound for the shadow estimator.

        Useful for shot allocation and adaptive strategies.
        """
        # Default implementation (subclasses can override)
        # For random local Clifford: Var ≤ 4^k / M, where k = support size
        support_size = sum(1 for p in observable.pauli_string if p != "I")
        return float(4**support_size) / float(shadow_size)

    def compute_confidence_interval(
        self, mean: float, variance: float, n_samples: int, confidence: float = 0.95
    ) -> tuple[float, float]:
        """Compute confidence interval using normal approximation.

        Args:
            mean: Point estimate.
            variance: Per-sample variance of the estimator.
            n_samples: Number of independent samples behind the estimate.
            confidence: Two-sided confidence level (default 0.95).

        Returns:
            (lower, upper) bounds of the interval.
        """
        # Local import so scipy is only required when CIs are computed.
        from scipy import stats

        std_error = np.sqrt(variance / n_samples)
        z_score = float(stats.norm.ppf((1 + confidence) / 2))

        ci_lower = mean - z_score * std_error
        ci_upper = mean + z_score * std_error

        return (ci_lower, ci_upper)

compute_confidence_interval(mean, variance, n_samples, confidence=0.95)

Compute confidence interval using normal approximation.

Source code in src/quartumse/shadows/core.py
167
168
169
170
171
172
173
174
175
176
177
178
179
def compute_confidence_interval(
    self, mean: float, variance: float, n_samples: int, confidence: float = 0.95
) -> tuple[float, float]:
    """Return the (lower, upper) CI bounds under a normal approximation."""
    from scipy import stats

    # Two-sided z critical value for the requested confidence level.
    z = float(stats.norm.ppf((1 + confidence) / 2))
    half_width = z * np.sqrt(variance / n_samples)

    return (mean - half_width, mean + half_width)

compute_variance_bound(observable, shadow_size)

Theoretical variance bound for the shadow estimator.

Useful for shot allocation and adaptive strategies.

Source code in src/quartumse/shadows/core.py
156
157
158
159
160
161
162
163
164
165
def compute_variance_bound(self, observable: Observable, shadow_size: int) -> float:
    """
    Theoretical variance bound for the shadow estimator.

    Useful for shot allocation and adaptive strategies.
    """
    # Default bound (subclasses may override): for random local Cliffords,
    # Var <= 4^k / M, where k counts the non-identity Paulis in the support.
    support = [p for p in observable.pauli_string if p != "I"]
    return (4.0 ** len(support)) / shadow_size

estimate_multiple_observables(observables)

Estimate multiple observables from the same shadow data.

This is the key advantage: one shadow dataset, many observables.

Source code in src/quartumse/shadows/core.py
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def estimate_multiple_observables(
    self, observables: list[Observable]
) -> dict[str, ShadowEstimate]:
    """
    Estimate multiple observables from the same shadow data.

    This is the key advantage: one shadow dataset, many observables.

    Raises:
        ValueError: If shadow data has not been reconstructed yet.
    """
    if self.shadow_data is None:
        # Fixed message: shadow data comes from reconstruction, not from
        # generating measurement circuits alone.
        raise ValueError(
            "No shadow data available. Run reconstruct_classical_shadow first."
        )

    results = {}
    for obs in observables:
        results[str(obs)] = self.estimate_observable(obs)

    return results

estimate_observable(observable, shadow_data=None) abstractmethod

Estimate expectation value of an observable using shadow data.

Parameters:

Name Type Description Default
observable Observable

The observable to estimate

required
shadow_data ndarray | None

Pre-computed shadow snapshots (or use self.shadow_data)

None

Returns:

Type Description
ShadowEstimate

Estimate with confidence interval

Source code in src/quartumse/shadows/core.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
@abstractmethod
def estimate_observable(
    self, observable: Observable, shadow_data: np.ndarray | None = None
) -> ShadowEstimate:
    """
    Estimate expectation value of an observable using shadow data.

    Args:
        observable: The observable to estimate
        shadow_data: Pre-computed shadow snapshots (or use self.shadow_data)

    Returns:
        Estimate with confidence interval
    """
    # Abstract: each shadows version (v0-v4) supplies its own estimator.
    pass

estimate_shadow_size_needed(observable, target_precision) abstractmethod

Estimate the number of shadows required for a desired precision.

Source code in src/quartumse/shadows/core.py
132
133
134
135
136
@abstractmethod
def estimate_shadow_size_needed(self, observable: Observable, target_precision: float) -> int:
    """Estimate the number of shadows required for a desired precision.

    Args:
        observable: Observable whose estimate must reach the target precision.
        target_precision: Desired precision of the expectation-value estimate.

    Returns:
        Number of shadow measurements deemed sufficient by the subclass.
    """

    raise NotImplementedError

generate_measurement_circuits(base_circuit, num_shadows) abstractmethod

Generate randomized measurement circuits for shadows protocol.

Parameters:

Name Type Description Default
base_circuit QuantumCircuit

The state preparation circuit

required
num_shadows int

Number of random measurements

required

Returns:

Type Description
list[QuantumCircuit]

List of circuits with randomized measurements appended

Source code in src/quartumse/shadows/core.py
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
@abstractmethod
def generate_measurement_circuits(
    self, base_circuit: QuantumCircuit, num_shadows: int
) -> list[QuantumCircuit]:
    """
    Generate randomized measurement circuits for shadows protocol.

    Args:
        base_circuit: The state preparation circuit
        num_shadows: Number of random measurements

    Returns:
        List of circuits with randomized measurements appended
    """
    # Abstract: the randomization scheme is version-specific.
    pass

reconstruct_classical_shadow(measurement_outcomes, measurement_bases) abstractmethod

Reconstruct classical shadow snapshots from measurement data.

Parameters:

Name Type Description Default
measurement_outcomes ndarray

Binary outcomes (0/1) for each measurement

required
measurement_bases ndarray

Which basis was measured for each qubit

required

Returns:

Type Description
ndarray

Array of shadow snapshots (density matrix representations)

Source code in src/quartumse/shadows/core.py
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
@abstractmethod
def reconstruct_classical_shadow(
    self, measurement_outcomes: np.ndarray, measurement_bases: np.ndarray
) -> np.ndarray:
    """
    Reconstruct classical shadow snapshots from measurement data.

    Args:
        measurement_outcomes: Binary outcomes (0/1) for each measurement
        measurement_bases: Which basis was measured for each qubit

    Returns:
        Array of shadow snapshots (density matrix representations)
    """
    # Abstract: channel inversion is version-specific.
    # NOTE(review): exact array shapes/alignment are defined by subclasses
    # — not visible from this base signature.
    pass

Estimator

Bases: ABC

Abstract base class for quantum observable estimators.

Provides unified interface for different estimation strategies: - Classical shadows (various versions) - Direct measurement - Grouped Pauli measurement

Source code in src/quartumse/estimator/base.py
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
class Estimator(ABC):
    """
    Abstract base class for quantum observable estimators.

    Concrete subclasses expose one uniform interface over several
    estimation strategies:
    - Classical shadows (various versions)
    - Direct measurement
    - Grouped Pauli measurement
    """

    def __init__(self, backend: Any, config: Any | None = None) -> None:
        # Hold references to the execution backend and optional configuration.
        self.config = config
        self.backend = backend

    @abstractmethod
    def estimate(
        self,
        circuit: QuantumCircuit,
        observables: list[Observable],
        target_precision: float | None = None,
    ) -> EstimationResult:
        """
        Estimate expectation values of the given observables.

        Args:
            circuit: State preparation circuit
            observables: Observables whose expectation values are wanted
            target_precision: Desired precision (optional)

        Returns:
            Estimation results with confidence intervals
        """
        raise NotImplementedError

    @abstractmethod
    def estimate_shots_needed(self, observables: list[Observable], target_precision: float) -> int:
        """
        Estimate how many shots are required to reach the target precision.

        Used for cost estimation and shot allocation.
        """
        raise NotImplementedError

estimate(circuit, observables, target_precision=None) abstractmethod

Estimate expectation values of observables.

Parameters:

Name Type Description Default
circuit QuantumCircuit

State preparation circuit

required
observables list[Observable]

List of observables to estimate

required
target_precision float | None

Desired precision (optional)

None

Returns:

Type Description
EstimationResult

Estimation results with confidence intervals

Source code in src/quartumse/estimator/base.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
@abstractmethod
def estimate(
    self,
    circuit: QuantumCircuit,
    observables: list[Observable],
    target_precision: float | None = None,
) -> EstimationResult:
    """
    Estimate expectation values of observables.

    Args:
        circuit: State preparation circuit
        observables: List of observables to estimate
        target_precision: Desired precision (optional)

    Returns:
        Estimation results with confidence intervals
    """
    # Abstract: implemented by concrete estimators (e.g. ShadowEstimator).
    raise NotImplementedError

estimate_shots_needed(observables, target_precision) abstractmethod

Estimate number of shots needed for target precision.

Used for cost estimation and shot allocation.

Source code in src/quartumse/estimator/base.py
68
69
70
71
72
73
74
75
@abstractmethod
def estimate_shots_needed(self, observables: list[Observable], target_precision: float) -> int:
    """
    Estimate number of shots needed for target precision.

    Used for cost estimation and shot allocation.
    """
    # Abstract: the shot model depends on the estimation strategy.
    raise NotImplementedError

ProvenanceManifest

High-level interface for creating and managing provenance manifests.

Source code in src/quartumse/reporting/manifest.py
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
class ProvenanceManifest:
    """
    High-level interface for creating and managing provenance manifests.

    Wraps a ManifestSchema and provides creation, (de)serialization,
    tagging, result updates and artifact validation.
    """

    def __init__(self, schema: ManifestSchema):
        self.schema = schema

    @classmethod
    def create(
        cls,
        experiment_id: str,
        circuit_fingerprint: CircuitFingerprint,
        backend_snapshot: BackendSnapshot,
        **kwargs: Any,
    ) -> "ProvenanceManifest":
        """Create a new manifest with required fields."""
        schema = ManifestSchema(
            experiment_id=experiment_id,
            circuit=circuit_fingerprint,
            backend=backend_snapshot,
            **kwargs,
        )
        return cls(schema)

    def to_json(self, path: str | Path | None = None) -> str:
        """Export manifest as JSON.

        Args:
            path: Optional destination file; when provided the JSON is also
                written to disk.

        Returns:
            The JSON document as a string.
        """
        json_str = self.schema.model_dump_json(indent=2)

        # Explicit None check so falsy-but-valid paths (e.g. "") are not
        # silently ignored.
        if path is not None:
            Path(path).write_text(json_str)

        return json_str

    @classmethod
    def from_json(cls, path: str | Path) -> "ProvenanceManifest":
        """Load manifest from JSON file."""
        json_data = Path(path).read_text()
        schema = ManifestSchema.model_validate_json(json_data)
        return cls(schema)

    def add_tag(self, tag: str) -> None:
        """Add a searchable tag (duplicates are ignored)."""
        if tag not in self.schema.tags:
            self.schema.tags.append(tag)

    def update_results(self, results: dict[str, Any]) -> None:
        """Update the results summary."""
        self.schema.results_summary.update(results)

    def validate(self, *, require_shot_file: bool = True) -> bool:
        """Ensure referenced artifacts exist.

        Args:
            require_shot_file: When True, the shot-data file referenced by
                the manifest must exist on disk.

        Raises:
            FileNotFoundError: If the referenced shot-data file is missing.

        Returns:
            True when all requested checks pass.
        """

        if require_shot_file:
            shot_path = Path(self.schema.shot_data_path)
            if not shot_path.exists():
                raise FileNotFoundError(f"Shot data referenced by manifest is missing: {shot_path}")

        return True

    def __repr__(self) -> str:
        return (
            f"ProvenanceManifest(id={self.schema.experiment_id}, "
            f"backend={self.schema.backend.backend_name}, "
            f"created={self.schema.created_at.isoformat()})"
        )

add_tag(tag)

Add a searchable tag.

Source code in src/quartumse/reporting/manifest.py
250
251
252
253
def add_tag(self, tag: str) -> None:
    """Add a searchable tag; duplicates are ignored."""
    tags = self.schema.tags
    if tag not in tags:
        tags.append(tag)

create(experiment_id, circuit_fingerprint, backend_snapshot, **kwargs) classmethod

Create a new manifest with required fields.

Source code in src/quartumse/reporting/manifest.py
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
@classmethod
def create(
    cls,
    experiment_id: str,
    circuit_fingerprint: CircuitFingerprint,
    backend_snapshot: BackendSnapshot,
    **kwargs: Any,
) -> "ProvenanceManifest":
    """Create a new manifest with required fields."""
    # Build the underlying schema, then wrap it in the manifest facade.
    return cls(
        ManifestSchema(
            experiment_id=experiment_id,
            circuit=circuit_fingerprint,
            backend=backend_snapshot,
            **kwargs,
        )
    )

from_json(path) classmethod

Load manifest from JSON file.

Source code in src/quartumse/reporting/manifest.py
243
244
245
246
247
248
@classmethod
def from_json(cls, path: str | Path) -> "ProvenanceManifest":
    """Load manifest from JSON file."""
    raw = Path(path).read_text()
    return cls(ManifestSchema.model_validate_json(raw))

to_json(path=None)

Export manifest as JSON.

Source code in src/quartumse/reporting/manifest.py
234
235
236
237
238
239
240
241
def to_json(self, path: str | Path | None = None) -> str:
    """Export manifest as JSON.

    Args:
        path: Optional destination file; when provided the JSON is also
            written to disk.

    Returns:
        The JSON document as a string.
    """
    json_str = self.schema.model_dump_json(indent=2)

    # Explicit None check so falsy-but-valid paths (e.g. "") are not
    # silently ignored.
    if path is not None:
        Path(path).write_text(json_str)

    return json_str

update_results(results)

Update the results summary.

Source code in src/quartumse/reporting/manifest.py
255
256
257
def update_results(self, results: dict[str, Any]) -> None:
    """Merge the given results into the manifest's results summary."""
    summary = self.schema.results_summary
    summary.update(results)

validate(*, require_shot_file=True)

Validate the manifest schema and ensure referenced artifacts exist.

Source code in src/quartumse/reporting/manifest.py
259
260
261
262
263
264
265
266
267
def validate(self, *, require_shot_file: bool = True) -> bool:
    """Validate the manifest schema and ensure referenced artifacts exist."""

    # Nothing to check unless the shot-data artifact is required.
    if not require_shot_file:
        return True

    shot_path = Path(self.schema.shot_data_path)
    if not shot_path.exists():
        raise FileNotFoundError(f"Shot data referenced by manifest is missing: {shot_path}")

    return True

Report

Container for experiment report data.

Source code in src/quartumse/reporting/report.py
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
class Report:
    """Container for experiment report data."""

    def __init__(
        self,
        manifest: ProvenanceManifest,
        plots: dict[str, Any] | None = None,
        shot_diagnostics: ShotDataDiagnostics | None = None,
    ):
        self.manifest = manifest
        self.plots = plots or {}
        self.shot_diagnostics = shot_diagnostics

    def to_html(self, output_path: str | Path | None = None) -> str:
        """Render the report as HTML, optionally writing it to disk."""
        summary = self.manifest.schema.results_summary
        raw_metrics = summary.get("metrics") if isinstance(summary, dict) else None
        metrics_context = normalise_metrics_for_report(raw_metrics)

        diagnostics = self.shot_diagnostics.to_dict() if self.shot_diagnostics else None
        html = Template(HTML_TEMPLATE).render(
            manifest=self.manifest.schema,
            now=datetime.now(timezone.utc).isoformat(),
            shot_diagnostics=diagnostics,
            metrics=metrics_context,
        )

        if output_path:
            Path(output_path).write_text(html, encoding="utf-8")

        return html

    def to_pdf(self, output_path: str | Path) -> None:
        """Render the report as a PDF file (requires weasyprint)."""
        try:
            from weasyprint import HTML

            rendered = self.to_html()
            HTML(string=rendered).write_pdf(output_path)
        except ImportError as err:
            raise ImportError(
                "PDF generation requires weasyprint. Install with: pip install weasyprint"
            ) from err

to_html(output_path=None)

Generate HTML report.

Source code in src/quartumse/reporting/report.py
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
def to_html(self, output_path: str | Path | None = None) -> str:
    """Render the report as HTML, optionally writing it to disk."""
    summary = self.manifest.schema.results_summary
    raw_metrics = summary.get("metrics") if isinstance(summary, dict) else None
    metrics_context = normalise_metrics_for_report(raw_metrics)

    diagnostics = self.shot_diagnostics.to_dict() if self.shot_diagnostics else None
    html = Template(HTML_TEMPLATE).render(
        manifest=self.manifest.schema,
        now=datetime.now(timezone.utc).isoformat(),
        shot_diagnostics=diagnostics,
        metrics=metrics_context,
    )

    if output_path:
        Path(output_path).write_text(html, encoding="utf-8")

    return html

to_pdf(output_path)

Generate PDF report (requires weasyprint).

Source code in src/quartumse/reporting/report.py
392
393
394
395
396
397
398
399
400
401
402
def to_pdf(self, output_path: str | Path) -> None:
    """Render the report as a PDF file (requires weasyprint)."""
    try:
        from weasyprint import HTML

        rendered = self.to_html()
        HTML(string=rendered).write_pdf(output_path)
    except ImportError as err:
        raise ImportError(
            "PDF generation requires weasyprint. Install with: pip install weasyprint"
        ) from err

ShadowConfig

Bases: BaseModel

Configuration for classical shadows estimation.

Source code in src/quartumse/shadows/config.py
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
class ShadowConfig(BaseModel):
    """Configuration for classical shadows estimation.

    Field groups mirror the shadows algorithm versions (v0-v4); version-gated
    features are documented per field.
    """

    # Core parameters
    version: ShadowVersion = Field(
        default=ShadowVersion.V0_BASELINE, description="Shadows algorithm version"
    )
    shadow_size: int = Field(
        default=1000, description="Number of random measurements (shadow size)"
    )
    measurement_ensemble: MeasurementEnsemble = Field(
        default=MeasurementEnsemble.RANDOM_LOCAL_CLIFFORD
    )

    # v1+ (noise-aware)
    apply_inverse_channel: bool = Field(
        default=False, description="Apply noise-aware inverse channel (v1+)"
    )
    # Consistency fix: use keyword `default=` everywhere (was positional).
    noise_model_path: str | None = Field(
        default=None, description="Path to serialized noise model"
    )

    # v2+ (fermionic)
    fermionic_mode: bool = Field(default=False, description="Enable fermionic shadows (v2+)")
    rdm_order: int = Field(default=1, description="RDM order for fermionic mode (1 or 2)")

    # v3+ (adaptive)
    adaptive: bool = Field(default=False, description="Use adaptive measurement selection (v3+)")
    target_observables: list[str] | None = Field(
        default=None, description="Observable strings for adaptive prioritization"
    )
    derandomization_strategy: str | None = Field(
        default=None, description="greedy, importance_sampling, etc."
    )

    # v4+ (robust)
    bayesian_inference: bool = Field(
        default=False, description="Enable Bayesian robust estimation (v4+)"
    )
    bootstrap_samples: int = Field(default=1000, description="Bootstrap samples for CI (v4+)")
    confidence_level: float = Field(default=0.95, description="Confidence interval level")

    # General settings
    random_seed: int | None = Field(default=None, description="Random seed for reproducibility")
    parallel_shots: bool = Field(
        default=True, description="Execute shadow measurements in parallel batches"
    )
    batch_size: int | None = Field(default=None, description="Batch size for parallel execution")

    # Variance reduction
    median_of_means: bool = Field(
        default=False, description="Use median-of-means estimator for robustness"
    )
    num_groups: int = Field(default=10, description="Number of groups for median-of-means")

    # Advanced
    custom_parameters: dict[str, Any] = Field(
        default_factory=dict, description="Version-specific custom parameters"
    )

    # Keep enum members (rather than their values) on the model.
    model_config = ConfigDict(use_enum_values=False)

    def validate_version_compatibility(self) -> None:
        """Validate that enabled features match the selected version."""

        # Warning: simplified validation
        # In production, this would check feature availability
        pass

validate_version_compatibility()

Validate that enabled features match the selected version.

Source code in src/quartumse/shadows/config.py
88
89
90
91
92
93
def validate_version_compatibility(self) -> None:
    """Validate that enabled features match the selected version."""

    # Warning: simplified validation
    # In production, this would check feature availability
    # (e.g. fermionic_mode requires v2+, adaptive requires v3+,
    # bayesian_inference requires v4+ — per the field descriptions above).
    pass

ShadowEstimator

Bases: Estimator

Observable estimator using classical shadows.

Automatically selects shadow version based on config and orchestrates: 1. Shadow measurement generation 2. Circuit execution 3. Shadow reconstruction 4. Observable estimation 5. Provenance tracking

Source code in src/quartumse/estimator/shadow_estimator.py
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
class ShadowEstimator(Estimator):
    """
    Observable estimator using classical shadows.

    Automatically selects shadow version based on config and orchestrates:
    1. Shadow measurement generation
    2. Circuit execution
    3. Shadow reconstruction
    4. Observable estimation
    5. Provenance tracking
    """

    def __init__(
        self,
        backend: Backend | str,
        shadow_config: ShadowConfig | None = None,
        mitigation_config: MitigationConfig | None = None,
        data_dir: str | Path | None = None,
    ):
        """
        Initialize shadow estimator.

        Args:
            backend: Qiskit backend or backend name (e.g., "aer_simulator")
            shadow_config: Classical shadows configuration
            mitigation_config: Error mitigation configuration
            data_dir: Directory for storing shot data and manifests

        Raises:
            ValueError: If ``backend`` is a string that is neither a
                ``provider:name`` descriptor nor ``"aer_simulator"``.
        """
        # Handle backend
        self._backend_descriptor: str | None = None
        self._backend_snapshot: BackendSnapshot | None = None

        if isinstance(backend, str):
            self._backend_descriptor = backend
            # Strings containing ':' are resolved through resolve_backend,
            # which also supplies a ready-made backend snapshot.
            if ":" in backend:
                resolved_backend, snapshot = resolve_backend(backend)
                backend = resolved_backend
                self._backend_snapshot = snapshot
            elif backend == "aer_simulator":
                backend = AerSimulator()
                self._backend_snapshot = create_backend_snapshot(backend)
            else:
                raise ValueError(f"Unknown backend string: {backend}")
        else:
            self._backend_descriptor = getattr(backend, "name", None)

        super().__init__(backend, shadow_config)

        self._runtime_sampler: SamplerPrimitive | None = None
        self._runtime_sampler_checked = False
        self._use_runtime_sampler = is_ibm_runtime_backend(self.backend)

        self.shadow_config = shadow_config or ShadowConfig.model_validate({})
        self.mitigation_config = mitigation_config or MitigationConfig()
        self.data_dir = Path(data_dir) if data_dir else Path("./data")
        self.data_dir.mkdir(parents=True, exist_ok=True)

        # MEM is needed for noise-aware shadows, inverse-channel application,
        # or when explicitly listed in the mitigation techniques.
        self.measurement_error_mitigation: MeasurementErrorMitigation | None = None
        self._mem_required = (
            self.shadow_config.version == ShadowVersion.V1_NOISE_AWARE
            or self.shadow_config.apply_inverse_channel
            or ("MEM" in self.mitigation_config.techniques)
        )
        if self._mem_required:
            self.measurement_error_mitigation = MeasurementErrorMitigation(self.backend)

        # Initialize shadow implementation based on version
        self.shadow_impl: ClassicalShadows = self._create_shadow_implementation()

        # Initialize shot data writer
        self.shot_data_writer = ShotDataWriter(self.data_dir)

    def _get_runtime_sampler(self) -> SamplerPrimitive | None:
        """Initialise (if necessary) and return the IBM Runtime sampler.

        Returns None when the backend is not an IBM Runtime backend. The
        sampler is created lazily on first use and cached thereafter.
        """

        if not self._use_runtime_sampler:
            return None

        if not self._runtime_sampler_checked:
            self._runtime_sampler = create_runtime_sampler(self.backend)
            self._runtime_sampler_checked = True

        return self._runtime_sampler

    def _create_shadow_implementation(self) -> ClassicalShadows:
        """Factory for shadow implementations.

        Returns the concrete :class:`ClassicalShadows` subclass selected by
        ``self.shadow_config.version``.

        Raises:
            NotImplementedError: For versions v2-v4, which are not yet built.
            ValueError: For an unrecognised version value.
        """
        version = self.shadow_config.version

        if version == ShadowVersion.V0_BASELINE:
            return RandomLocalCliffordShadows(self.shadow_config)
        elif version == ShadowVersion.V1_NOISE_AWARE:
            # MEM may be absent if the config was mutated after __init__.
            if self.measurement_error_mitigation is None:
                self.measurement_error_mitigation = MeasurementErrorMitigation(self.backend)
            return NoiseAwareRandomLocalCliffordShadows(
                self.shadow_config, self.measurement_error_mitigation
            )
        elif version == ShadowVersion.V2_FERMIONIC:
            # TODO: Implement v2
            raise NotImplementedError("Shadows v2 (fermionic) not yet implemented")
        elif version == ShadowVersion.V3_ADAPTIVE:
            # TODO: Implement v3
            raise NotImplementedError("Shadows v3 (adaptive) not yet implemented")
        elif version == ShadowVersion.V4_ROBUST:
            # TODO: Implement v4
            raise NotImplementedError("Shadows v4 (robust) not yet implemented")
        else:
            raise ValueError(f"Unknown shadow version: {version}")

    def estimate(
        self,
        circuit: QuantumCircuit,
        observables: list[Observable],
        target_precision: float | None = None,
        save_manifest: bool = True,
    ) -> EstimationResult:
        """
        Estimate observables using classical shadows.

        Workflow:
        1. Generate shadow measurement circuits
        2. Transpile and execute on backend
        3. Reconstruct shadow snapshots
        4. Estimate all observables
        5. Generate provenance manifest

        Args:
            circuit: State preparation circuit to measure.
            observables: Pauli observables to estimate.
            target_precision: If given, the shadow size is grown to meet this
                precision for the worst-case observable.
            save_manifest: Whether to persist a provenance manifest.

        Returns:
            EstimationResult with per-observable statistics and artifact paths.
        """
        experiment_id = str(uuid.uuid4())
        start_time = time.time()

        # Determine shadow size
        if target_precision:
            required_sizes = [
                self.shadow_impl.estimate_shadow_size_needed(obs, target_precision)
                for obs in observables
            ]
            shadow_size = max(required_sizes) if required_sizes else self.shadow_config.shadow_size
            if shadow_size <= 0:
                raise ValueError("Shadow size estimation produced a non-positive value")
            self.shadow_config.shadow_size = shadow_size
            self.shadow_impl.config.shadow_size = shadow_size
        else:
            shadow_size = self.shadow_config.shadow_size
            self.shadow_impl.config.shadow_size = shadow_size

        # Generate shadow measurement circuits
        shadow_circuits = self.shadow_impl.generate_measurement_circuits(circuit, shadow_size)

        # Calibrate measurement error mitigation if required
        if isinstance(self.shadow_impl, NoiseAwareRandomLocalCliffordShadows):
            mem_params = self.mitigation_config.parameters
            mem_shots = int(mem_params.get("mem_shots", 4096))
            mem_qubits_param = mem_params.get("mem_qubits")
            if mem_qubits_param is None:
                mem_qubits = list(range(circuit.num_qubits))
            elif isinstance(mem_qubits_param, (list, tuple)):
                mem_qubits = [int(q) for q in mem_qubits_param]
            else:
                mem_qubits = [int(mem_qubits_param)]

            mem_force = bool(mem_params.get("mem_force_calibration", False))
            run_options = mem_params.get("mem_run_options", {})
            mem_confusion_path_str = self.mitigation_config.confusion_matrix_path

            # Reuse a persisted confusion matrix unless recalibration is forced.
            if mem_confusion_path_str and not mem_force:
                try:
                    self.shadow_impl.mem.load_confusion_matrix(mem_confusion_path_str)
                    metadata = self.shadow_impl.mem.get_confusion_metadata()
                    if isinstance(metadata.get("shots_per_state"), (int, float)):
                        mem_shots = int(metadata["shots_per_state"])
                        mem_params["mem_shots"] = mem_shots
                    if isinstance(metadata.get("qubits"), (list, tuple)):
                        mem_qubits = [int(q) for q in metadata["qubits"]]
                        mem_params["mem_qubits"] = mem_qubits
                except FileNotFoundError:
                    LOGGER.warning(
                        "Configured confusion matrix %s not found; recalibrating.",
                        mem_confusion_path_str,
                    )
                    mem_confusion_path_str = None

            if (
                self.shadow_impl.mem.confusion_matrix is None
                or mem_force
                or not mem_confusion_path_str
            ):
                mem_dir = self.data_dir / "mem"
                mem_dir.mkdir(parents=True, exist_ok=True)
                confusion_matrix_path = mem_dir / f"{experiment_id}.npz"
                saved_confusion_path = self.shadow_impl.mem.calibrate(
                    mem_qubits,
                    shots=mem_shots,
                    run_options=run_options,
                    output_path=confusion_matrix_path,
                )
                mem_confusion_path = (
                    saved_confusion_path
                    if saved_confusion_path is not None
                    else confusion_matrix_path
                )
                self.mitigation_config.confusion_matrix_path = str(mem_confusion_path.resolve())
                mem_confusion_path_str = self.mitigation_config.confusion_matrix_path
                self.shadow_impl.mem.confusion_matrix_path = Path(mem_confusion_path_str)
            else:
                self.mitigation_config.confusion_matrix_path = mem_confusion_path_str

            if "MEM" not in self.mitigation_config.techniques:
                self.mitigation_config.techniques.append("MEM")
            mem_params["mem_qubits"] = mem_qubits
            mem_params["mem_shots"] = mem_shots

        # Transpile for backend
        transpiled_circuits = transpile(shadow_circuits, backend=self.backend)

        # Respect backend batching limits
        max_experiments = None
        backend_config = None
        if hasattr(self.backend, "configuration"):
            try:
                backend_config = self.backend.configuration()
            except Exception:
                backend_config = None

        if backend_config is not None:
            max_experiments = getattr(backend_config, "max_experiments", None)

        if isinstance(max_experiments, np.integer):
            max_experiments = int(max_experiments)

        if not isinstance(max_experiments, int) or max_experiments <= 0:
            # Use safe default batch size for IBM backends to avoid submission failures
            max_experiments = 500
            # Warn via the module logger (not print) so the message is routed
            # through the application's logging configuration.
            LOGGER.warning(
                "Backend max_experiments unavailable or invalid. "
                "Using safe default batch size: %d",
                max_experiments,
            )

        measurement_outcomes_list: list[np.ndarray] = []

        sampler = self._get_runtime_sampler()

        for start_idx in range(0, len(transpiled_circuits), max_experiments):
            circuit_batch = transpiled_circuits[start_idx : start_idx + max_experiments]
            if sampler is not None:
                job = sampler.run(list(circuit_batch), shots=1)
                result = job.result()

                for batch_idx, _ in enumerate(circuit_batch):
                    counts = result[batch_idx].data.meas.get_counts()
                    # With shots=1 each counts dict has a single bitstring key.
                    bitstring = list(counts.keys())[0].replace(" ", "")
                    # Reverse so outcomes[i] corresponds to qubit i (Qiskit
                    # count keys list the highest-index qubit first).
                    outcomes = np.array([int(b) for b in bitstring[::-1]], dtype=int)
                    measurement_outcomes_list.append(outcomes)
            else:
                job = self.backend.run(circuit_batch, shots=1)  # Each circuit is one shadow
                result = job.result()

                for batch_idx, _ in enumerate(circuit_batch):
                    counts = result.get_counts(batch_idx)
                    bitstring = list(counts.keys())[0].replace(" ", "")
                    outcomes = np.array([int(b) for b in bitstring[::-1]], dtype=int)
                    measurement_outcomes_list.append(outcomes)

        if len(measurement_outcomes_list) != shadow_size:
            raise RuntimeError(
                "Collected measurement outcomes do not match the requested shadow size."
            )

        measurement_outcomes = np.asarray(measurement_outcomes_list, dtype=int)

        measurement_bases = self.shadow_impl.measurement_bases
        if measurement_bases is None:
            raise ValueError("Shadow implementation did not record measurement bases.")
        measurement_bases = np.asarray(measurement_bases, dtype=int)
        self.shadow_impl.measurement_bases = measurement_bases

        # Save shot data to Parquet
        shot_data_path = self.shot_data_writer.save_shadow_measurements(
            experiment_id=experiment_id,
            measurement_bases=measurement_bases,
            measurement_outcomes=measurement_outcomes,
            num_qubits=circuit.num_qubits,
        )

        # Reconstruct shadows
        self.shadow_impl.reconstruct_classical_shadow(measurement_outcomes, measurement_bases)

        # Estimate all observables
        estimates: dict[str, dict[str, object]] = {}
        for obs in observables:
            estimate = self.shadow_impl.estimate_observable(obs)
            estimates[str(obs)] = {
                "expectation_value": estimate.expectation_value,
                "variance": estimate.variance,
                "ci_95": estimate.confidence_interval,
                "ci_width": estimate.ci_width,
            }

        execution_time = time.time() - start_time

        # Create provenance manifest
        if save_manifest:
            manifest = self._create_manifest(
                experiment_id,
                circuit,
                observables,
                estimates,
                shadow_size,
                execution_time,
                shot_data_path,
            )
            manifest_path = self.data_dir / "manifests" / f"{experiment_id}.json"
            manifest_path.parent.mkdir(parents=True, exist_ok=True)
            manifest.to_json(manifest_path)
        else:
            manifest_path = None

        return EstimationResult(
            observables=estimates,
            shots_used=shadow_size,
            execution_time=execution_time,
            backend_name=self.backend.name,
            experiment_id=experiment_id,
            manifest_path=str(manifest_path) if manifest_path else None,
            shot_data_path=str(shot_data_path),
            mitigation_confusion_matrix_path=self.mitigation_config.confusion_matrix_path,
        )

    def estimate_shots_needed(self, observables: list[Observable], target_precision: float) -> int:
        """Estimate shadow size needed for target precision.

        Returns the worst-case (maximum) size over all observables; 0 when
        ``observables`` is empty.
        """
        # Use worst-case observable
        max_shadow_size = 0
        for obs in observables:
            size = self.shadow_impl.estimate_shadow_size_needed(obs, target_precision)
            max_shadow_size = max(max_shadow_size, size)

        return max_shadow_size

    def replay_from_manifest(
        self,
        manifest_path: str | Path,
        observables: list[Observable] | None = None,
    ) -> EstimationResult:
        """
        Replay an experiment from a saved manifest and shot data.

        This allows re-estimation of observables from previously collected shot data
        without re-executing circuits on the backend.

        Args:
            manifest_path: Path to the provenance manifest JSON file
            observables: Optional new list of observables to estimate. If None,
                        uses observables from the original manifest.

        Returns:
            EstimationResult with re-estimated observables

        Raises:
            FileNotFoundError: If the manifest or a required confusion matrix
                cannot be located.
            ValueError: If the manifest lacks shadows configuration or the
                confusion matrix archive is malformed.
            NotImplementedError: For shadow versions beyond v0/v1.
        """
        manifest_path = Path(manifest_path)
        if not manifest_path.exists():
            raise FileNotFoundError(f"Manifest not found: {manifest_path}")

        # Load manifest
        manifest = ProvenanceManifest.from_json(manifest_path)
        experiment_id = manifest.schema.experiment_id

        # Load shot data
        measurement_bases, measurement_outcomes, num_qubits = (
            self.shot_data_writer.load_shadow_measurements(experiment_id)
        )

        if manifest.schema.shadows is None:
            raise ValueError(
                "Manifest does not contain classical shadows configuration information."
            )

        # Reconstruct shadows with loaded data
        # Create temporary shadow implementation if needed
        shadow_payload = manifest.schema.shadows.model_dump()
        shadow_payload["random_seed"] = manifest.schema.random_seed
        shadow_config = ShadowConfig.model_validate(shadow_payload)

        resolved_confusion_matrix_path: str | None = (
            manifest.schema.mitigation.confusion_matrix_path
        )

        if shadow_config.version == ShadowVersion.V0_BASELINE:
            shadow_impl = RandomLocalCliffordShadows(shadow_config)
        elif shadow_config.version == ShadowVersion.V1_NOISE_AWARE:
            confusion_matrix_path_str = manifest.schema.mitigation.confusion_matrix_path

            if not confusion_matrix_path_str:
                raise FileNotFoundError(
                    "Noise-aware manifest does not include a persisted confusion matrix path. "
                    "Re-run estimation or provide the saved calibration artifact before replaying."
                )

            # The manifest may have been relocated since the experiment ran,
            # so probe several plausible locations for the calibration file.
            raw_confusion_path = Path(confusion_matrix_path_str)
            candidate_paths = [raw_confusion_path]

            if not raw_confusion_path.is_absolute():
                candidate_paths.append((manifest_path.parent / raw_confusion_path).resolve())
                candidate_paths.append((self.data_dir / raw_confusion_path).resolve())

            candidate_paths.append((self.data_dir / "mem" / raw_confusion_path.name).resolve())
            candidate_paths.append(
                (manifest_path.parent / "mem" / raw_confusion_path.name).resolve()
            )

            confusion_matrix_path: Path | None = None
            for candidate in candidate_paths:
                if candidate and candidate.exists():
                    confusion_matrix_path = candidate
                    break

            if confusion_matrix_path is None:
                raise FileNotFoundError(
                    "Unable to locate the persisted confusion matrix required for noise-aware replay. "
                    f"Looked for {raw_confusion_path} and related paths."
                )

            with np.load(confusion_matrix_path, allow_pickle=False) as archive:
                if "confusion_matrix" not in archive:
                    raise ValueError(
                        "Confusion matrix archive is missing the 'confusion_matrix' dataset."
                    )
                confusion_matrix = archive["confusion_matrix"]

            mem = MeasurementErrorMitigation(self.backend)
            mem.confusion_matrix = confusion_matrix
            mem.confusion_matrix_path = confusion_matrix_path.resolve()
            mem._calibrated_qubits = tuple(range(num_qubits))

            shadow_impl = NoiseAwareRandomLocalCliffordShadows(shadow_config, mem)
            resolved_confusion_matrix_path = str(confusion_matrix_path.resolve())
        else:
            raise NotImplementedError(
                f"Replay for shadow version {shadow_config.version.value} is not implemented"
            )
        shadow_impl.measurement_bases = measurement_bases
        shadow_impl.reconstruct_classical_shadow(measurement_outcomes, measurement_bases)

        # Use observables from manifest if not provided
        if observables is None:
            observables = [
                Observable(obs_dict["pauli"], obs_dict.get("coefficient", 1.0))
                for obs_dict in manifest.schema.observables
            ]

        # Estimate all observables
        estimates: dict[str, dict[str, object]] = {}
        for obs in observables:
            estimate = shadow_impl.estimate_observable(obs)
            estimates[str(obs)] = {
                "expectation_value": estimate.expectation_value,
                "variance": estimate.variance,
                "ci_95": estimate.confidence_interval,
                "ci_width": estimate.ci_width,
            }

        return EstimationResult(
            observables=estimates,
            shots_used=manifest.schema.shadows.shadow_size,
            execution_time=0.0,  # No execution time for replay
            backend_name=manifest.schema.backend.backend_name,
            experiment_id=experiment_id,
            manifest_path=str(manifest_path),
            shot_data_path=manifest.schema.shot_data_path,
            mitigation_confusion_matrix_path=resolved_confusion_matrix_path,
        )

    def _create_manifest(
        self,
        experiment_id: str,
        circuit: QuantumCircuit,
        observables: list[Observable],
        estimates: dict[str, dict[str, object]],
        shadow_size: int,
        execution_time: float,
        shot_data_path: Path,
    ) -> ProvenanceManifest:
        """Create provenance manifest for the experiment.

        Captures circuit fingerprint, backend snapshot, shadows/mitigation
        configuration, resource usage, and artifact checksums so the run can
        be replayed and audited later.
        """
        import sys

        import qiskit

        # Circuit fingerprint
        try:
            qasm_str = qasm3.dumps(circuit)
        except Exception:
            # Fall back to QASM 2 serialization for circuits QASM 3 rejects.
            qasm_str = circuit.qasm()

        gate_counts: dict[str, int] = {}
        for instruction in circuit.data:
            gate_name = instruction.operation.name
            gate_counts[gate_name] = gate_counts.get(gate_name, 0) + 1

        circuit_hash = hashlib.sha256(qasm_str.encode()).hexdigest()[:16]

        circuit_fp = CircuitFingerprint(
            qasm3=qasm_str,
            num_qubits=circuit.num_qubits,
            depth=circuit.depth(),
            gate_counts=gate_counts,
            circuit_hash=circuit_hash,
        )

        # Backend snapshot
        backend_snapshot = self._backend_snapshot or create_backend_snapshot(self.backend)

        # Shadows config
        shadows_config = ShadowsConfig.model_validate(
            {
                "version": self.shadow_config.version.value,
                "shadow_size": shadow_size,
                "measurement_ensemble": self.shadow_config.measurement_ensemble.value,
                "noise_model_path": self.shadow_config.noise_model_path,
                "inverse_channel_applied": self.shadow_config.apply_inverse_channel,
                "fermionic_mode": self.shadow_config.fermionic_mode,
                "rdm_order": self.shadow_config.rdm_order,
                "adaptive": self.shadow_config.adaptive,
                "target_observables": self.shadow_config.target_observables,
                "bayesian_inference": self.shadow_config.bayesian_inference,
                "bootstrap_samples": self.shadow_config.bootstrap_samples,
            }
        )

        # Resource usage
        resource_usage = ResourceUsage.model_validate(
            {
                "total_shots": shadow_size,
                "execution_time_seconds": execution_time,
                "queue_time_seconds": None,
                "estimated_cost_usd": None,
                "credits_used": None,
                "classical_compute_seconds": None,
            }
        )

        metadata = {}
        if self._backend_descriptor:
            metadata["backend_descriptor"] = self._backend_descriptor

        # Create manifest
        shot_checksum = compute_file_checksum(shot_data_path)

        mitigation_config = self.mitigation_config.model_copy(deep=True)
        confusion_path = mitigation_config.confusion_matrix_path
        if confusion_path:
            mitigation_config.confusion_matrix_checksum = compute_file_checksum(confusion_path)

        manifest_schema = ManifestSchema(
            experiment_id=experiment_id,
            experiment_name=None,
            circuit=circuit_fp,
            observables=[
                {"pauli": obs.pauli_string, "coefficient": obs.coefficient} for obs in observables
            ],
            backend=backend_snapshot,
            mitigation=mitigation_config,
            shadows=shadows_config,
            shot_data_path=str(shot_data_path),
            shot_data_checksum=shot_checksum,
            results_summary=estimates,
            resource_usage=resource_usage,
            metadata=metadata,
            random_seed=self.shadow_config.random_seed,
            quartumse_version=__version__,
            qiskit_version=qiskit.__version__,
            python_version=f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
        )

        return ProvenanceManifest(manifest_schema)

__init__(backend, shadow_config=None, mitigation_config=None, data_dir=None)

Initialize shadow estimator.

Parameters:

Name Type Description Default
backend Backend | str

Qiskit backend or backend name (e.g., "aer_simulator")

required
shadow_config ShadowConfig | None

Classical shadows configuration

None
mitigation_config MitigationConfig | None

Error mitigation configuration

None
data_dir str | Path | None

Directory for storing shot data and manifests

None
Source code in src/quartumse/estimator/shadow_estimator.py
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
def __init__(
    self,
    backend: Backend | str,
    shadow_config: ShadowConfig | None = None,
    mitigation_config: MitigationConfig | None = None,
    data_dir: str | Path | None = None,
):
    """
    Initialize shadow estimator.

    Args:
        backend: Qiskit backend or backend name (e.g., "aer_simulator")
        shadow_config: Classical shadows configuration
        mitigation_config: Error mitigation configuration
        data_dir: Directory for storing shot data and manifests

    Raises:
        ValueError: If ``backend`` is a string that is neither a
            descriptor containing ``:`` nor ``"aer_simulator"``.
    """
    # Handle backend
    self._backend_descriptor: str | None = None
    self._backend_snapshot: BackendSnapshot | None = None

    if isinstance(backend, str):
        self._backend_descriptor = backend
        # Strings containing ':' are resolved through resolve_backend,
        # which also supplies a ready-made backend snapshot.
        if ":" in backend:
            resolved_backend, snapshot = resolve_backend(backend)
            backend = resolved_backend
            self._backend_snapshot = snapshot
        elif backend == "aer_simulator":
            backend = AerSimulator()
            self._backend_snapshot = create_backend_snapshot(backend)
        else:
            raise ValueError(f"Unknown backend string: {backend}")
    else:
        self._backend_descriptor = getattr(backend, "name", None)

    super().__init__(backend, shadow_config)

    # Runtime sampler is created lazily; see _get_runtime_sampler.
    self._runtime_sampler: SamplerPrimitive | None = None
    self._runtime_sampler_checked = False
    self._use_runtime_sampler = is_ibm_runtime_backend(self.backend)

    self.shadow_config = shadow_config or ShadowConfig.model_validate({})
    self.mitigation_config = mitigation_config or MitigationConfig()
    self.data_dir = Path(data_dir) if data_dir else Path("./data")
    self.data_dir.mkdir(parents=True, exist_ok=True)

    # MEM is needed for noise-aware shadows, inverse-channel application,
    # or when explicitly listed in the mitigation techniques.
    self.measurement_error_mitigation: MeasurementErrorMitigation | None = None
    self._mem_required = (
        self.shadow_config.version == ShadowVersion.V1_NOISE_AWARE
        or self.shadow_config.apply_inverse_channel
        or ("MEM" in self.mitigation_config.techniques)
    )
    if self._mem_required:
        self.measurement_error_mitigation = MeasurementErrorMitigation(self.backend)

    # Initialize shadow implementation based on version
    self.shadow_impl: ClassicalShadows = self._create_shadow_implementation()

    # Initialize shot data writer
    self.shot_data_writer = ShotDataWriter(self.data_dir)

estimate(circuit, observables, target_precision=None, save_manifest=True)

Estimate observables using classical shadows.

Workflow: 1. Generate shadow measurement circuits; 2. Transpile and execute on backend; 3. Reconstruct shadow snapshots; 4. Estimate all observables; 5. Generate provenance manifest.

Source code in src/quartumse/estimator/shadow_estimator.py
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
def estimate(
    self,
    circuit: QuantumCircuit,
    observables: list[Observable],
    target_precision: float | None = None,
    save_manifest: bool = True,
) -> EstimationResult:
    """
    Estimate observables using classical shadows.

    Workflow:
    1. Generate shadow measurement circuits
    2. Transpile and execute on backend
    3. Reconstruct shadow snapshots
    4. Estimate all observables
    5. Generate provenance manifest
    """
    experiment_id = str(uuid.uuid4())
    start_time = time.time()

    # Determine shadow size
    if target_precision:
        required_sizes = [
            self.shadow_impl.estimate_shadow_size_needed(obs, target_precision)
            for obs in observables
        ]
        shadow_size = max(required_sizes) if required_sizes else self.shadow_config.shadow_size
        if shadow_size <= 0:
            raise ValueError("Shadow size estimation produced a non-positive value")
        self.shadow_config.shadow_size = shadow_size
        self.shadow_impl.config.shadow_size = shadow_size
    else:
        shadow_size = self.shadow_config.shadow_size
        self.shadow_impl.config.shadow_size = shadow_size

    # Generate shadow measurement circuits
    shadow_circuits = self.shadow_impl.generate_measurement_circuits(circuit, shadow_size)

    # Calibrate measurement error mitigation if required
    if isinstance(self.shadow_impl, NoiseAwareRandomLocalCliffordShadows):
        mem_params = self.mitigation_config.parameters
        mem_shots = int(mem_params.get("mem_shots", 4096))
        mem_qubits_param = mem_params.get("mem_qubits")
        if mem_qubits_param is None:
            mem_qubits = list(range(circuit.num_qubits))
        elif isinstance(mem_qubits_param, (list, tuple)):
            mem_qubits = [int(q) for q in mem_qubits_param]
        else:
            mem_qubits = [int(mem_qubits_param)]

        mem_force = bool(mem_params.get("mem_force_calibration", False))
        run_options = mem_params.get("mem_run_options", {})
        mem_confusion_path_str = self.mitigation_config.confusion_matrix_path

        if mem_confusion_path_str and not mem_force:
            try:
                self.shadow_impl.mem.load_confusion_matrix(mem_confusion_path_str)
                metadata = self.shadow_impl.mem.get_confusion_metadata()
                if isinstance(metadata.get("shots_per_state"), (int, float)):
                    mem_shots = int(metadata["shots_per_state"])
                    mem_params["mem_shots"] = mem_shots
                if isinstance(metadata.get("qubits"), (list, tuple)):
                    mem_qubits = [int(q) for q in metadata["qubits"]]
                    mem_params["mem_qubits"] = mem_qubits
            except FileNotFoundError:
                LOGGER.warning(
                    "Configured confusion matrix %s not found; recalibrating.",
                    mem_confusion_path_str,
                )
                mem_confusion_path_str = None

        if (
            self.shadow_impl.mem.confusion_matrix is None
            or mem_force
            or not mem_confusion_path_str
        ):
            mem_dir = self.data_dir / "mem"
            mem_dir.mkdir(parents=True, exist_ok=True)
            confusion_matrix_path = mem_dir / f"{experiment_id}.npz"
            saved_confusion_path = self.shadow_impl.mem.calibrate(
                mem_qubits,
                shots=mem_shots,
                run_options=run_options,
                output_path=confusion_matrix_path,
            )
            mem_confusion_path = (
                saved_confusion_path
                if saved_confusion_path is not None
                else confusion_matrix_path
            )
            self.mitigation_config.confusion_matrix_path = str(mem_confusion_path.resolve())
            mem_confusion_path_str = self.mitigation_config.confusion_matrix_path
            self.shadow_impl.mem.confusion_matrix_path = Path(mem_confusion_path_str)
        else:
            self.mitigation_config.confusion_matrix_path = mem_confusion_path_str

        if "MEM" not in self.mitigation_config.techniques:
            self.mitigation_config.techniques.append("MEM")
        mem_params["mem_qubits"] = mem_qubits
        mem_params["mem_shots"] = mem_shots

    # Transpile for backend
    transpiled_circuits = transpile(shadow_circuits, backend=self.backend)

    # Respect backend batching limits
    max_experiments = None
    backend_config = None
    if hasattr(self.backend, "configuration"):
        try:
            backend_config = self.backend.configuration()
        except Exception:
            backend_config = None

    if backend_config is not None:
        max_experiments = getattr(backend_config, "max_experiments", None)

    if isinstance(max_experiments, np.integer):
        max_experiments = int(max_experiments)

    if not isinstance(max_experiments, int) or max_experiments <= 0:
        # Use safe default batch size for IBM backends to avoid submission failures
        max_experiments = 500
        print(
            f"Warning: Backend max_experiments unavailable or invalid. "
            f"Using safe default batch size: {max_experiments}"
        )

    measurement_outcomes_list: list[np.ndarray] = []

    sampler = self._get_runtime_sampler()

    for start_idx in range(0, len(transpiled_circuits), max_experiments):
        circuit_batch = transpiled_circuits[start_idx : start_idx + max_experiments]
        if sampler is not None:
            job = sampler.run(list(circuit_batch), shots=1)
            result = job.result()

            for batch_idx, _ in enumerate(circuit_batch):
                counts = result[batch_idx].data.meas.get_counts()
                bitstring = list(counts.keys())[0].replace(" ", "")
                outcomes = np.array([int(b) for b in bitstring[::-1]], dtype=int)
                measurement_outcomes_list.append(outcomes)
        else:
            job = self.backend.run(circuit_batch, shots=1)  # Each circuit is one shadow
            result = job.result()

            for batch_idx, _ in enumerate(circuit_batch):
                counts = result.get_counts(batch_idx)
                bitstring = list(counts.keys())[0].replace(" ", "")
                outcomes = np.array([int(b) for b in bitstring[::-1]], dtype=int)
                measurement_outcomes_list.append(outcomes)

    if len(measurement_outcomes_list) != shadow_size:
        raise RuntimeError(
            "Collected measurement outcomes do not match the requested shadow size."
        )

    measurement_outcomes = np.asarray(measurement_outcomes_list, dtype=int)

    measurement_bases = self.shadow_impl.measurement_bases
    if measurement_bases is None:
        raise ValueError("Shadow implementation did not record measurement bases.")
    measurement_bases = np.asarray(measurement_bases, dtype=int)
    self.shadow_impl.measurement_bases = measurement_bases

    # Save shot data to Parquet
    shot_data_path = self.shot_data_writer.save_shadow_measurements(
        experiment_id=experiment_id,
        measurement_bases=measurement_bases,
        measurement_outcomes=measurement_outcomes,
        num_qubits=circuit.num_qubits,
    )

    # Reconstruct shadows
    self.shadow_impl.reconstruct_classical_shadow(measurement_outcomes, measurement_bases)

    # Estimate all observables
    estimates: dict[str, dict[str, object]] = {}
    for obs in observables:
        estimate = self.shadow_impl.estimate_observable(obs)
        estimates[str(obs)] = {
            "expectation_value": estimate.expectation_value,
            "variance": estimate.variance,
            "ci_95": estimate.confidence_interval,
            "ci_width": estimate.ci_width,
        }

    execution_time = time.time() - start_time

    # Create provenance manifest
    if save_manifest:
        manifest = self._create_manifest(
            experiment_id,
            circuit,
            observables,
            estimates,
            shadow_size,
            execution_time,
            shot_data_path,
        )
        manifest_path = self.data_dir / "manifests" / f"{experiment_id}.json"
        manifest_path.parent.mkdir(parents=True, exist_ok=True)
        manifest.to_json(manifest_path)
    else:
        manifest_path = None

    return EstimationResult(
        observables=estimates,
        shots_used=shadow_size,
        execution_time=execution_time,
        backend_name=self.backend.name,
        experiment_id=experiment_id,
        manifest_path=str(manifest_path) if manifest_path else None,
        shot_data_path=str(shot_data_path),
        mitigation_confusion_matrix_path=self.mitigation_config.confusion_matrix_path,
    )

estimate_shots_needed(observables, target_precision)

Estimate shadow size needed for target precision.

Source code in src/quartumse/estimator/shadow_estimator.py
368
369
370
371
372
373
374
375
376
def estimate_shots_needed(self, observables: list[Observable], target_precision: float) -> int:
    """Estimate shadow size needed for target precision."""
    # Use worst-case observable
    max_shadow_size = 0
    for obs in observables:
        size = self.shadow_impl.estimate_shadow_size_needed(obs, target_precision)
        max_shadow_size = max(max_shadow_size, size)

    return max_shadow_size

replay_from_manifest(manifest_path, observables=None)

Replay an experiment from a saved manifest and shot data.

This allows re-estimation of observables from previously collected shot data without re-executing circuits on the backend.

Parameters:

Name Type Description Default
manifest_path str | Path

Path to the provenance manifest JSON file

required
observables list[Observable] | None

Optional new list of observables to estimate. If None, uses observables from the original manifest.

None

Returns:

Type Description
EstimationResult

EstimationResult with re-estimated observables

Source code in src/quartumse/estimator/shadow_estimator.py
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
def replay_from_manifest(
    self,
    manifest_path: str | Path,
    observables: list[Observable] | None = None,
) -> EstimationResult:
    """
    Replay an experiment from a saved manifest and shot data.

    This allows re-estimation of observables from previously collected shot data
    without re-executing circuits on the backend.

    Args:
        manifest_path: Path to the provenance manifest JSON file
        observables: Optional new list of observables to estimate. If None,
                    uses observables from the original manifest.

    Returns:
        EstimationResult with re-estimated observables
    """
    manifest_path = Path(manifest_path)
    if not manifest_path.exists():
        raise FileNotFoundError(f"Manifest not found: {manifest_path}")

    # Load manifest
    manifest = ProvenanceManifest.from_json(manifest_path)
    experiment_id = manifest.schema.experiment_id

    # Load shot data
    measurement_bases, measurement_outcomes, num_qubits = (
        self.shot_data_writer.load_shadow_measurements(experiment_id)
    )

    if manifest.schema.shadows is None:
        raise ValueError(
            "Manifest does not contain classical shadows configuration information."
        )

    # Reconstruct shadows with loaded data
    # Create temporary shadow implementation if needed
    shadow_payload = manifest.schema.shadows.model_dump()
    shadow_payload["random_seed"] = manifest.schema.random_seed
    shadow_config = ShadowConfig.model_validate(shadow_payload)

    resolved_confusion_matrix_path: str | None = (
        manifest.schema.mitigation.confusion_matrix_path
    )

    if shadow_config.version == ShadowVersion.V0_BASELINE:
        shadow_impl = RandomLocalCliffordShadows(shadow_config)
    elif shadow_config.version == ShadowVersion.V1_NOISE_AWARE:
        confusion_matrix_path_str = manifest.schema.mitigation.confusion_matrix_path

        if not confusion_matrix_path_str:
            raise FileNotFoundError(
                "Noise-aware manifest does not include a persisted confusion matrix path. "
                "Re-run estimation or provide the saved calibration artifact before replaying."
            )

        raw_confusion_path = Path(confusion_matrix_path_str)
        candidate_paths = [raw_confusion_path]

        if not raw_confusion_path.is_absolute():
            candidate_paths.append((manifest_path.parent / raw_confusion_path).resolve())
            candidate_paths.append((self.data_dir / raw_confusion_path).resolve())

        candidate_paths.append((self.data_dir / "mem" / raw_confusion_path.name).resolve())
        candidate_paths.append(
            (manifest_path.parent / "mem" / raw_confusion_path.name).resolve()
        )

        confusion_matrix_path: Path | None = None
        for candidate in candidate_paths:
            if candidate and candidate.exists():
                confusion_matrix_path = candidate
                break

        if confusion_matrix_path is None:
            raise FileNotFoundError(
                "Unable to locate the persisted confusion matrix required for noise-aware replay. "
                f"Looked for {raw_confusion_path} and related paths."
            )

        with np.load(confusion_matrix_path, allow_pickle=False) as archive:
            if "confusion_matrix" not in archive:
                raise ValueError(
                    "Confusion matrix archive is missing the 'confusion_matrix' dataset."
                )
            confusion_matrix = archive["confusion_matrix"]

        mem = MeasurementErrorMitigation(self.backend)
        mem.confusion_matrix = confusion_matrix
        mem.confusion_matrix_path = confusion_matrix_path.resolve()
        mem._calibrated_qubits = tuple(range(num_qubits))

        shadow_impl = NoiseAwareRandomLocalCliffordShadows(shadow_config, mem)
        resolved_confusion_matrix_path = str(confusion_matrix_path.resolve())
    else:
        raise NotImplementedError(
            f"Replay for shadow version {shadow_config.version.value} is not implemented"
        )
    shadow_impl.measurement_bases = measurement_bases
    shadow_impl.reconstruct_classical_shadow(measurement_outcomes, measurement_bases)

    # Use observables from manifest if not provided
    if observables is None:
        observables = [
            Observable(obs_dict["pauli"], obs_dict.get("coefficient", 1.0))
            for obs_dict in manifest.schema.observables
        ]

    # Estimate all observables
    estimates: dict[str, dict[str, object]] = {}
    for obs in observables:
        estimate = shadow_impl.estimate_observable(obs)
        estimates[str(obs)] = {
            "expectation_value": estimate.expectation_value,
            "variance": estimate.variance,
            "ci_95": estimate.confidence_interval,
            "ci_width": estimate.ci_width,
        }

    return EstimationResult(
        observables=estimates,
        shots_used=manifest.schema.shadows.shadow_size,
        execution_time=0.0,  # No execution time for replay
        backend_name=manifest.schema.backend.backend_name,
        experiment_id=experiment_id,
        manifest_path=str(manifest_path),
        shot_data_path=manifest.schema.shot_data_path,
        mitigation_confusion_matrix_path=resolved_confusion_matrix_path,
    )

Commands

See the CLI reference for command-line usage details.