From 9d07f7cb974906eca4cabd381b9c7622d01ee594 Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 01:49:42 -0700 Subject: [PATCH 01/18] feat(warp-core): hard-cut phase 4 provenance entries --- crates/warp-core/src/coordinator.rs | 212 +- crates/warp-core/src/lib.rs | 6 +- .../src/materialization/reduce_op.rs | 14 +- crates/warp-core/src/playback.rs | 54 +- crates/warp-core/src/provenance_store.rs | 1801 +++++++++-------- crates/warp-core/src/snapshot.rs | 6 + crates/warp-core/src/worldline.rs | 2 +- .../warp-core/tests/checkpoint_fork_tests.rs | 71 +- crates/warp-core/tests/common/mod.rs | 90 +- .../warp-core/tests/golden_vectors_phase0.rs | 12 +- crates/warp-core/tests/inbox.rs | 31 +- .../tests/invariant_property_tests.rs | 44 +- .../warp-core/tests/outputs_playback_tests.rs | 67 +- .../warp-core/tests/playback_cursor_tests.rs | 10 +- crates/warp-core/tests/slice_theorem_proof.rs | 9 +- crates/warp-wasm/src/lib.rs | 42 +- crates/warp-wasm/src/warp_kernel.rs | 69 +- 17 files changed, 1446 insertions(+), 1094 deletions(-) diff --git a/crates/warp-core/src/coordinator.rs b/crates/warp-core/src/coordinator.rs index a115f849..97adfa1c 100644 --- a/crates/warp-core/src/coordinator.rs +++ b/crates/warp-core/src/coordinator.rs @@ -14,6 +14,7 @@ use crate::engine_impl::{CommitOutcome, Engine, EngineError}; use crate::head::{PlaybackHeadRegistry, RunnableWriterSet, WriterHead, WriterHeadKey}; use crate::head_inbox::{InboxAddress, InboxIngestResult, IngressEnvelope, IngressTarget}; use crate::ident::Hash; +use crate::provenance_store::{HistoryError, ProvenanceEntry, ProvenanceService, ProvenanceStore}; use crate::worldline::WorldlineId; use crate::worldline_registry::WorldlineRegistry; use crate::worldline_state::WorldlineState; @@ -65,6 +66,9 @@ pub enum RuntimeError { /// A commit against a worldline frontier failed. #[error(transparent)] Engine(#[from] EngineError), + /// Provenance append or lookup failed during a runtime step. 
+ #[error(transparent)] + Provenance(#[from] HistoryError), /// Attempted to advance a frontier tick past `u64::MAX`. #[error("frontier tick overflow for worldline: {0:?}")] FrontierTickOverflow(WorldlineId), @@ -306,8 +310,9 @@ impl SchedulerCoordinator { /// commits each non-empty head against its worldline frontier. /// /// The SuperTick is failure-atomic with respect to runtime state: if any - /// head commit fails, all prior runtime mutations from this pass are - /// discarded and the runtime is restored to its pre-SuperTick state. + /// head commit fails, all prior runtime and provenance mutations from this + /// pass are discarded and both subsystems are restored to their + /// pre-SuperTick state. /// /// # Panics /// @@ -315,6 +320,7 @@ impl SchedulerCoordinator { /// its pre-SuperTick state. pub fn super_tick( runtime: &mut WorldlineRuntime, + provenance: &mut ProvenanceService, engine: &mut Engine, ) -> Result, RuntimeError> { let next_global_tick = runtime @@ -345,6 +351,7 @@ impl SchedulerCoordinator { } let runtime_before = runtime.clone(); + let provenance_before = provenance.clone(); for key in &keys { let admitted = runtime @@ -357,49 +364,104 @@ impl SchedulerCoordinator { continue; } - let outcome = catch_unwind(AssertUnwindSafe(|| { - let frontier = runtime + let outcome = catch_unwind(AssertUnwindSafe(|| -> Result { + let worldline_tick = runtime .worldlines - .frontier_mut(&key.worldline_id) - .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; - engine - .commit_with_state(frontier.state_mut(), &admitted) - .map_err(RuntimeError::from) + .get(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))? 
+ .frontier_tick(); + let parents = provenance.tip_ref(key.worldline_id)?.into_iter().collect(); + + let CommitOutcome { + snapshot, + patch, + receipt: _, + } = { + let frontier = runtime + .worldlines + .frontier_mut(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; + engine + .commit_with_state(frontier.state_mut(), &admitted) + .map_err(RuntimeError::from)? + }; + + let (state_root, frontier_tick_after) = { + let frontier = runtime + .worldlines + .frontier_mut(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; + let outputs = frontier + .state() + .last_materialization() + .iter() + .map(|channel| (channel.channel, channel.data.clone())) + .collect(); + let worldline_patch = crate::worldline::WorldlineTickPatchV1 { + header: crate::worldline::WorldlineTickHeaderV1 { + global_tick: next_global_tick, + policy_id: patch.policy_id(), + rule_pack_id: patch.rule_pack_id(), + plan_digest: snapshot.plan_digest, + decision_digest: snapshot.decision_digest, + rewrites_digest: snapshot.rewrites_digest, + }, + warp_id: snapshot.root.warp_id, + ops: patch.ops().to_vec(), + in_slots: patch.in_slots().to_vec(), + out_slots: patch.out_slots().to_vec(), + patch_digest: patch.digest(), + }; + let entry = ProvenanceEntry::local_commit( + key.worldline_id, + worldline_tick, + next_global_tick, + *key, + parents, + crate::worldline::HashTriplet { + state_root: snapshot.state_root, + patch_digest: snapshot.patch_digest, + commit_hash: snapshot.hash, + }, + worldline_patch, + outputs, + Vec::new(), + ); + provenance.append_local_commit(entry)?; + frontier.state_mut().record_committed_ingress( + *key, + admitted.iter().map(IngressEnvelope::ingress_id), + ); + let frontier_tick_after = frontier + .advance_tick() + .ok_or(RuntimeError::FrontierTickOverflow(key.worldline_id))?; + (snapshot.state_root, frontier_tick_after) + }; + + Ok(StepRecord { + head_key: *key, + admitted_count: admitted.len(), + frontier_tick_after, + 
state_root, + commit_hash: snapshot.hash, + }) })); - let CommitOutcome { snapshot, .. } = match outcome { - Ok(Ok(outcome)) => outcome, + let record = match outcome { + Ok(Ok(record)) => record, Ok(Err(err)) => { *runtime = runtime_before; + *provenance = provenance_before; return Err(err); } Err(payload) => { *runtime = runtime_before; + *provenance = provenance_before; resume_unwind(payload); } }; - let frontier_tick_after = { - let frontier = runtime - .worldlines - .frontier_mut(&key.worldline_id) - .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; - frontier.state_mut().record_committed_ingress( - *key, - admitted.iter().map(IngressEnvelope::ingress_id), - ); - frontier - .advance_tick() - .ok_or(RuntimeError::FrontierTickOverflow(key.worldline_id))? - }; - - records.push(StepRecord { - head_key: *key, - admitted_count: admitted.len(), - frontier_tick_after, - state_root: snapshot.state_root, - commit_hash: snapshot.hash, - }); + records.push(record); } runtime.global_tick = next_global_tick; @@ -479,6 +541,16 @@ mod tests { .unwrap() } + fn mirrored_provenance(runtime: &WorldlineRuntime) -> ProvenanceService { + let mut provenance = ProvenanceService::new(); + for (worldline_id, frontier) in runtime.worldlines().iter() { + provenance + .register_worldline(*worldline_id, frontier.state()) + .unwrap(); + } + provenance + } + fn runtime_marker_matches(view: GraphView<'_>, scope: &NodeId) -> bool { matches!( view.node_attachment(scope), @@ -686,7 +758,9 @@ mod tests { } ); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); assert_eq!( @@ -838,7 +912,9 @@ mod tests { .unwrap(); let expected_order = SchedulerCoordinator::peek_order(&runtime); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let 
mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!( records @@ -848,6 +924,15 @@ mod tests { expected_order ); assert!(records.iter().all(|record| record.admitted_count == 1)); + assert_eq!(provenance.len(worldline_id).unwrap(), 2); + assert_eq!( + provenance.entry(worldline_id, 0).unwrap().head_key, + Some(first) + ); + assert_eq!( + provenance.entry(worldline_id, 1).unwrap().head_key, + Some(second) + ); } #[test] @@ -897,8 +982,20 @@ mod tests { runtime.ingest(env_a.clone()).unwrap(); runtime.ingest(env_b.clone()).unwrap(); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 2); + assert_eq!(provenance.len(worldline_a).unwrap(), 1); + assert_eq!(provenance.len(worldline_b).unwrap(), 1); + assert_eq!( + provenance.entry(worldline_a, 0).unwrap().head_key, + Some(head_a) + ); + assert_eq!( + provenance.entry(worldline_b, 0).unwrap().head_key, + Some(head_b) + ); assert_eq!( runtime .worldlines @@ -958,8 +1055,11 @@ mod tests { InboxPolicy::AcceptAll, ); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert!(records.is_empty()); + assert_eq!(provenance.len(worldline_id).unwrap(), 0); assert_eq!( runtime .worldlines @@ -1000,7 +1100,9 @@ mod tests { .unwrap() .frontier_tick = u64::MAX; - let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + let mut provenance = mirrored_provenance(&runtime); + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine) + .unwrap_err(); 
assert!(matches!(err, RuntimeError::FrontierTickOverflow(id) if id == worldline_id)); assert_eq!( runtime @@ -1064,7 +1166,9 @@ mod tests { runtime.ingest(envelope.clone()).unwrap(); runtime.global_tick = u64::MAX; - let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + let mut provenance = mirrored_provenance(&runtime); + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine) + .unwrap_err(); assert!(matches!(err, RuntimeError::GlobalTickOverflow)); assert_eq!( runtime @@ -1146,7 +1250,9 @@ mod tests { assert!(frontier.state.warp_state.delete_instance(&broken_root)); } - let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + let mut provenance = mirrored_provenance(&runtime); + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine) + .unwrap_err(); assert!(matches!( err, RuntimeError::Engine(EngineError::UnknownWarp(warp_id)) @@ -1200,6 +1306,8 @@ mod tests { .is_none(), "rollback must discard earlier runtime ingress materialization" ); + assert_eq!(provenance.len(worldline_a).unwrap(), 0); + assert_eq!(provenance.len(worldline_b).unwrap(), 0); assert!( runtime .worldlines @@ -1270,9 +1378,10 @@ mod tests { let env_a_ingress_id = env_a.ingress_id(); runtime.ingest(env_a).unwrap(); runtime.ingest(env_b).unwrap(); + let mut provenance = mirrored_provenance(&runtime); let panic_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - let _ = SchedulerCoordinator::super_tick(&mut runtime, &mut engine); + let _ = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine); })); let Err(payload) = panic_result else { unreachable!("later head panic should resume through coordinator"); @@ -1314,6 +1423,8 @@ mod tests { .is_none(), "panic rollback must discard earlier runtime ingress materialization" ); + assert_eq!(provenance.len(worldline_a).unwrap(), 0); + assert_eq!(provenance.len(worldline_b).unwrap(), 0); } 
#[test] @@ -1344,9 +1455,12 @@ mod tests { .unwrap(); } - let first = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let first = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(first.len(), 1); assert_eq!(first[0].admitted_count, 2); + assert_eq!(provenance.len(worldline_id).unwrap(), 1); assert_eq!( runtime .heads @@ -1357,9 +1471,11 @@ mod tests { 1 ); - let second = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let second = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(second.len(), 1); assert_eq!(second[0].admitted_count, 1); + assert_eq!(provenance.len(worldline_id).unwrap(), 2); assert!(runtime.heads.get(&budget_key).unwrap().inbox().is_empty()); } @@ -1387,7 +1503,9 @@ mod tests { ); runtime.ingest(envelope.clone()).unwrap(); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); let store = runtime_store(&runtime, worldline_id); @@ -1446,7 +1564,9 @@ mod tests { .unwrap() .frontier_tick = u64::MAX; - let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + let mut provenance = mirrored_provenance(&runtime); + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine) + .unwrap_err(); assert!(matches!(err, RuntimeError::FrontierTickOverflow(id) if id == worldline_id)); } @@ -1468,7 +1588,9 @@ mod tests { ); runtime.global_tick = u64::MAX; - let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + let mut provenance = mirrored_provenance(&runtime); + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine) + .unwrap_err(); 
assert!(matches!(err, RuntimeError::GlobalTickOverflow)); } } diff --git a/crates/warp-core/src/lib.rs b/crates/warp-core/src/lib.rs index 4a15c4da..abdc71c1 100644 --- a/crates/warp-core/src/lib.rs +++ b/crates/warp-core/src/lib.rs @@ -191,7 +191,11 @@ pub use playback::{ pub use playback::{SessionId, ViewSession}; // --- Truth delivery --- pub use playback::{CursorReceipt, TruthFrame, TruthSink}; -pub use provenance_store::{CheckpointRef, HistoryError, LocalProvenanceStore, ProvenanceStore}; +pub use provenance_store::{ + BoundaryTransitionRecord, BtrError, BtrPayload, CheckpointRef, HistoryError, + LocalProvenanceStore, ProvenanceEntry, ProvenanceEventKind, ProvenanceRef, ProvenanceService, + ProvenanceStore, +}; pub use receipt::{TickReceipt, TickReceiptDisposition, TickReceiptEntry, TickReceiptRejection}; pub use record::{EdgeRecord, NodeRecord}; pub use rule::{ConflictPolicy, ExecuteFn, MatchFn, PatternGraph, RewriteRule}; diff --git a/crates/warp-core/src/materialization/reduce_op.rs b/crates/warp-core/src/materialization/reduce_op.rs index 93c8aeed..027dde79 100644 --- a/crates/warp-core/src/materialization/reduce_op.rs +++ b/crates/warp-core/src/materialization/reduce_op.rs @@ -230,13 +230,13 @@ mod tests { #[test] fn empty_input_others_return_empty() { let empty: Vec> = vec![]; - assert_eq!(ReduceOp::Max.apply(empty.clone()), vec![]); - assert_eq!(ReduceOp::Min.apply(empty.clone()), vec![]); - assert_eq!(ReduceOp::First.apply(empty.clone()), vec![]); - assert_eq!(ReduceOp::Last.apply(empty.clone()), vec![]); - assert_eq!(ReduceOp::BitOr.apply(empty.clone()), vec![]); - assert_eq!(ReduceOp::BitAnd.apply(empty.clone()), vec![]); - assert_eq!(ReduceOp::Concat.apply(empty), vec![]); + assert_eq!(ReduceOp::Max.apply(empty.clone()), Vec::::new()); + assert_eq!(ReduceOp::Min.apply(empty.clone()), Vec::::new()); + assert_eq!(ReduceOp::First.apply(empty.clone()), Vec::::new()); + assert_eq!(ReduceOp::Last.apply(empty.clone()), Vec::::new()); + 
assert_eq!(ReduceOp::BitOr.apply(empty.clone()), Vec::::new()); + assert_eq!(ReduceOp::BitAnd.apply(empty.clone()), Vec::::new()); + assert_eq!(ReduceOp::Concat.apply(empty), Vec::::new()); } // ─── SUM ─────────────────────────────────────────────────────────── diff --git a/crates/warp-core/src/playback.rs b/crates/warp-core/src/playback.rs index c7cd5358..41e71109 100644 --- a/crates/warp-core/src/playback.rs +++ b/crates/warp-core/src/playback.rs @@ -411,14 +411,6 @@ impl PlaybackCursor { /// recorded patches. This ensures deterministic replay regardless of /// rule changes or execution order. /// - /// # Limitations - /// - /// This method assumes **strictly linear single-parent history**. It constructs - /// the parent chain as `parents = vec![prev_commit_hash]` for each tick. If the - /// recorded history contains merge commits (multiple parents), the recomputed - /// `commit_hash` will not match the recorded value and verification will fail - /// with [`SeekError::CommitHashMismatch`]. Supporting multi-parent replay requires - /// extending `ProvenanceStore` to expose parent vectors per tick. pub fn seek_to( &mut self, target: u64, @@ -459,29 +451,22 @@ impl PlaybackCursor { self.tick }; - // Establish parent commit_hash chain for Merkle verification. - // For forward seeks (start_tick > 0), the parent is the prior tick's commit_hash. - // For rebuilds from initial state, the first tick has no parents. 
- let mut parents: Vec = if start_tick > 0 { - let prev = provenance - .expected(self.worldline_id, start_tick - 1) - .map_err(|_| SeekError::HistoryUnavailable { tick: start_tick })?; - vec![prev.commit_hash] - } else { - Vec::new() - }; - // Apply patches from start_tick to target // If start_tick = 2 and target = 5, we apply patches 2, 3, 4 for patch_tick in start_tick..target { - // Get patch and expected hash triplet - let patch = provenance - .patch(self.worldline_id, patch_tick) - .map_err(|_| SeekError::HistoryUnavailable { tick: patch_tick })?; - - let expected = provenance - .expected(self.worldline_id, patch_tick) + let entry = provenance + .entry(self.worldline_id, patch_tick) .map_err(|_| SeekError::HistoryUnavailable { tick: patch_tick })?; + let patch = entry + .patch + .ok_or(SeekError::HistoryUnavailable { tick: patch_tick })?; + let expected = entry.expected; + let parents = provenance + .parents(self.worldline_id, patch_tick) + .map_err(|_| SeekError::HistoryUnavailable { tick: patch_tick })? + .into_iter() + .map(|parent| parent.commit_hash) + .collect::>(); // Apply the patch to our store patch @@ -515,9 +500,6 @@ impl PlaybackCursor { if computed_commit_hash != expected.commit_hash { return Err(SeekError::CommitHashMismatch { tick: patch_tick }); } - - // Advance parent chain for next tick - parents = vec![expected.commit_hash]; } // Update cursor position @@ -584,7 +566,8 @@ impl PlaybackCursor { self.mode = PlaybackMode::Paused; Ok(StepResult::Advanced) } else { - // Writers advance via provenance.append(), not cursor stepping. + // Writers advance via provenance append on the runtime side, + // not through cursor stepping. self.mode = PlaybackMode::Paused; Ok(StepResult::NoOp) } @@ -719,8 +702,7 @@ impl ViewSession { // patches 0..N-1 have been applied, the current state corresponds to index N-1. 
let prov_tick = cursor.tick - 1; - // Get expected hashes for commit_hash - let expected = provenance.expected(cursor.worldline_id, prov_tick)?; + let entry = provenance.entry(cursor.worldline_id, prov_tick)?; // Build receipt let receipt = CursorReceipt { @@ -729,17 +711,15 @@ impl ViewSession { worldline_id: cursor.worldline_id, warp_id: cursor.warp_id, tick: cursor.tick, - commit_hash: expected.commit_hash, + commit_hash: entry.expected.commit_hash, }; // Publish receipt sink.publish_receipt(self.session_id, receipt); // Get recorded outputs for this tick - let outputs = provenance.outputs(cursor.worldline_id, prov_tick)?; - // Publish frames for subscribed channels only - for (channel, value) in outputs { + for (channel, value) in entry.outputs { if self.subscriptions.contains(&channel) { let value_hash = compute_value_hash(&value); sink.publish_frame( diff --git a/crates/warp-core/src/provenance_store.rs b/crates/warp-core/src/provenance_store.rs index 55f6a3e1..a9337283 100644 --- a/crates/warp-core/src/provenance_store.rs +++ b/crates/warp-core/src/provenance_store.rs @@ -1,18 +1,19 @@ // SPDX-License-Identifier: Apache-2.0 // © James Ross Ω FLYING•ROBOTS -//! Provenance store trait and implementations for SPEC-0004. +//! Entry-oriented provenance store and BTR helpers for SPEC-0004. //! -//! The provenance store provides the historical data needed for worldline replay: -//! patches, expected hashes, recorded outputs, and checkpoints. This module defines -//! the trait interface (seam for future wormhole integration) and a simple in-memory -//! implementation for local use. +//! Phase 4 replaces the old aligned side arrays with a single entry model that +//! is structurally ready for DAG parents, richer event kinds, and deterministic +//! export packaging. //! //! # Key Types //! -//! - [`ProvenanceStore`]: Trait defining the provenance data access interface. -//! - [`LocalProvenanceStore`]: In-memory Vec-backed implementation. +//! 
- [`ProvenanceStore`]: Trait defining the authoritative provenance access API. +//! - [`LocalProvenanceStore`]: In-memory entry-backed implementation. +//! - [`ProvenanceService`]: Standalone multi-worldline provenance subsystem. +//! - [`ProvenanceEntry`]: Single source of truth for one recorded provenance step. +//! - [`BoundaryTransitionRecord`]: Deterministic contiguous provenance segment. //! - [`HistoryError`]: Error type for history access failures. -//! - [`CheckpointRef`]: Reference to a checkpoint for fast seek. //! //! # `U0Ref` = `WarpId` //! @@ -20,8 +21,6 @@ //! simply the `WarpId`. The engine's `initial_state` for a warp serves as the U0 //! starting point for replay. -// The crate uses u64 ticks but Vec lengths are usize; on 64-bit platforms these -// are the same size, and we don't support 32-bit targets for this crate. #![allow(clippy::cast_possible_truncation)] use std::collections::BTreeMap; @@ -29,8 +28,10 @@ use std::collections::BTreeMap; use thiserror::Error; use crate::graph::GraphStore; +use crate::head::WriterHeadKey; use crate::ident::{Hash, WarpId}; -use crate::snapshot::compute_state_root_for_warp_store; +use crate::snapshot::{compute_state_root_for_warp_state, compute_state_root_for_warp_store}; +use crate::worldline_state::WorldlineState; use super::worldline::{ AtomWrite, AtomWriteSet, HashTriplet, OutputFrameSet, WorldlineId, WorldlineTickPatchV1, @@ -40,9 +41,6 @@ use super::worldline::{ #[derive(Debug, Clone, PartialEq, Eq, Error)] pub enum HistoryError { /// The requested tick is not available in the store. - /// - /// This can occur when seeking beyond recorded history or when - /// retention policy has pruned older ticks. #[error("history unavailable for tick {tick}")] HistoryUnavailable { /// The tick that was requested but not found. @@ -58,9 +56,6 @@ pub enum HistoryError { WorldlineAlreadyExists(WorldlineId), /// The provided tick does not match the expected next tick (append-only invariant). 
- /// - /// This occurs when attempting to append a tick that would create a gap or - /// overlap in the history sequence. #[error("tick gap: expected tick {expected}, got {got}")] TickGap { /// The tick that was expected (current history length). @@ -68,17 +63,118 @@ pub enum HistoryError { /// The tick that was provided. got: u64, }, + + /// The entry worldline does not match the destination worldline. + #[error("entry worldline mismatch: expected {expected:?}, got {got:?}")] + EntryWorldlineMismatch { + /// The registered worldline that was being appended. + expected: WorldlineId, + /// The worldline encoded in the entry. + got: WorldlineId, + }, + + /// A local commit entry must carry the committing writer head. + #[error("local commit missing head attribution for tick {tick}")] + LocalCommitMissingHeadKey { + /// The entry tick. + tick: u64, + }, + + /// A local commit entry must carry a replay patch. + #[error("local commit missing patch for tick {tick}")] + LocalCommitMissingPatch { + /// The entry tick. + tick: u64, + }, + + /// The local commit head must belong to the same worldline as the entry. + #[error("local commit head/worldline mismatch: entry {entry_worldline:?}, head {head_key:?}")] + HeadWorldlineMismatch { + /// Worldline encoded in the entry. + entry_worldline: WorldlineId, + /// Head key carried by the entry. + head_key: WriterHeadKey, + }, + + /// Parent references must already be stored in canonical commit-hash order. + #[error("parent refs must be in canonical commit-hash order at tick {tick}")] + NonCanonicalParents { + /// The entry tick whose parent refs were non-canonical. + tick: u64, + }, +} + +/// Errors that can occur when constructing or validating a BTR. +#[derive(Debug, Clone, PartialEq, Eq, Error)] +pub enum BtrError { + /// Wrapped history lookup failure. + #[error(transparent)] + History(#[from] HistoryError), + + /// A BTR must carry at least one provenance entry. 
+ #[error("BTR payload cannot be empty")] + EmptyPayload, + + /// The record worldline must match the payload worldline. + #[error("BTR worldline mismatch: expected {expected:?}, got {got:?}")] + WorldlineMismatch { + /// Worldline claimed by the record. + expected: WorldlineId, + /// Worldline found in payload content. + got: WorldlineId, + }, + + /// A payload entry belonged to a different worldline. + #[error("BTR payload mixed worldlines: expected {expected:?}, got {got:?}")] + MixedWorldline { + /// The payload worldline that all entries must match. + expected: WorldlineId, + /// The mismatching entry worldline. + got: WorldlineId, + }, + + /// Payload ticks must form one contiguous run. + #[error("BTR payload is not contiguous: expected tick {expected}, got {got}")] + NonContiguousTicks { + /// The next expected tick. + expected: u64, + /// The observed tick. + got: u64, + }, + + /// The record worldline was not registered in the provenance service. + #[error("BTR references unknown worldline: {0:?}")] + UnknownWorldline(WorldlineId), + + /// The record `u0_ref` does not match the registered worldline. + #[error("BTR u0_ref mismatch: expected {expected:?}, got {got:?}")] + U0RefMismatch { + /// Registered value. + expected: WarpId, + /// Value carried by the BTR. + got: WarpId, + }, + + /// The input boundary hash does not match the worldline prefix before the payload. + #[error("BTR input boundary hash mismatch")] + InputBoundaryHashMismatch { + /// Expected deterministic input boundary. + expected: Hash, + /// Value carried by the BTR. + got: Hash, + }, + + /// The output boundary hash does not match the payload tip. + #[error("BTR output boundary hash mismatch")] + OutputBoundaryHashMismatch { + /// Expected deterministic output boundary. + expected: Hash, + /// Value carried by the BTR. + got: Hash, + }, } /// Reference to a checkpoint within the provenance store. 
-/// -/// Checkpoints enable fast seeking by providing a known-good state snapshot -/// at a specific tick. Instead of replaying from U0, cursors can replay -/// from the nearest checkpoint before the target tick. -/// -/// This type is only meaningful within the provenance/checkpoint subsystem. -/// It is created via [`LocalProvenanceStore::add_checkpoint`] and consumed -/// by [`ProvenanceStore::checkpoint_before`] during cursor seek operations. #[derive(Clone, Copy, PartialEq, Eq, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct CheckpointRef { @@ -88,79 +184,249 @@ pub struct CheckpointRef { pub state_hash: Hash, } -/// Trait for accessing worldline provenance data. -/// -/// This trait defines the seam for provenance data access, allowing different -/// backing stores (local memory, disk, wormhole network) to provide the same -/// interface for cursor replay operations. -/// -/// # Thread Safety -/// -/// Implementations should be thread-safe (`Send + Sync`) to allow concurrent -/// cursor access from multiple sessions. -/// -/// # `U0Ref` = `WarpId` -/// -/// The `u0` method returns a `WarpId` which serves as a handle to the engine's -/// `initial_state` for the warp. This is the MVP approach; future versions may -/// return a richer checkpoint reference. -pub trait ProvenanceStore: Send + Sync { - /// Returns the U0 reference (initial state handle) for a worldline. - /// - /// For MVP, this is the `WarpId` that can be used to retrieve the initial - /// state from the engine. +/// Reference to a parent provenance commit. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct ProvenanceRef { + /// Parent worldline. + pub worldline_id: WorldlineId, + /// Parent tick identity. + pub worldline_tick: u64, + /// Parent commit hash. + pub commit_hash: Hash, +} + +/// Event kind recorded by a provenance entry. 
+#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum ProvenanceEventKind { + /// A local writer-head commit produced by the live runtime. + LocalCommit, + /// Placeholder for a future cross-worldline message delivery. + CrossWorldlineMessage { + /// Source worldline. + source_worldline: WorldlineId, + /// Source tick. + source_tick: u64, + /// Stable message id. + message_id: Hash, + }, + /// Placeholder for a future merge/import event. + MergeImport { + /// Source worldline. + source_worldline: WorldlineId, + /// Source tick. + source_tick: u64, + /// Stable imported op id. + op_id: Hash, + }, + /// Placeholder for a future conflict artifact. + ConflictArtifact { + /// Stable conflict artifact id. + artifact_id: Hash, + }, +} + +/// Single authoritative provenance record for one worldline step. +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct ProvenanceEntry { + /// Worldline that owns this entry. + pub worldline_id: WorldlineId, + /// Append identity within the worldline (0-based). + pub worldline_tick: u64, + /// Correlation metadata from the runtime SuperTick. + pub global_tick: u64, + /// Writer head that produced this entry, when applicable. + pub head_key: Option, + /// Explicit parent refs in canonical stored order. + pub parents: Vec, + /// Recorded event kind. + pub event_kind: ProvenanceEventKind, + /// Recorded state/patch/commit commitments. + pub expected: HashTriplet, + /// Replay patch for this entry, when applicable. + pub patch: Option, + /// Recorded materialization outputs. + pub outputs: OutputFrameSet, + /// Recorded atom-write provenance. + pub atom_writes: AtomWriteSet, +} + +impl ProvenanceEntry { + /// Returns the commit reference for this entry. 
+ #[must_use] + pub fn as_ref(&self) -> ProvenanceRef { + ProvenanceRef { + worldline_id: self.worldline_id, + worldline_tick: self.worldline_tick, + commit_hash: self.expected.commit_hash, + } + } + + /// Constructs a local commit provenance entry. + #[must_use] + #[allow(clippy::too_many_arguments)] + pub fn local_commit( + worldline_id: WorldlineId, + worldline_tick: u64, + global_tick: u64, + head_key: WriterHeadKey, + parents: Vec, + expected: HashTriplet, + patch: WorldlineTickPatchV1, + outputs: OutputFrameSet, + atom_writes: AtomWriteSet, + ) -> Self { + Self { + worldline_id, + worldline_tick, + global_tick, + head_key: Some(head_key), + parents, + event_kind: ProvenanceEventKind::LocalCommit, + expected, + patch: Some(patch), + outputs, + atom_writes, + } + } +} + +/// Single-worldline contiguous provenance payload. +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct BtrPayload { + /// Worldline represented by this payload. + pub worldline_id: WorldlineId, + /// First worldline tick included in `entries`. + pub start_tick: u64, + /// Contiguous entries in append order. + pub entries: Vec, +} + +impl BtrPayload { + /// Returns the exclusive end tick for the payload. + #[must_use] + pub fn end_tick_exclusive(&self) -> u64 { + self.start_tick + self.entries.len() as u64 + } + + /// Validates structural payload invariants. /// /// # Errors /// - /// Returns [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist. - fn u0(&self, w: WorldlineId) -> Result; + /// Returns [`BtrError`] if the payload is empty, mixes worldlines, or is + /// not contiguous by `worldline_tick`. 
+ pub fn validate(&self) -> Result<(), BtrError> { + let Some(first) = self.entries.first() else { + return Err(BtrError::EmptyPayload); + }; + if first.worldline_id != self.worldline_id { + return Err(BtrError::MixedWorldline { + expected: self.worldline_id, + got: first.worldline_id, + }); + } + if first.worldline_tick != self.start_tick { + return Err(BtrError::NonContiguousTicks { + expected: self.start_tick, + got: first.worldline_tick, + }); + } - /// Returns the number of recorded ticks for a worldline. - /// - /// This is the length of the patch history, not the current tick number - /// (which may be `len() - 1` if 0-indexed). + let mut expected_tick = self.start_tick; + for entry in &self.entries { + if entry.worldline_id != self.worldline_id { + return Err(BtrError::MixedWorldline { + expected: self.worldline_id, + got: entry.worldline_id, + }); + } + if entry.worldline_tick != expected_tick { + return Err(BtrError::NonContiguousTicks { + expected: expected_tick, + got: entry.worldline_tick, + }); + } + expected_tick += 1; + } + + Ok(()) + } +} + +/// Boundary Transition Record (BTR) for a contiguous provenance segment. +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct BoundaryTransitionRecord { + /// Worldline carried by this record. + pub worldline_id: WorldlineId, + /// Initial worldline state handle. + pub u0_ref: WarpId, + /// State boundary hash before the payload begins. + pub input_boundary_hash: Hash, + /// State boundary hash after the payload ends. + pub output_boundary_hash: Hash, + /// Contiguous payload entries. + pub payload: BtrPayload, + /// Deterministic monotone counter. No wall-clock semantics. + pub logical_counter: u64, + /// Opaque auth payload reserved for later phases. + pub auth_tag: Vec, +} + +impl BoundaryTransitionRecord { + /// Validates self-contained BTR invariants. 
 ///
 /// # Errors
 ///
- /// Returns [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist.
+ /// Returns [`BtrError`] if the payload is malformed, worldline ids disagree,
+ /// or the output boundary does not match the payload tip.
+ pub fn validate(&self) -> Result<(), BtrError> {
+ self.payload.validate()?;
+ if self.payload.worldline_id != self.worldline_id {
+ return Err(BtrError::WorldlineMismatch {
+ expected: self.worldline_id,
+ got: self.payload.worldline_id,
+ });
+ }
+ let Some(last) = self.payload.entries.last() else {
+ return Err(BtrError::EmptyPayload);
+ };
+ if self.output_boundary_hash != last.expected.state_root {
+ return Err(BtrError::OutputBoundaryHashMismatch {
+ expected: last.expected.state_root,
+ got: self.output_boundary_hash,
+ });
+ }
+ Ok(())
+ }
+}
+
+/// Trait for accessing worldline provenance data.
+pub trait ProvenanceStore: Send + Sync {
+ /// Returns the U0 reference (initial state handle) for a worldline.
+ fn u0(&self, w: WorldlineId) -> Result<WarpId, HistoryError>;
+
+ /// Returns the number of recorded ticks for a worldline.
 fn len(&self, w: WorldlineId) -> Result<u64, HistoryError>;

- /// Returns the patch for a specific tick.
- ///
- /// # Errors
- ///
- /// - [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist.
- /// - [`HistoryError::HistoryUnavailable`] if the tick is out of range or pruned.
- fn patch(&self, w: WorldlineId, tick: u64) -> Result<WorldlineTickPatchV1, HistoryError>;
+ /// Returns the entry for a specific tick.
+ fn entry(&self, w: WorldlineId, tick: u64) -> Result<ProvenanceEntry, HistoryError>;

- /// Returns the expected hash triplet for verification at a specific tick.
- ///
- /// Cursors use this to verify their replayed state matches the recorded
- /// state root, patch digest, and commit hash.
- ///
- /// # Errors
- ///
- /// - [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist.
- /// - [`HistoryError::HistoryUnavailable`] if the tick is out of range or pruned.
- fn expected(&self, w: WorldlineId, tick: u64) -> Result<HashTriplet, HistoryError>;
+ /// Returns the stored parent refs for a specific tick.
+ fn parents(&self, w: WorldlineId, tick: u64) -> Result<Vec<ProvenanceRef>, HistoryError>;

- /// Returns the recorded channel outputs for a specific tick.
- ///
- /// These are the materialization bus outputs that were emitted during
- /// the original tick execution. Playback uses these for truth frame
- /// delivery rather than re-executing rules.
+ /// Appends a local commit entry.
 ///
 /// # Errors
 ///
- /// - [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist.
- /// - [`HistoryError::HistoryUnavailable`] if the tick is out of range or pruned.
- fn outputs(&self, w: WorldlineId, tick: u64) -> Result<OutputFrameSet, HistoryError>;
+ /// Returns [`HistoryError`] if the worldline does not exist, the tick is not
+ /// append-only, or the entry violates local-commit invariants.
+ fn append_local_commit(&mut self, entry: ProvenanceEntry) -> Result<(), HistoryError>;

 /// Returns the nearest checkpoint before a given tick, if any.
- ///
- /// This enables fast seeking by starting replay from a checkpoint rather
- /// than from U0. Returns `None` if no checkpoint exists before the given
- /// tick, or if the worldline doesn't exist in the store.
 fn checkpoint_before(&self, w: WorldlineId, tick: u64) -> Option<CheckpointRef>;

 /// Returns whether the worldline has any recorded history.
@@ -173,36 +439,17 @@ pub trait ProvenanceStore: Send + Sync {
 }
 }

-// Per-worldline history storage.
 #[derive(Debug, Clone)]
 struct WorldlineHistory {
- // U0 reference (`WarpId` for MVP).
 u0_ref: WarpId,
- // Patches in tick order.
- patches: Vec<WorldlineTickPatchV1>,
- // Expected hash triplets in tick order.
- expected: Vec<HashTriplet>,
- // Recorded outputs in tick order.
- outputs: Vec<OutputFrameSet>,
- // Atom writes in tick order (for provenance tracking).
- atom_writes: Vec<AtomWriteSet>,
- // Checkpoints for fast seeking.
+ initial_boundary_hash: Hash,
+ entries: Vec<ProvenanceEntry>,
 checkpoints: Vec<CheckpointRef>,
 }

 /// In-memory provenance store backed by `Vec`s.
-///
-/// This is the simplest implementation suitable for testing and single-process
-/// scenarios. For production use with large histories, consider a disk-backed
-/// or network-backed implementation.
-///
-/// # Invariant
-///
-/// For each worldline: `patches.len() == expected.len() == outputs.len() == atom_writes.len()`.
-/// This maintains index alignment so tick N's data is at index N.
 #[derive(Debug, Clone, Default)]
 pub struct LocalProvenanceStore {
- /// Per-worldline history, keyed by worldline ID.
 worldlines: BTreeMap<WorldlineId, WorldlineHistory>,
 }

@@ -215,9 +462,10 @@ impl LocalProvenanceStore {

 /// Registers a new worldline with its U0 reference.
 ///
- /// This must be called before appending any history for a worldline.
- /// Re-registering with the same `u0_ref` is a no-op. Re-registering with
- /// a different `u0_ref` returns an error to prevent integrity bugs.
+ /// This convenience helper uses a deterministic zero digest as the initial
+ /// boundary. [`ProvenanceService`] should prefer
+ /// [`Self::register_worldline_with_boundary`] so BTR construction can use the
+ /// real genesis boundary hash.
 ///
 /// # Errors
 ///
@@ -227,23 +475,39 @@ impl LocalProvenanceStore {
 &mut self,
 id: WorldlineId,
 u0_ref: WarpId,
+ ) -> Result<(), HistoryError> {
+ self.register_worldline_with_boundary(id, u0_ref, crate::constants::digest_len0_u64())
+ }
+
+ /// Registers a new worldline with its U0 reference and initial boundary hash.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`HistoryError::WorldlineAlreadyExists`] if the worldline is already
+ /// registered with a different configuration.
+ pub fn register_worldline_with_boundary( + &mut self, + id: WorldlineId, + u0_ref: WarpId, + initial_boundary_hash: Hash, ) -> Result<(), HistoryError> { use std::collections::btree_map::Entry; + match self.worldlines.entry(id) { Entry::Occupied(existing) => { - if existing.get().u0_ref != u0_ref { + let existing = existing.get(); + if existing.u0_ref != u0_ref + || existing.initial_boundary_hash != initial_boundary_hash + { return Err(HistoryError::WorldlineAlreadyExists(id)); } - // Same u0_ref: idempotent no-op Ok(()) } Entry::Vacant(vacant) => { vacant.insert(WorldlineHistory { u0_ref, - patches: Vec::new(), - expected: Vec::new(), - outputs: Vec::new(), - atom_writes: Vec::new(), + initial_boundary_hash, + entries: Vec::new(), checkpoints: Vec::new(), }); Ok(()) @@ -251,143 +515,74 @@ impl LocalProvenanceStore { } } - /// Appends a tick's data to a worldline's history. - /// - /// The tick number must equal the current length (append-only, no gaps). - /// This method stores an empty atom write set; use [`Self::append_with_writes`] - /// to include provenance metadata. - /// - /// # Errors - /// - /// - Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. - /// - Returns [`HistoryError::TickGap`] if the patch's `global_tick` doesn't equal the - /// current history length (the expected next tick). - pub fn append( - &mut self, - w: WorldlineId, - patch: WorldlineTickPatchV1, - expected: HashTriplet, - outputs: OutputFrameSet, - ) -> Result<(), HistoryError> { - self.append_with_writes(w, patch, expected, outputs, Vec::new()) + fn history(&self, w: WorldlineId) -> Result<&WorldlineHistory, HistoryError> { + self.worldlines + .get(&w) + .ok_or(HistoryError::WorldlineNotFound(w)) } - /// Appends a tick's data to a worldline's history, including atom write provenance. - /// - /// The tick number must equal the current length (append-only, no gaps). 
- /// - /// # Arguments - /// - /// * `w` - The worldline to append to - /// * `patch` - The tick patch data - /// * `expected` - The expected hash triplet for verification - /// * `outputs` - Channel outputs emitted during this tick - /// * `atom_writes` - Atom writes for provenance tracking (rule→atom attribution). - /// **Invariant**: Each entry's `tick` must equal `patch.global_tick()`, and each - /// `AtomWrite` must reference an atom whose slot appears in - /// `patch.out_slots` (either as `SlotId::Attachment(node_alpha(atom))` or - /// `SlotId::Node(atom)`). Writes to atoms not declared in `out_slots` will be - /// stored and retrievable via [`atom_writes()`](Self::atom_writes), but will be - /// invisible to [`atom_history()`](Self::atom_history) which uses `out_slots` as - /// its causal cone index. The engine enforces this structurally — atom writes are - /// derived from the same footprint that produces `out_slots`. - /// - /// # Errors - /// - /// - Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. - /// - Returns [`HistoryError::TickGap`] if the patch's `global_tick` doesn't equal the - /// current history length (the expected next tick). 
- pub fn append_with_writes( - &mut self, - w: WorldlineId, - patch: WorldlineTickPatchV1, - expected: HashTriplet, - outputs: OutputFrameSet, - atom_writes: AtomWriteSet, - ) -> Result<(), HistoryError> { - let history = self - .worldlines + fn history_mut(&mut self, w: WorldlineId) -> Result<&mut WorldlineHistory, HistoryError> { + self.worldlines .get_mut(&w) - .ok_or(HistoryError::WorldlineNotFound(w))?; + .ok_or(HistoryError::WorldlineNotFound(w)) + } - let expected_tick = history.patches.len() as u64; - let got_tick = patch.global_tick(); - if got_tick != expected_tick { + fn validate_local_commit_entry( + worldline_id: WorldlineId, + expected_tick: u64, + entry: &ProvenanceEntry, + ) -> Result<(), HistoryError> { + if entry.worldline_id != worldline_id { + return Err(HistoryError::EntryWorldlineMismatch { + expected: worldline_id, + got: entry.worldline_id, + }); + } + if entry.worldline_tick != expected_tick { return Err(HistoryError::TickGap { expected: expected_tick, - got: got_tick, + got: entry.worldline_tick, }); } - - // Debug-only: validate atom write invariants. Zero cost in release builds. - #[cfg(debug_assertions)] + let Some(head_key) = entry.head_key else { + return Err(HistoryError::LocalCommitMissingHeadKey { + tick: entry.worldline_tick, + }); + }; + if head_key.worldline_id != entry.worldline_id { + return Err(HistoryError::HeadWorldlineMismatch { + entry_worldline: entry.worldline_id, + head_key, + }); + } + if entry.patch.is_none() { + return Err(HistoryError::LocalCommitMissingPatch { + tick: entry.worldline_tick, + }); + } + if !entry + .parents + .windows(2) + .all(|pair| pair[0].commit_hash <= pair[1].commit_hash) { - for aw in &atom_writes { - // Each AtomWrite.tick must match the enclosing patch tick. - debug_assert_eq!( - aw.tick, got_tick, - "AtomWrite tick {} does not match enclosing patch tick {}", - aw.tick, got_tick, - ); - // Each AtomWrite must reference an atom declared in out_slots. 
- let att = crate::tick_patch::SlotId::Attachment(
- crate::attachment::AttachmentKey::node_alpha(aw.atom),
- );
- let node = crate::tick_patch::SlotId::Node(aw.atom);
- debug_assert!(
- patch.out_slots.contains(&att) || patch.out_slots.contains(&node),
- "AtomWrite for {:?} at tick {} not declared in out_slots — \
- atom_history() will not find this write",
- aw.atom,
- got_tick,
- );
- }
+ return Err(HistoryError::NonCanonicalParents {
+ tick: entry.worldline_tick,
+ });
 }
-
- history.patches.push(patch);
- history.expected.push(expected);
- history.outputs.push(outputs);
- history.atom_writes.push(atom_writes);
 Ok(())
 }

 /// Returns the atom writes for a specific tick.
 ///
- /// This enables the TTD "Show Me Why" provenance feature: tracing which
- /// rules wrote which atoms during a tick.
- ///
 /// # Errors
 ///
- /// - [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist.
- /// - [`HistoryError::HistoryUnavailable`] if the tick is out of range or pruned.
+ /// Returns [`HistoryError`] if the worldline or tick is unavailable.
 pub fn atom_writes(&self, w: WorldlineId, tick: u64) -> Result<AtomWriteSet, HistoryError> {
- let history = self
- .worldlines
- .get(&w)
- .ok_or(HistoryError::WorldlineNotFound(w))?;
-
- // SAFETY: cast_possible_truncation — on 32-bit targets (WASM), tick values
- // above usize::MAX would truncate and index the wrong element. Guard with a
- // bounds check against the actual length before casting.
- if tick >= history.atom_writes.len() as u64 {
- return Err(HistoryError::HistoryUnavailable { tick });
- }
- Ok(history.atom_writes[tick as usize].clone())
+ Ok(self.entry(w, tick)?.atom_writes)
 }

 /// Returns the atom write history for a specific atom by walking its causal cone.
 ///
- /// Walks backwards through the worldline's patch history, using the declared
- /// `out_slots` (Paper III's `Out(μ)`) to filter which ticks' atom writes are
- /// examined. Only ticks whose `out_slots` declare the atom's slot have their
- /// writes collected.
 The walk terminates early when a creation write is found
- /// (the atom's origin).
- ///
- /// This implements the derivation graph `D(v)` from Paper III (§3.2), restricted
- /// to the target atom's slot dependencies.
- ///
- /// The returned writes are in tick order (oldest first).
- ///
 /// # Errors
 ///
 /// Returns [`HistoryError::WorldlineNotFound`] if the worldline doesn't exist.
@@ -396,49 +591,34 @@ impl LocalProvenanceStore {
 w: WorldlineId,
 atom: &crate::ident::NodeKey,
 ) -> Result<Vec<AtomWrite>, HistoryError> {
- let history = self
- .worldlines
- .get(&w)
- .ok_or(HistoryError::WorldlineNotFound(w))?;
-
- // The atom's attachment slot — this is what Out(μ) contains when a
- // tick writes to an atom's payload (SetAttachment on the node's α plane).
+ let history = self.history(w)?;
 let attachment_slot = crate::tick_patch::SlotId::Attachment(
 crate::attachment::AttachmentKey::node_alpha(*atom),
 );
- // The node skeleton slot — Out(μ) contains this for UpsertNode/DeleteNode.
 let node_slot = crate::tick_patch::SlotId::Node(*atom);

- // Walk backwards from tip, collecting writes in reverse chronological order.
 let mut writes_rev: Vec<AtomWrite> = Vec::new();
- let len = history.patches.len();
- for tick_idx in (0..len).rev() {
- let patch = &history.patches[tick_idx];
+ for entry in history.entries.iter().rev() {
+ let Some(patch) = entry.patch.as_ref() else {
+ continue;
+ };

- // Check Out(μ): did this tick produce the atom's slot?
 let touched = patch
 .out_slots
 .iter()
- .any(|s| *s == attachment_slot || *s == node_slot);
-
+ .any(|slot| *slot == attachment_slot || *slot == node_slot);
 if !touched {
 continue;
 }

- // This tick wrote to the atom — collect matching AtomWrites.
- // Iterate in reverse so the final writes_rev.reverse() preserves
- // within-tick execution order (forward iteration would flip it).
- if let Some(tick_writes) = history.atom_writes.get(tick_idx) { - for aw in tick_writes.iter().rev() { - if &aw.atom == atom { - let is_creation = aw.is_create(); - writes_rev.push(aw.clone()); - if is_creation { - // Reached the atom's origin — stop walking. - writes_rev.reverse(); - return Ok(writes_rev); - } + for aw in entry.atom_writes.iter().rev() { + if &aw.atom == atom { + let is_creation = aw.is_create(); + writes_rev.push(aw.clone()); + if is_creation { + writes_rev.reverse(); + return Ok(writes_rev); } } } @@ -450,8 +630,6 @@ impl LocalProvenanceStore { /// Records a checkpoint for a worldline. /// - /// Checkpoints are stored in tick order for efficient binary search. - /// /// # Errors /// /// Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. @@ -460,13 +638,7 @@ impl LocalProvenanceStore { w: WorldlineId, checkpoint: CheckpointRef, ) -> Result<(), HistoryError> { - let history = self - .worldlines - .get_mut(&w) - .ok_or(HistoryError::WorldlineNotFound(w))?; - - // Maintain sorted order by tick; replace if a checkpoint at this tick - // already exists (prevents duplicate ticks breaking "before" semantics). + let history = self.history_mut(w)?; match history .checkpoints .binary_search_by_key(&checkpoint.tick, |c| c.tick) @@ -479,10 +651,6 @@ impl LocalProvenanceStore { /// Creates a checkpoint at the given tick by computing the state hash. /// - /// This computes the canonical state hash for the given `GraphStore` and - /// records a checkpoint at the specified tick. The checkpoint enables fast - /// seeking during cursor replay. - /// /// # Errors /// /// Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. 
@@ -492,16 +660,9 @@
 tick: u64,
 state: &GraphStore,
 ) -> Result<CheckpointRef, HistoryError> {
- let history = self
- .worldlines
- .get_mut(&w)
- .ok_or(HistoryError::WorldlineNotFound(w))?;
-
+ let history = self.history_mut(w)?;
 let state_hash = compute_state_root_for_warp_store(state, history.u0_ref);
 let checkpoint_ref = CheckpointRef { tick, state_hash };
-
- // Insert in sorted order by tick; replace existing checkpoint at this
- // tick to prevent duplicates (same semantics as add_checkpoint).
 match history
 .checkpoints
 .binary_search_by_key(&checkpoint_ref.tick, |c| c.tick)
 {
 Ok(index) => history.checkpoints[index] = checkpoint_ref,
 Err(pos) => history.checkpoints.insert(pos, checkpoint_ref),
 }
-
 Ok(checkpoint_ref)
 }

 /// Creates a new worldline that is a prefix-copy of the source up to `fork_tick`.
 ///
- /// The new worldline shares the same U0 reference as the source and contains
- /// copies of all history data (patches, expected hashes, outputs, atom writes,
- /// and checkpoints) from tick 0 through `fork_tick` inclusive.
- ///
 /// # Errors
 ///
- /// - Returns [`HistoryError::WorldlineAlreadyExists`] if `new_id` is already registered.
- /// - Returns [`HistoryError::WorldlineNotFound`] if the source worldline doesn't exist.
- /// - Returns [`HistoryError::HistoryUnavailable`] if `fork_tick` is beyond the
- /// available history in the source worldline.
+ /// Returns [`HistoryError`] if the source is missing, the target exists, or
+ /// `fork_tick` is out of range.
pub fn fork( &mut self, source: WorldlineId, @@ -535,29 +689,17 @@ impl LocalProvenanceStore { return Err(HistoryError::WorldlineAlreadyExists(new_id)); } - let source_history = self - .worldlines - .get(&source) - .ok_or(HistoryError::WorldlineNotFound(source))?; - - // Validate fork_tick is within available history - let source_len = source_history.patches.len(); - // SAFETY: cast_possible_truncation — history length fits in u64 because Vec - // cannot exceed isize::MAX elements, and on 64-bit platforms usize == u64. + let source_history = self.history(source)?; + let source_len = source_history.entries.len(); if fork_tick >= source_len as u64 { return Err(HistoryError::HistoryUnavailable { tick: fork_tick }); } - // Copy prefix data up to and including fork_tick - // SAFETY: cast_possible_truncation — fork_tick < source_len (checked above), - // so fork_tick + 1 <= source_len <= usize::MAX; the cast back to usize is lossless. let end_idx = (fork_tick + 1) as usize; let new_history = WorldlineHistory { u0_ref: source_history.u0_ref, - patches: source_history.patches[..end_idx].to_vec(), - expected: source_history.expected[..end_idx].to_vec(), - outputs: source_history.outputs[..end_idx].to_vec(), - atom_writes: source_history.atom_writes[..end_idx].to_vec(), + initial_boundary_hash: source_history.initial_boundary_hash, + entries: source_history.entries[..end_idx].to_vec(), checkpoints: source_history .checkpoints .iter() @@ -565,75 +707,64 @@ impl LocalProvenanceStore { .copied() .collect(), }; - self.worldlines.insert(new_id, new_history); Ok(()) } + + /// Returns the initial boundary hash registered for this worldline. + /// + /// # Errors + /// + /// Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. + pub fn initial_boundary_hash(&self, w: WorldlineId) -> Result { + Ok(self.history(w)?.initial_boundary_hash) + } + + /// Returns the tip ref for a worldline, if any. 
+ ///
+ /// # Errors
+ ///
+ /// Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered.
+ pub fn tip_ref(&self, w: WorldlineId) -> Result<Option<ProvenanceRef>, HistoryError> {
+ Ok(self.history(w)?.entries.last().map(ProvenanceEntry::as_ref))
+ }
 }

 impl ProvenanceStore for LocalProvenanceStore {
 fn u0(&self, w: WorldlineId) -> Result<WarpId, HistoryError> {
- self.worldlines
- .get(&w)
- .map(|h| h.u0_ref)
- .ok_or(HistoryError::WorldlineNotFound(w))
+ Ok(self.history(w)?.u0_ref)
 }

 fn len(&self, w: WorldlineId) -> Result<u64, HistoryError> {
- self.worldlines
- .get(&w)
- .map(|h| h.patches.len() as u64)
- .ok_or(HistoryError::WorldlineNotFound(w))
+ Ok(self.history(w)?.entries.len() as u64)
 }

- fn patch(&self, w: WorldlineId, tick: u64) -> Result<WorldlineTickPatchV1, HistoryError> {
- let history = self
- .worldlines
- .get(&w)
- .ok_or(HistoryError::WorldlineNotFound(w))?;
-
- history
- .patches
+ fn entry(&self, w: WorldlineId, tick: u64) -> Result<ProvenanceEntry, HistoryError> {
+ self.history(w)?
+ .entries
 .get(tick as usize)
 .cloned()
 .ok_or(HistoryError::HistoryUnavailable { tick })
 }

- fn expected(&self, w: WorldlineId, tick: u64) -> Result<HashTriplet, HistoryError> {
- let history = self
- .worldlines
- .get(&w)
- .ok_or(HistoryError::WorldlineNotFound(w))?;
-
- history
- .expected
- .get(tick as usize)
- .copied()
- .ok_or(HistoryError::HistoryUnavailable { tick })
+ fn parents(&self, w: WorldlineId, tick: u64) -> Result<Vec<ProvenanceRef>, HistoryError> {
+ Ok(self.entry(w, tick)?.parents)
 }

- fn outputs(&self, w: WorldlineId, tick: u64) -> Result<OutputFrameSet, HistoryError> {
- let history = self
- .worldlines
- .get(&w)
- .ok_or(HistoryError::WorldlineNotFound(w))?;
-
- history
- .outputs
- .get(tick as usize)
- .cloned()
- .ok_or(HistoryError::HistoryUnavailable { tick })
+ fn append_local_commit(&mut self, entry: ProvenanceEntry) -> Result<(), HistoryError> {
+ let history = self.history_mut(entry.worldline_id)?;
+ let expected_tick = history.entries.len() as u64;
+ Self::validate_local_commit_entry(entry.worldline_id, expected_tick, &entry)?;
+ history.entries.push(entry);
+ Ok(())
 }

 fn checkpoint_before(&self, w: 
WorldlineId, tick: u64) -> Option<CheckpointRef> {
 let history = self.worldlines.get(&w)?;
-
- // Binary search for the largest checkpoint tick < target tick
 let pos = history
 .checkpoints
 .binary_search_by_key(&tick, |c| c.tick)
 .unwrap_or_else(|e| e);
-
 if pos == 0 {
 None
 } else {
@@ -642,24 +773,229 @@
 }
 }

-#[cfg(test)]
-mod tests {
- #![allow(clippy::unwrap_used)]
- #![allow(clippy::expect_used)]
- #![allow(clippy::cast_possible_truncation)]
- #![allow(clippy::redundant_clone)]
-
- use super::*;
- use crate::ident::WarpId;
- use crate::worldline::WorldlineTickHeaderV1;
+/// Standalone multi-worldline provenance subsystem.
+#[derive(Debug, Clone, Default)]
+pub struct ProvenanceService {
+ store: LocalProvenanceStore,
+}

- fn test_worldline_id() -> WorldlineId {
- WorldlineId([1u8; 32])
+impl ProvenanceService {
+ /// Creates an empty provenance service.
+ #[must_use]
+ pub fn new() -> Self {
+ Self::default()
 }

- fn test_warp_id() -> WarpId {
- WarpId([2u8; 32])
- }
+ /// Registers a worldline using its deterministic replay base.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`HistoryError::WorldlineAlreadyExists`] if the worldline is already
+ /// registered with different U0 or boundary metadata.
+ pub fn register_worldline(
+ &mut self,
+ worldline_id: WorldlineId,
+ state: &WorldlineState,
+ ) -> Result<(), HistoryError> {
+ let initial_boundary_hash =
+ compute_state_root_for_warp_state(state.initial_state(), state.root());
+ self.store.register_worldline_with_boundary(
+ worldline_id,
+ state.root().warp_id,
+ initial_boundary_hash,
+ )
+ }
+
+ /// Returns the deterministic tip ref for a worldline, if any.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`HistoryError::WorldlineNotFound`] if the worldline isn't registered.
+ pub fn tip_ref(
+ &self,
+ worldline_id: WorldlineId,
+ ) -> Result<Option<ProvenanceRef>, HistoryError> {
+ self.store.tip_ref(worldline_id)
+ }
+
+ /// Builds a contiguous BTR from the registered provenance history.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`BtrError`] if the selected range is malformed or the worldline
+ /// is unknown.
+ pub fn build_btr(
+ &self,
+ worldline_id: WorldlineId,
+ start_tick: u64,
+ end_tick_exclusive: u64,
+ logical_counter: u64,
+ auth_tag: Vec<u8>,
+ ) -> Result<BoundaryTransitionRecord, BtrError> {
+ let history_len = self.store.len(worldline_id)?;
+ if start_tick >= end_tick_exclusive || end_tick_exclusive > history_len {
+ return Err(BtrError::EmptyPayload);
+ }
+
+ let entries = (start_tick..end_tick_exclusive)
+ .map(|tick| self.store.entry(worldline_id, tick))
+ .collect::<Result<Vec<_>, _>>()?;
+ let payload = BtrPayload {
+ worldline_id,
+ start_tick,
+ entries,
+ };
+ payload.validate()?;
+
+ let input_boundary_hash = if start_tick == 0 {
+ self.store.initial_boundary_hash(worldline_id)?
+ } else {
+ self.store
+ .entry(worldline_id, start_tick - 1)?
+ .expected
+ .state_root
+ };
+ let output_boundary_hash = payload
+ .entries
+ .last()
+ .ok_or(BtrError::EmptyPayload)?
+ .expected
+ .state_root;
+ let record = BoundaryTransitionRecord {
+ worldline_id,
+ u0_ref: self.store.u0(worldline_id)?,
+ input_boundary_hash,
+ output_boundary_hash,
+ payload,
+ logical_counter,
+ auth_tag,
+ };
+ self.validate_btr(&record)?;
+ Ok(record)
+ }
+
+ /// Validates a BTR against the registered provenance history.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`BtrError`] if the record is structurally invalid or does not
+ /// match the registered worldline history.
+ pub fn validate_btr(&self, record: &BoundaryTransitionRecord) -> Result<(), BtrError> {
+ record.validate()?;
+
+ let history = self
+ .store
+ .worldlines
+ .get(&record.worldline_id)
+ .ok_or(BtrError::UnknownWorldline(record.worldline_id))?;
+ if record.u0_ref != history.u0_ref {
+ return Err(BtrError::U0RefMismatch {
+ expected: history.u0_ref,
+ got: record.u0_ref,
+ });
+ }
+
+ let expected_input = if record.payload.start_tick == 0 {
+ history.initial_boundary_hash
+ } else {
+ self.store
+ .entry(record.worldline_id, record.payload.start_tick - 1)?
+ .expected
+ .state_root
+ };
+ if record.input_boundary_hash != expected_input {
+ return Err(BtrError::InputBoundaryHashMismatch {
+ expected: expected_input,
+ got: record.input_boundary_hash,
+ });
+ }
+
+ let expected_output = record
+ .payload
+ .entries
+ .last()
+ .ok_or(BtrError::EmptyPayload)?
+ .expected
+ .state_root;
+ if record.output_boundary_hash != expected_output {
+ return Err(BtrError::OutputBoundaryHashMismatch {
+ expected: expected_output,
+ got: record.output_boundary_hash,
+ });
+ }
+
+ for entry in &record.payload.entries {
+ let stored = self
+ .store
+ .entry(record.worldline_id, entry.worldline_tick)?;
+ if &stored != entry {
+ return Err(BtrError::OutputBoundaryHashMismatch {
+ expected: stored.expected.state_root,
+ got: entry.expected.state_root,
+ });
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl ProvenanceStore for ProvenanceService {
+ fn u0(&self, w: WorldlineId) -> Result<WarpId, HistoryError> {
+ self.store.u0(w)
+ }
+
+ fn len(&self, w: WorldlineId) -> Result<u64, HistoryError> {
+ self.store.len(w)
+ }
+
+ fn entry(&self, w: WorldlineId, tick: u64) -> Result<ProvenanceEntry, HistoryError> {
+ self.store.entry(w, tick)
+ }
+
+ fn parents(&self, w: WorldlineId, tick: u64) -> Result<Vec<ProvenanceRef>, HistoryError> {
+ self.store.parents(w, tick)
+ }
+
+ fn append_local_commit(&mut self, entry: ProvenanceEntry) -> Result<(), HistoryError> {
+ self.store.append_local_commit(entry)
+ }
+
+ fn checkpoint_before(&self, w: WorldlineId, tick: u64) -> Option<CheckpointRef> {
+ 
self.store.checkpoint_before(w, tick)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::cast_possible_truncation)]
+ #![allow(clippy::expect_used)]
+ #![allow(clippy::redundant_clone)]
+ #![allow(clippy::unwrap_used)]
+
+ use super::*;
+ use crate::attachment::AttachmentKey;
+ use crate::graph::GraphStore;
+ use crate::head::{make_head_id, WriterHeadKey};
+ use crate::ident::{make_node_id, NodeKey, WarpId};
+ use crate::materialization::make_channel_id;
+ use crate::tick_patch::SlotId;
+ use crate::worldline::{AtomWrite, WorldlineTickHeaderV1};
+
+ fn test_worldline_id() -> WorldlineId {
+ WorldlineId([1u8; 32])
+ }
+
+ fn test_warp_id() -> WarpId {
+ WarpId([2u8; 32])
+ }
+
+ fn test_head_key() -> WriterHeadKey {
+ WriterHeadKey {
+ worldline_id: test_worldline_id(),
+ head_id: make_head_id("default"),
+ }
+ }

 fn test_patch(tick: u64) -> WorldlineTickPatchV1 {
 WorldlineTickPatchV1 {
@@ -687,6 +1023,98 @@ mod tests {
 }
 }

+ fn test_node_key() -> NodeKey {
+ NodeKey {
+ warp_id: test_warp_id(),
+ local_id: make_node_id("test-atom"),
+ }
+ }
+
+ fn test_rule_id() -> [u8; 32] {
+ [42u8; 32]
+ }
+
+ fn test_entry(tick: u64) -> ProvenanceEntry {
+ ProvenanceEntry::local_commit(
+ test_worldline_id(),
+ tick,
+ tick,
+ test_head_key(),
+ if tick == 0 {
+ Vec::new()
+ } else {
+ vec![ProvenanceRef {
+ worldline_id: test_worldline_id(),
+ worldline_tick: tick - 1,
+ commit_hash: test_triplet(tick - 1).commit_hash,
+ }]
+ },
+ test_triplet(tick),
+ test_patch(tick),
+ vec![],
+ Vec::new(),
+ )
+ }
+
+ fn append_test_entry(
+ store: &mut LocalProvenanceStore,
+ worldline_id: WorldlineId,
+ patch: WorldlineTickPatchV1,
+ expected: HashTriplet,
+ outputs: OutputFrameSet,
+ atom_writes: AtomWriteSet,
+ ) {
+ let tick = patch.global_tick();
+ let parents = store
+ .tip_ref(worldline_id)
+ .unwrap()
+ .into_iter()
+ .collect::<Vec<_>>();
+ let entry = ProvenanceEntry::local_commit(
+ worldline_id,
+ tick,
+ tick,
+ WriterHeadKey {
+ worldline_id,
+ head_id: make_head_id("fixture"),
+ }, + parents, + expected, + patch, + outputs, + atom_writes, + ); + store.append_local_commit(entry).unwrap(); + } + + fn test_patch_with_atom_slots(tick: u64, atoms: &[NodeKey]) -> WorldlineTickPatchV1 { + let mut patch = test_patch(tick); + for atom in atoms { + patch + .out_slots + .push(SlotId::Attachment(AttachmentKey::node_alpha(*atom))); + } + patch + } + + fn test_patch_with_atom_mutation(tick: u64, atoms: &[NodeKey]) -> WorldlineTickPatchV1 { + let mut patch = test_patch(tick); + for atom in atoms { + let slot = SlotId::Attachment(AttachmentKey::node_alpha(*atom)); + patch.in_slots.push(slot); + patch.out_slots.push(slot); + } + patch + } + + fn test_patch_with_node_slots(tick: u64, atoms: &[NodeKey]) -> WorldlineTickPatchV1 { + let mut patch = test_patch(tick); + for atom in atoms { + patch.out_slots.push(SlotId::Node(*atom)); + } + patch + } + #[test] fn worldline_not_found() { let store = LocalProvenanceStore::new(); @@ -708,38 +1136,49 @@ mod tests { } #[test] - fn append_and_query() { + fn append_and_query_entry_api() { let mut store = LocalProvenanceStore::new(); let w = test_worldline_id(); - let warp = test_warp_id(); + store.register_worldline(w, test_warp_id()).unwrap(); - store.register_worldline(w, warp).unwrap(); + let entry = test_entry(0); + store.append_local_commit(entry.clone()).unwrap(); - let patch = test_patch(0); - let triplet = test_triplet(0); - let outputs = vec![]; + assert_eq!(store.len(w).unwrap(), 1); + assert_eq!(store.entry(w, 0).unwrap(), entry); + assert!(store.parents(w, 0).unwrap().is_empty()); + } - store.append(w, patch, triplet, outputs.clone()).unwrap(); + #[test] + fn entry_round_trips_patch_expected_outputs() { + let mut store = LocalProvenanceStore::new(); + let w = test_worldline_id(); + let outputs = vec![(make_channel_id("test:ok"), b"ok".to_vec())]; - assert_eq!(store.len(w).unwrap(), 1); - assert!(!store.is_empty(w).unwrap()); - assert_eq!(store.patch(w, 0).unwrap().global_tick(), 0); - 
assert_eq!(store.expected(w, 0).unwrap(), triplet); - assert_eq!(store.outputs(w, 0).unwrap(), outputs); + store.register_worldline(w, test_warp_id()).unwrap(); + append_test_entry( + &mut store, + w, + test_patch(0), + test_triplet(0), + outputs.clone(), + Vec::new(), + ); + + let entry = store.entry(w, 0).unwrap(); + assert_eq!(entry.patch.unwrap().global_tick(), 0); + assert_eq!(entry.expected, test_triplet(0)); + assert_eq!(entry.outputs, outputs); } #[test] fn history_unavailable_for_missing_tick() { let mut store = LocalProvenanceStore::new(); let w = test_worldline_id(); - let warp = test_warp_id(); - - store.register_worldline(w, warp).unwrap(); - store - .append(w, test_patch(0), test_triplet(0), vec![]) - .unwrap(); + store.register_worldline(w, test_warp_id()).unwrap(); + store.append_local_commit(test_entry(0)).unwrap(); - let result = store.patch(w, 1); + let result = store.entry(w, 1); assert!(matches!( result, Err(HistoryError::HistoryUnavailable { tick: 1 }) @@ -747,14 +1186,43 @@ mod tests { } #[test] - fn checkpoint_before() { + fn append_tick_gap_returns_error() { let mut store = LocalProvenanceStore::new(); let w = test_worldline_id(); - let warp = test_warp_id(); + store.register_worldline(w, test_warp_id()).unwrap(); - store.register_worldline(w, warp).unwrap(); + store.append_local_commit(test_entry(0)).unwrap(); + let result = store.append_local_commit(test_entry(2)); + assert!(matches!( + result, + Err(HistoryError::TickGap { + expected: 1, + got: 2 + }) + )); + } + + #[test] + fn append_local_commit_rejects_missing_head_key() { + let mut store = LocalProvenanceStore::new(); + let w = test_worldline_id(); + store.register_worldline(w, test_warp_id()).unwrap(); + + let mut entry = test_entry(0); + entry.head_key = None; + let result = store.append_local_commit(entry); + assert!(matches!( + result, + Err(HistoryError::LocalCommitMissingHeadKey { tick: 0 }) + )); + } + + #[test] + fn checkpoint_before() { + let mut store = 
LocalProvenanceStore::new(); + let w = test_worldline_id(); + store.register_worldline(w, test_warp_id()).unwrap(); - // Add checkpoints at ticks 0, 5, 10 store .add_checkpoint( w, @@ -783,160 +1251,51 @@ mod tests { ) .unwrap(); - // No checkpoint before tick 0 assert!(store.checkpoint_before(w, 0).is_none()); - - // Checkpoint at 0 is before tick 1 assert_eq!(store.checkpoint_before(w, 1).unwrap().tick, 0); - - // Checkpoint at 5 is before tick 7 assert_eq!(store.checkpoint_before(w, 7).unwrap().tick, 5); - - // Checkpoint at 10 is before tick 15 assert_eq!(store.checkpoint_before(w, 15).unwrap().tick, 10); - - // Checkpoint at 5 is before tick 10 (not inclusive) assert_eq!(store.checkpoint_before(w, 10).unwrap().tick, 5); } - #[test] - fn register_worldline_duplicate_same_u0_ref_is_noop() { - let mut store = LocalProvenanceStore::new(); - let wl = WorldlineId([1u8; 32]); - let warp = WarpId([2u8; 32]); - store.register_worldline(wl, warp).unwrap(); - store.register_worldline(wl, warp).unwrap(); // same u0_ref: ok - } - - #[test] - fn register_worldline_duplicate_different_u0_ref_errors() { - let mut store = LocalProvenanceStore::new(); - let wl = WorldlineId([1u8; 32]); - let warp_a = WarpId([2u8; 32]); - let warp_b = WarpId([3u8; 32]); - store.register_worldline(wl, warp_a).unwrap(); - let err = store.register_worldline(wl, warp_b).unwrap_err(); - assert!(matches!(err, HistoryError::WorldlineAlreadyExists(_))); - } - - #[test] - fn append_tick_gap_returns_error() { - let mut store = LocalProvenanceStore::new(); - let w = test_worldline_id(); - store.register_worldline(w, test_warp_id()).unwrap(); - - // Append tick 0 successfully - store - .append(w, test_patch(0), test_triplet(0), vec![]) - .unwrap(); - - // Skip tick 1 → append tick 2 should fail with TickGap - let result = store.append(w, test_patch(2), test_triplet(2), vec![]); - assert!( - matches!( - result, - Err(HistoryError::TickGap { - expected: 1, - got: 2 - }) - ), - "expected TickGap, got 
{result:?}" - ); - } - - #[test] - fn fork_collision_returns_worldline_already_exists() { - let mut store = LocalProvenanceStore::new(); - let source = test_worldline_id(); - let target = WorldlineId([99u8; 32]); - let warp = test_warp_id(); - - // Register source with one patch so fork has history to copy - store.register_worldline(source, warp).unwrap(); - store - .append(source, test_patch(0), test_triplet(0), vec![]) - .unwrap(); - - // Register target worldline (collision target) - store.register_worldline(target, warp).unwrap(); - - // Fork into already-registered target should fail - let result = store.fork(source, 0, target); - assert!( - matches!(result, Err(HistoryError::WorldlineAlreadyExists(id)) if id == target), - "expected WorldlineAlreadyExists, got {result:?}" - ); - } - #[test] fn checkpoint_convenience_records_and_is_visible() { - use crate::graph::GraphStore; - let mut store = LocalProvenanceStore::new(); let w = test_worldline_id(); let warp = test_warp_id(); store.register_worldline(w, warp).unwrap(); - // Create a GraphStore so checkpoint() can compute the state hash let graph_store = GraphStore::new(warp); - - // Record a checkpoint at tick 5 let cp = store.checkpoint(w, 5, &graph_store).unwrap(); - - // The checkpoint should be visible via checkpoint_before(tick > 5) let found = store.checkpoint_before(w, 6); assert_eq!(found.unwrap().tick, 5); assert_eq!(found.unwrap().state_hash, cp.state_hash); - - // checkpoint_before(5) should NOT return the checkpoint at tick 5 (strict <) - let before_5 = store.checkpoint_before(w, 5); - assert!( - before_5.is_none() || before_5.unwrap().tick < 5, - "checkpoint_before should be strictly less than the query tick" - ); - } - - // ─── AtomWrite Tests ───────────────────────────────────────────────────── - - use crate::attachment::AttachmentKey; - use crate::ident::{make_node_id, NodeKey}; - use crate::tick_patch::SlotId; - use crate::worldline::AtomWrite; - - fn test_node_key() -> NodeKey { - NodeKey { - 
warp_id: test_warp_id(), - local_id: make_node_id("test-atom"), - } } - fn test_rule_id() -> [u8; 32] { - [42u8; 32] - } + #[test] + fn fork_copies_entry_prefix_and_checkpoints() { + let mut store = LocalProvenanceStore::new(); + let source = test_worldline_id(); + let target = WorldlineId([99u8; 32]); + let warp = test_warp_id(); - /// Creates a patch with out_slots declaring which atoms were written. - /// This mirrors how the engine populates Out(μ) from footprints. - fn test_patch_with_atom_slots(tick: u64, atoms: &[NodeKey]) -> WorldlineTickPatchV1 { - let mut patch = test_patch(tick); - for atom in atoms { - // Atom values are attachments on the node's α plane (Paper III: Out(μ)) - patch - .out_slots - .push(SlotId::Attachment(AttachmentKey::node_alpha(*atom))); - } - patch - } + store.register_worldline(source, warp).unwrap(); + store.append_local_commit(test_entry(0)).unwrap(); + store.append_local_commit(test_entry(1)).unwrap(); + store + .add_checkpoint( + source, + CheckpointRef { + tick: 1, + state_hash: [1u8; 32], + }, + ) + .unwrap(); - /// Creates a patch with both in_slots and out_slots for a mutation. - /// A mutation reads (In) the previous value and writes (Out) the new one. 
- fn test_patch_with_atom_mutation(tick: u64, atoms: &[NodeKey]) -> WorldlineTickPatchV1 { - let mut patch = test_patch(tick); - for atom in atoms { - let slot = SlotId::Attachment(AttachmentKey::node_alpha(*atom)); - patch.in_slots.push(slot); - patch.out_slots.push(slot); - } - patch + store.fork(source, 0, target).unwrap(); + assert_eq!(store.len(target).unwrap(), 1); + assert_eq!(store.entry(target, 0).unwrap().expected, test_triplet(0)); + assert!(store.checkpoint_before(target, 1).is_none()); } #[test] @@ -945,42 +1304,22 @@ mod tests { let w = test_worldline_id(); store.register_worldline(w, test_warp_id()).unwrap(); - let atom_write = AtomWrite::new( - test_node_key(), - test_rule_id(), - 0, - None, // create - vec![1, 2, 3], - ); + let atom_write = AtomWrite::new(test_node_key(), test_rule_id(), 0, None, vec![1, 2, 3]); - store - .append_with_writes( - w, - test_patch_with_atom_slots(0, &[test_node_key()]), - test_triplet(0), - vec![], - vec![atom_write.clone()], - ) - .unwrap(); + append_test_entry( + &mut store, + w, + test_patch_with_atom_slots(0, &[test_node_key()]), + test_triplet(0), + vec![], + vec![atom_write.clone()], + ); let writes = store.atom_writes(w, 0).unwrap(); assert_eq!(writes.len(), 1); assert_eq!(writes[0], atom_write); } - #[test] - fn atom_writes_unavailable_for_missing_tick() { - let mut store = LocalProvenanceStore::new(); - let w = test_worldline_id(); - store.register_worldline(w, test_warp_id()).unwrap(); - - let result = store.atom_writes(w, 0); - assert!(matches!( - result, - Err(HistoryError::HistoryUnavailable { tick: 0 }) - )); - } - #[test] fn atom_history_walks_causal_cone() { let mut store = LocalProvenanceStore::new(); @@ -988,263 +1327,37 @@ mod tests { store.register_worldline(w, test_warp_id()).unwrap(); let atom_key = test_node_key(); - - // Tick 0: create atom (Out(μ) declares the atom slot) let write0 = AtomWrite::new(atom_key, test_rule_id(), 0, None, vec![1]); - store - .append_with_writes( - w, - 
test_patch_with_atom_slots(0, &[atom_key]), - test_triplet(0), - vec![], - vec![write0.clone()], - ) - .unwrap(); - - // Tick 1: mutate atom (In + Out declare read-then-write) let write1 = AtomWrite::new(atom_key, test_rule_id(), 1, Some(vec![1]), vec![2]); - store - .append_with_writes( - w, - test_patch_with_atom_mutation(1, &[atom_key]), - test_triplet(1), - vec![], - vec![write1.clone()], - ) - .unwrap(); - - // Tick 2: mutate atom again let write2 = AtomWrite::new(atom_key, test_rule_id(), 2, Some(vec![2]), vec![3]); - store - .append_with_writes( - w, - test_patch_with_atom_mutation(2, &[atom_key]), - test_triplet(2), - vec![], - vec![write2.clone()], - ) - .unwrap(); - - let history = store.atom_history(w, &atom_key).unwrap(); - assert_eq!(history.len(), 3); - assert_eq!(history[0], write0); - assert_eq!(history[1], write1); - assert_eq!(history[2], write2); - } - #[test] - fn atom_history_skips_ticks_not_in_causal_cone() { - let mut store = LocalProvenanceStore::new(); - let w = test_worldline_id(); - store.register_worldline(w, test_warp_id()).unwrap(); - - let atom_a = test_node_key(); - let atom_b = NodeKey { - warp_id: test_warp_id(), - local_id: make_node_id("other-atom"), - }; - - // Tick 0: create atom_a (Out(μ) = {atom_a}) - let write_a = AtomWrite::new(atom_a, test_rule_id(), 0, None, vec![1]); - store - .append_with_writes( - w, - test_patch_with_atom_slots(0, &[atom_a]), - test_triplet(0), - vec![], - vec![write_a.clone()], - ) - .unwrap(); - - // Tick 1: only touches atom_b — atom_a's causal cone skips this tick - let write_b = AtomWrite::new(atom_b, test_rule_id(), 1, None, vec![100]); - store - .append_with_writes( - w, - test_patch_with_atom_slots(1, &[atom_b]), - test_triplet(1), - vec![], - vec![write_b], - ) - .unwrap(); - - // Tick 2: mutate atom_a again (Out(μ) = {atom_a}) - let write_a2 = AtomWrite::new(atom_a, test_rule_id(), 2, Some(vec![1]), vec![2]); - store - .append_with_writes( - w, - test_patch_with_atom_mutation(2, &[atom_a]), 
- test_triplet(2), - vec![], - vec![write_a2.clone()], - ) - .unwrap(); - - // atom_a's history should skip tick 1 entirely - let history_a = store.atom_history(w, &atom_a).unwrap(); - assert_eq!(history_a.len(), 2); - assert_eq!(history_a[0], write_a); - assert_eq!(history_a[1], write_a2); - } - - #[test] - fn atom_history_terminates_at_creation() { - let mut store = LocalProvenanceStore::new(); - let w = test_worldline_id(); - store.register_worldline(w, test_warp_id()).unwrap(); - - let atom_key = test_node_key(); - - // Ticks 0-4: unrelated work (no atom_key in out_slots) - for tick in 0..5 { - store - .append_with_writes(w, test_patch(tick), test_triplet(tick), vec![], Vec::new()) - .unwrap(); - } - - // Tick 5: create atom_key - let write5 = AtomWrite::new(atom_key, test_rule_id(), 5, None, vec![1]); - store - .append_with_writes( - w, - test_patch_with_atom_slots(5, &[atom_key]), - test_triplet(5), - vec![], - vec![write5.clone()], - ) - .unwrap(); - - // Tick 6: mutate atom_key - let write6 = AtomWrite::new(atom_key, test_rule_id(), 6, Some(vec![1]), vec![2]); - store - .append_with_writes( - w, - test_patch_with_atom_mutation(6, &[atom_key]), - test_triplet(6), - vec![], - vec![write6.clone()], - ) - .unwrap(); - - let history = store.atom_history(w, &atom_key).unwrap(); - assert_eq!( - history.len(), - 2, - "should find creation at tick 5 and mutation at tick 6" + append_test_entry( + &mut store, + w, + test_patch_with_atom_slots(0, &[atom_key]), + test_triplet(0), + vec![], + vec![write0.clone()], ); - assert_eq!(history[0], write5); - assert_eq!(history[1], write6); - } - - #[test] - fn atom_history_preserves_within_tick_order() { - // Regression: if a single tick has [create, mutate] for the same atom, - // atom_history must return both in execution order (create then mutate). - // A naive forward iteration over tick_writes would hit is_create() first - // and early-return, losing the subsequent mutation. 
- let mut store = LocalProvenanceStore::new(); - let w = test_worldline_id(); - store.register_worldline(w, test_warp_id()).unwrap(); - - let atom_key = test_node_key(); - - // Single tick with two writes: create then mutate (same atom, same tick). - // This can happen when a rule creates an atom and immediately sets its value. - let create_write = AtomWrite::new(atom_key, test_rule_id(), 0, None, vec![1]); - let mutate_write = AtomWrite::new(atom_key, test_rule_id(), 0, Some(vec![1]), vec![2]); - - store - .append_with_writes( - w, - test_patch_with_atom_slots(0, &[atom_key]), - test_triplet(0), - vec![], - vec![create_write.clone(), mutate_write.clone()], - ) - .unwrap(); - - let history = store.atom_history(w, &atom_key).unwrap(); - assert_eq!( - history.len(), - 2, - "both create and mutate must be captured from the same tick" - ); - assert_eq!( - history[0], create_write, - "create must come first (execution order)" + append_test_entry( + &mut store, + w, + test_patch_with_atom_mutation(1, &[atom_key]), + test_triplet(1), + vec![], + vec![write1.clone()], ); - assert_eq!( - history[1], mutate_write, - "mutate must come second (execution order)" + append_test_entry( + &mut store, + w, + test_patch_with_atom_mutation(2, &[atom_key]), + test_triplet(2), + vec![], + vec![write2.clone()], ); - } - #[test] - fn atom_history_filters_by_atom() { - let mut store = LocalProvenanceStore::new(); - let w = test_worldline_id(); - store.register_worldline(w, test_warp_id()).unwrap(); - - let atom_a = test_node_key(); - let atom_b = NodeKey { - warp_id: test_warp_id(), - local_id: make_node_id("other-atom"), - }; - - // Both atoms written at tick 0 (both in Out(μ)) - let write_a = AtomWrite::new(atom_a, test_rule_id(), 0, None, vec![1]); - let write_b = AtomWrite::new(atom_b, test_rule_id(), 0, None, vec![100]); - store - .append_with_writes( - w, - test_patch_with_atom_slots(0, &[atom_a, atom_b]), - test_triplet(0), - vec![], - vec![write_a.clone(), write_b], - ) - 
.unwrap(); - - // Query only atom_a's history - let history_a = store.atom_history(w, &atom_a).unwrap(); - assert_eq!(history_a.len(), 1); - assert_eq!(history_a[0], write_a); - } - - #[test] - fn atom_write_is_create() { - let create_write = AtomWrite::new(test_node_key(), test_rule_id(), 0, None, vec![1]); - assert!(create_write.is_create()); - - let mutation_write = - AtomWrite::new(test_node_key(), test_rule_id(), 1, Some(vec![1]), vec![2]); - assert!(!mutation_write.is_create()); - } - - #[test] - fn atom_write_is_mutation() { - // Create is a mutation (value changed from nothing to something) - let create_write = AtomWrite::new(test_node_key(), test_rule_id(), 0, None, vec![1]); - assert!(create_write.is_mutation()); - - // Actual value change - let change_write = - AtomWrite::new(test_node_key(), test_rule_id(), 1, Some(vec![1]), vec![2]); - assert!(change_write.is_mutation()); - - // No-op (same value) - let noop_write = AtomWrite::new(test_node_key(), test_rule_id(), 2, Some(vec![1]), vec![1]); - assert!(!noop_write.is_mutation()); - } - - /// Creates a patch with `SlotId::Node(atom)` out_slots instead of - /// `SlotId::Attachment`. This exercises the skeleton-level provenance path - /// (UpsertNode/DeleteNode) as opposed to the payload-level path (SetAttachment). - fn test_patch_with_node_slots(tick: u64, atoms: &[NodeKey]) -> WorldlineTickPatchV1 { - let mut patch = test_patch(tick); - for atom in atoms { - patch.out_slots.push(SlotId::Node(*atom)); - } - patch + let history = store.atom_history(w, &atom_key).unwrap(); + assert_eq!(history, vec![write0, write1, write2]); } #[test] @@ -1254,64 +1367,70 @@ mod tests { store.register_worldline(w, test_warp_id()).unwrap(); let atom = test_node_key(); - - // Tick 0: create via SlotId::Node (skeleton-level write, e.g. 
UpsertNode) let create_write = AtomWrite::new(atom, test_rule_id(), 0, None, vec![1]); - store - .append_with_writes( - w, - test_patch_with_node_slots(0, &[atom]), - test_triplet(0), - vec![], - vec![create_write.clone()], - ) - .unwrap(); - - // Tick 1: mutate via SlotId::Node let mutate_write = AtomWrite::new(atom, test_rule_id(), 1, Some(vec![1]), vec![2]); - store - .append_with_writes( - w, - test_patch_with_node_slots(1, &[atom]), - test_triplet(1), - vec![], - vec![mutate_write.clone()], - ) - .unwrap(); - // atom_history should find both writes through the SlotId::Node path + append_test_entry( + &mut store, + w, + test_patch_with_node_slots(0, &[atom]), + test_triplet(0), + vec![], + vec![create_write.clone()], + ); + append_test_entry( + &mut store, + w, + test_patch_with_node_slots(1, &[atom]), + test_triplet(1), + vec![], + vec![mutate_write.clone()], + ); + let history = store.atom_history(w, &atom).unwrap(); - assert_eq!(history.len(), 2); - assert_eq!(history[0], create_write, "oldest first: creation"); - assert_eq!(history[1], mutate_write, "oldest first: mutation"); + assert_eq!(history, vec![create_write, mutate_write]); } #[test] - fn fork_copies_atom_writes() { - let mut store = LocalProvenanceStore::new(); - let source = test_worldline_id(); - let target = WorldlineId([99u8; 32]); - let warp = test_warp_id(); - - store.register_worldline(source, warp).unwrap(); - - let atom_write = AtomWrite::new(test_node_key(), test_rule_id(), 0, None, vec![1, 2, 3]); - store - .append_with_writes( - source, - test_patch_with_atom_slots(0, &[test_node_key()]), - test_triplet(0), - vec![], - vec![atom_write.clone()], - ) - .unwrap(); - - // Fork at tick 0 - store.fork(source, 0, target).unwrap(); + fn build_btr_round_trips_contiguous_segment() { + let mut service = ProvenanceService::new(); + let w = test_worldline_id(); + let state = WorldlineState::empty(); + service.register_worldline(w, &state).unwrap(); + 
service.append_local_commit(test_entry(0)).unwrap(); + service.append_local_commit(test_entry(1)).unwrap(); + + let btr = service.build_btr(w, 0, 2, 7, b"auth".to_vec()).unwrap(); + assert_eq!(btr.logical_counter, 7); + assert_eq!(btr.payload.start_tick, 0); + assert_eq!(btr.payload.entries.len(), 2); + service.validate_btr(&btr).unwrap(); + } - // Target should have the same atom writes - let target_writes = store.atom_writes(target, 0).unwrap(); - assert_eq!(target_writes.len(), 1); - assert_eq!(target_writes[0], atom_write); + #[test] + fn btr_validation_rejects_mixed_worldlines() { + let entry = ProvenanceEntry::local_commit( + WorldlineId([2u8; 32]), + 0, + 0, + WriterHeadKey { + worldline_id: WorldlineId([2u8; 32]), + head_id: make_head_id("b"), + }, + Vec::new(), + test_triplet(0), + test_patch(0), + Vec::new(), + Vec::new(), + ); + let payload = BtrPayload { + worldline_id: test_worldline_id(), + start_tick: 0, + entries: vec![entry], + }; + assert!(matches!( + payload.validate(), + Err(BtrError::MixedWorldline { .. }) + )); } } diff --git a/crates/warp-core/src/snapshot.rs b/crates/warp-core/src/snapshot.rs index b661d565..5ee33ec7 100644 --- a/crates/warp-core/src/snapshot.rs +++ b/crates/warp-core/src/snapshot.rs @@ -162,6 +162,12 @@ pub(crate) fn compute_state_root(state: &WarpState, root: &NodeKey) -> Hash { hasher.finalize().into() } +/// Computes the canonical state root for a full [`WarpState`] rooted at `root`. 
+#[must_use] +pub(crate) fn compute_state_root_for_warp_state(state: &WarpState, root: &NodeKey) -> Hash { + compute_state_root(state, root) +} + fn collect_reachable_graph( state: &WarpState, root: &NodeKey, diff --git a/crates/warp-core/src/worldline.rs b/crates/warp-core/src/worldline.rs index 4f47b2fd..9b259c6f 100644 --- a/crates/warp-core/src/worldline.rs +++ b/crates/warp-core/src/worldline.rs @@ -90,7 +90,7 @@ pub struct WorldlineTickHeaderV1 { /// Unlike [`WarpTickPatchV1`](crate::tick_patch::WarpTickPatchV1) which is the /// engine's internal format, this type is designed for external worldline storage /// and includes the header context needed for independent replay. -#[derive(Clone, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct WorldlineTickPatchV1 { /// Shared tick header metadata. diff --git a/crates/warp-core/tests/checkpoint_fork_tests.rs b/crates/warp-core/tests/checkpoint_fork_tests.rs index 256edc7c..d150cfac 100644 --- a/crates/warp-core/tests/checkpoint_fork_tests.rs +++ b/crates/warp-core/tests/checkpoint_fork_tests.rs @@ -13,7 +13,8 @@ mod common; use common::{ - create_add_node_patch, create_initial_store, test_cursor_id, test_warp_id, test_worldline_id, + append_fixture_entry, create_add_node_patch, create_initial_store, test_cursor_id, + test_warp_id, test_worldline_id, }; use warp_core::{ @@ -82,8 +83,7 @@ fn setup_worldline_with_ticks_and_checkpoints( commit_hash, }; - provenance - .append(worldline_id, patch, triplet, vec![]) + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, vec![]) .expect("append should succeed"); parents = vec![commit_hash]; @@ -243,8 +243,9 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { let mut original_expected_hashes: Vec = Vec::new(); for tick in 0..20 { let expected = provenance - .expected(original_worldline_id, tick) - .expect("original tick should exist"); + 
.entry(original_worldline_id, tick) + .expect("original tick should exist") + .expected; original_expected_hashes.push(expected); } @@ -255,19 +256,18 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { // Copy patches 0-7 from original to forked for tick in 0..=7 { - let patch = provenance - .patch(original_worldline_id, tick) - .expect("original patch should exist"); - let expected = provenance - .expected(original_worldline_id, tick) - .expect("original expected should exist"); - let outputs = provenance - .outputs(original_worldline_id, tick) - .expect("original outputs should exist"); - - provenance - .append(forked_worldline_id, patch, expected, outputs) - .expect("append to forked should succeed"); + let entry = provenance + .entry(original_worldline_id, tick) + .expect("original entry should exist"); + let patch = entry.patch.expect("original patch should exist"); + append_fixture_entry( + &mut provenance, + forked_worldline_id, + patch, + entry.expected, + entry.outputs, + ) + .expect("append to forked should succeed"); } // Verify fork has 8 ticks (0-7) @@ -286,7 +286,9 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { // Replay forked worldline to tick 7 to get the correct state for tick in 0..=7 { let patch = provenance - .patch(forked_worldline_id, tick) + .entry(forked_worldline_id, tick) + .expect("forked entry should exist") + .patch .expect("forked patch should exist"); patch .apply_to_store(&mut forked_store) @@ -296,8 +298,9 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { // Get parent commit_hash from the last copied tick (tick 7) for Merkle chain continuity let mut fork_parents: Vec = vec![ provenance - .expected(forked_worldline_id, 7) + .entry(forked_worldline_id, 7) .expect("forked tick 7 should exist") + .expected .commit_hash, ]; @@ -326,8 +329,7 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { commit_hash, }; - provenance - 
.append(forked_worldline_id, patch, triplet, vec![]) + append_fixture_entry(&mut provenance, forked_worldline_id, patch, triplet, vec![]) .expect("append divergent tick should succeed"); fork_parents = vec![commit_hash]; @@ -344,8 +346,9 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { for tick in 0..20 { let current_expected = provenance - .expected(original_worldline_id, tick) - .expect("original tick should still exist"); + .entry(original_worldline_id, tick) + .expect("original tick should still exist") + .expected; assert_eq!( current_expected, original_expected_hashes[tick as usize], "original worldline tick {tick} expected hash should be unchanged" @@ -355,11 +358,13 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { // Assert 2: Forked worldline has ticks 0-7 matching original for tick in 0..=7 { let original_expected = provenance - .expected(original_worldline_id, tick) - .expect("original tick should exist"); + .entry(original_worldline_id, tick) + .expect("original tick should exist") + .expected; let forked_expected = provenance - .expected(forked_worldline_id, tick) - .expect("forked tick should exist"); + .entry(forked_worldline_id, tick) + .expect("forked tick should exist") + .expected; assert_eq!( original_expected, forked_expected, @@ -378,11 +383,13 @@ fn fork_worldline_diverges_after_fork_tick_without_affecting_original() { for tick in 8..=10 { let original_expected = provenance - .expected(original_worldline_id, tick) - .expect("original tick should exist"); + .entry(original_worldline_id, tick) + .expect("original tick should exist") + .expected; let forked_expected = provenance - .expected(forked_worldline_id, tick) - .expect("forked tick should exist"); + .entry(forked_worldline_id, tick) + .expect("forked tick should exist") + .expected; // State roots should differ because patches created different nodes assert_ne!( diff --git a/crates/warp-core/tests/common/mod.rs 
b/crates/warp-core/tests/common/mod.rs index bb8c8fbc..c05d9667 100644 --- a/crates/warp-core/tests/common/mod.rs +++ b/crates/warp-core/tests/common/mod.rs @@ -13,12 +13,13 @@ )] use warp_core::{ - compute_commit_hash_v2, compute_state_root_for_warp_store, make_edge_id, make_node_id, - make_type_id, make_warp_id, ApplyResult, AtomPayload, AttachmentKey, AttachmentSet, - AttachmentValue, ConflictPolicy, CursorId, EdgeId, EdgeRecord, Engine, EngineBuilder, - Footprint, GraphStore, Hash, HashTriplet, LocalProvenanceStore, NodeId, NodeKey, NodeRecord, - PatternGraph, RewriteRule, SessionId, WarpId, WarpOp, WorldlineId, WorldlineTickHeaderV1, - WorldlineTickPatchV1, + compute_commit_hash_v2, compute_state_root_for_warp_store, make_edge_id, make_head_id, + make_node_id, make_type_id, make_warp_id, ApplyResult, AtomPayload, AtomWriteSet, + AttachmentKey, AttachmentSet, AttachmentValue, ConflictPolicy, CursorId, EdgeId, EdgeRecord, + Engine, EngineBuilder, Footprint, GraphStore, Hash, HashTriplet, LocalProvenanceStore, NodeId, + NodeKey, NodeRecord, OutputFrameSet, PatternGraph, ProvenanceEntry, ProvenanceStore, + RewriteRule, SessionId, WarpId, WarpOp, WorldlineId, WorldlineTickHeaderV1, + WorldlineTickPatchV1, WriterHeadKey, }; // ============================================================================= @@ -847,8 +848,7 @@ pub fn setup_worldline_with_ticks( commit_hash, }; - provenance - .append(worldline_id, patch, triplet, vec![]) + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, vec![]) .expect("append should succeed"); // Advance parent chain @@ -858,6 +858,80 @@ pub fn setup_worldline_with_ticks( (provenance, initial_store, warp_id, worldline_id) } +/// Returns the canonical writer head key used by provenance test fixtures. 
+#[must_use] +pub fn fixture_head_key(worldline_id: WorldlineId) -> WriterHeadKey { + WriterHeadKey { + worldline_id, + head_id: make_head_id("fixture"), + } +} + +/// Builds a fixture provenance entry from a worldline patch/triplet pair. +/// +/// Parent refs are derived from the current provenance tip so the resulting +/// entry forms a valid deterministic linear history for test replay. +pub fn fixture_entry( + provenance: &LocalProvenanceStore, + worldline_id: WorldlineId, + patch: WorldlineTickPatchV1, + expected: HashTriplet, + outputs: OutputFrameSet, + atom_writes: AtomWriteSet, +) -> Result { + let tick = patch.global_tick(); + let parents = provenance.tip_ref(worldline_id)?.into_iter().collect(); + Ok(ProvenanceEntry::local_commit( + worldline_id, + tick, + tick, + fixture_head_key(worldline_id), + parents, + expected, + patch, + outputs, + atom_writes, + )) +} + +/// Appends a fixture provenance entry using the entry-based Phase 4 API. +pub fn append_fixture_entry( + provenance: &mut LocalProvenanceStore, + worldline_id: WorldlineId, + patch: WorldlineTickPatchV1, + expected: HashTriplet, + outputs: OutputFrameSet, +) -> Result<(), warp_core::HistoryError> { + append_fixture_entry_with_writes( + provenance, + worldline_id, + patch, + expected, + outputs, + Vec::new(), + ) +} + +/// Appends a fixture provenance entry with explicit atom-write provenance. +pub fn append_fixture_entry_with_writes( + provenance: &mut LocalProvenanceStore, + worldline_id: WorldlineId, + patch: WorldlineTickPatchV1, + expected: HashTriplet, + outputs: OutputFrameSet, + atom_writes: AtomWriteSet, +) -> Result<(), warp_core::HistoryError> { + let entry = fixture_entry( + provenance, + worldline_id, + patch, + expected, + outputs, + atom_writes, + )?; + provenance.append_local_commit(entry) +} + /// Creates a "touch" rewrite rule for worker invariance tests. 
/// /// The rule sets a marker attachment on the scope node, exercising the diff --git a/crates/warp-core/tests/golden_vectors_phase0.rs b/crates/warp-core/tests/golden_vectors_phase0.rs index 19ba3b37..8f9b5ae2 100644 --- a/crates/warp-core/tests/golden_vectors_phase0.rs +++ b/crates/warp-core/tests/golden_vectors_phase0.rs @@ -137,8 +137,9 @@ fn gv002_provenance_replay_integrity() { // Verify each tick's hash triplet against pinned values for (tick, (exp_sr, exp_pd, exp_ch)) in EXPECTED.iter().enumerate() { let triplet = provenance - .expected(worldline_id, tick as u64) - .unwrap_or_else(|e| panic!("tick {tick}: {e}")); + .entry(worldline_id, tick as u64) + .unwrap_or_else(|e| panic!("tick {tick}: {e}")) + .expected; assert_eq!( hex(&triplet.state_root), @@ -209,8 +210,11 @@ fn gv003_fork_reproducibility() { // Prefix ticks 0..5 must be identical between original and fork for (tick, exp_ch) in EXPECTED_PREFIX_COMMITS.iter().enumerate() { - let original = provenance.expected(worldline_id, tick as u64).unwrap(); - let forked = provenance.expected(forked_id, tick as u64).unwrap(); + let original = provenance + .entry(worldline_id, tick as u64) + .unwrap() + .expected; + let forked = provenance.entry(forked_id, tick as u64).unwrap().expected; assert_eq!( original, forked, diff --git a/crates/warp-core/tests/inbox.rs b/crates/warp-core/tests/inbox.rs index aa9f59c8..3bf7f04f 100644 --- a/crates/warp-core/tests/inbox.rs +++ b/crates/warp-core/tests/inbox.rs @@ -6,8 +6,8 @@ use warp_core::{ make_head_id, make_intent_kind, make_node_id, make_type_id, Engine, EngineBuilder, GraphStore, InboxAddress, InboxPolicy, IngressDisposition, IngressEnvelope, IngressTarget, NodeId, - NodeRecord, PlaybackMode, SchedulerCoordinator, SchedulerKind, WorldlineId, WorldlineRuntime, - WorldlineState, WriterHead, WriterHeadKey, + NodeRecord, PlaybackMode, ProvenanceService, SchedulerCoordinator, SchedulerKind, WorldlineId, + WorldlineRuntime, WorldlineState, WriterHead, WriterHeadKey, }; fn 
wl(n: u8) -> WorldlineId { @@ -61,6 +61,16 @@ fn runtime_store(runtime: &WorldlineRuntime, worldline_id: WorldlineId) -> &Grap .unwrap() } +fn mirrored_provenance(runtime: &WorldlineRuntime) -> ProvenanceService { + let mut provenance = ProvenanceService::new(); + for (worldline_id, frontier) in runtime.worldlines().iter() { + provenance + .register_worldline(*worldline_id, frontier.state()) + .unwrap(); + } + provenance +} + #[test] fn runtime_ingest_commits_without_legacy_graph_inbox_nodes() { let mut runtime = WorldlineRuntime::new(); @@ -84,7 +94,9 @@ fn runtime_ingest_commits_without_legacy_graph_inbox_nodes() { } ); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); assert_eq!(records[0].head_key, head_key); @@ -128,7 +140,8 @@ fn runtime_ingest_is_idempotent_per_resolved_head_after_commit() { head_key: default_key, } ); - SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!( runtime.ingest(default_env).unwrap(), @@ -144,7 +157,7 @@ fn runtime_ingest_is_idempotent_per_resolved_head_after_commit() { head_key: named_key, } ); - SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); let named_ingress_id = named_env.ingress_id(); assert_eq!( runtime.ingest(named_env).unwrap(), @@ -179,7 +192,9 @@ fn runtime_ingest_keeps_distinct_intents_as_distinct_event_nodes() { runtime.ingest(intent_a.clone()).unwrap(); runtime.ingest(intent_b.clone()).unwrap(); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = 
mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); assert_eq!(records[0].admitted_count, 2); @@ -206,7 +221,9 @@ fn runtime_commit_patch_replays_to_post_state() { )) .unwrap(); - let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); let frontier = runtime.worldlines().get(&worldline_id).unwrap(); diff --git a/crates/warp-core/tests/invariant_property_tests.rs b/crates/warp-core/tests/invariant_property_tests.rs index bfbdad76..d9b0cb23 100644 --- a/crates/warp-core/tests/invariant_property_tests.rs +++ b/crates/warp-core/tests/invariant_property_tests.rs @@ -27,7 +27,10 @@ )] mod common; -use common::{create_add_node_patch, create_initial_store, test_warp_id, test_worldline_id}; +use common::{ + append_fixture_entry, create_add_node_patch, create_initial_store, test_warp_id, + test_worldline_id, +}; use proptest::prelude::*; @@ -87,7 +90,7 @@ proptest! { ); let triplet = HashTriplet { state_root, patch_digest: patch.patch_digest, commit_hash }; - provenance.append(worldline_id, patch, triplet, vec![]).unwrap(); + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, vec![]).unwrap(); parents = vec![commit_hash]; // Invariant: length must equal tick + 1 @@ -102,7 +105,8 @@ proptest! { patch_digest: gap_patch.patch_digest, commit_hash: [0u8; 32], }; - let result = provenance.append(worldline_id, gap_patch, gap_triplet, vec![]); + let result = + append_fixture_entry(&mut provenance, worldline_id, gap_patch, gap_triplet, vec![]); prop_assert!(result.is_err(), "appending at tick gap must fail"); // Invariant: attempting to re-append at an existing tick must fail @@ -113,7 +117,8 @@ proptest! 
{ patch_digest: dup_patch.patch_digest, commit_hash: [0u8; 32], }; - let dup_result = provenance.append(worldline_id, dup_patch, dup_triplet, vec![]); + let dup_result = + append_fixture_entry(&mut provenance, worldline_id, dup_patch, dup_triplet, vec![]); prop_assert!(dup_result.is_err(), "re-appending at existing tick must fail"); } } @@ -267,9 +272,7 @@ fn inv004_no_cross_worldline_leakage() { patch_digest: patch.patch_digest, commit_hash: ch, }; - provenance - .append(worldline_a, patch, triplet, vec![]) - .unwrap(); + append_fixture_entry(&mut provenance, worldline_a, patch, triplet, vec![]).unwrap(); parents_a = vec![ch]; } @@ -286,9 +289,7 @@ fn inv004_no_cross_worldline_leakage() { patch_digest: patch.patch_digest, commit_hash: ch, }; - provenance - .append(worldline_b, patch, triplet, vec![]) - .unwrap(); + append_fixture_entry(&mut provenance, worldline_b, patch, triplet, vec![]).unwrap(); parents_b = vec![ch]; } @@ -297,8 +298,12 @@ fn inv004_no_cross_worldline_leakage() { assert_eq!(provenance.len(worldline_b).unwrap(), 3); // State roots must differ (different node names) - let sr_a = provenance.expected(worldline_a, 4).unwrap().state_root; - let triplet_b_before = provenance.expected(worldline_b, 2).unwrap(); + let sr_a = provenance + .entry(worldline_a, 4) + .unwrap() + .expected + .state_root; + let triplet_b_before = provenance.entry(worldline_b, 2).unwrap().expected; let sr_b = triplet_b_before.state_root; assert_ne!( sr_a, sr_b, @@ -316,12 +321,10 @@ fn inv004_no_cross_worldline_leakage() { patch_digest: patch.patch_digest, commit_hash: ch, }; - provenance - .append(worldline_a, patch, triplet, vec![]) - .unwrap(); + append_fixture_entry(&mut provenance, worldline_a, patch, triplet, vec![]).unwrap(); assert_eq!(provenance.len(worldline_a).unwrap(), 6); assert_eq!( - provenance.expected(worldline_b, 2).unwrap(), + provenance.entry(worldline_b, 2).unwrap().expected, triplet_b_before, "appending to A must not mutate B's latest committed triplet" ); 
@@ -402,15 +405,16 @@ fn inv006_provenance_immutable_after_append() { commit_hash: ch, }; recorded_triplets.push(triplet.clone()); - provenance - .append(worldline_id, patch, triplet, vec![]) - .unwrap(); + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, vec![]).unwrap(); parents = vec![ch]; } // Verify all triplets remain unchanged after all appends for (tick, expected) in recorded_triplets.iter().enumerate() { - let actual = provenance.expected(worldline_id, tick as u64).unwrap(); + let actual = provenance + .entry(worldline_id, tick as u64) + .unwrap() + .expected; assert_eq!( actual, *expected, "tick {tick}: triplet must not change after append" diff --git a/crates/warp-core/tests/outputs_playback_tests.rs b/crates/warp-core/tests/outputs_playback_tests.rs index 62ff40c4..7bf894cc 100644 --- a/crates/warp-core/tests/outputs_playback_tests.rs +++ b/crates/warp-core/tests/outputs_playback_tests.rs @@ -16,8 +16,8 @@ mod common; use common::{ - create_add_node_patch, create_initial_store, setup_worldline_with_ticks, test_cursor_id, - test_session_id, test_warp_id, test_worldline_id, + append_fixture_entry, create_add_node_patch, create_initial_store, setup_worldline_with_ticks, + test_cursor_id, test_session_id, test_warp_id, test_worldline_id, }; use warp_core::materialization::{ @@ -93,8 +93,7 @@ fn setup_worldline_with_outputs( (velocity_channel, vec![(tick * 2) as u8]), ]; - provenance - .append(worldline_id, patch, triplet, outputs) + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, outputs) .expect("append should succeed"); // Advance parent chain @@ -355,8 +354,9 @@ fn outputs_match_recorded_bytes_for_same_tick() { // publish_truth queries prov_tick = cursor.tick - 1 (0-based index of last applied patch). 
let prov_tick = k - 1; let recorded_outputs = provenance - .outputs(worldline_id, prov_tick) - .expect("outputs should exist"); + .entry(worldline_id, prov_tick) + .expect("outputs should exist") + .outputs; // Get published frames let frames = sink.collect_frames(session_id); @@ -691,7 +691,8 @@ fn publish_truth_returns_error_for_unavailable_tick() { /// T1: Simulate a writer advancing and recording outputs via hexagonal architecture. /// /// This test demonstrates that we can simulate the engine's write behavior by manually -/// calling `provenance.append()`. The ProvenanceStore trait (port) allows us to test +/// appending [`warp_core::ProvenanceEntry`] values through the entry-based provenance +/// surface. The provenance port allows us to test /// the write side of the provenance contract without needing the real engine. #[test] fn writer_play_advances_and_records_outputs() { @@ -738,9 +739,7 @@ fn writer_play_advances_and_records_outputs() { // Create outputs with deterministic values: (channel, vec![tick as u8]) let outputs = vec![(output_channel, vec![tick as u8])]; - // Call provenance.append() - this is the hexagonal architecture pattern - provenance - .append(worldline_id, patch, triplet, outputs) + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, outputs) .expect("append should succeed"); // Advance parent chain for next iteration's Merkle computation @@ -754,19 +753,18 @@ fn writer_play_advances_and_records_outputs() { "provenance should have 10 entries" ); - // Assert: provenance.expected(worldline, t) exists for t in 0..10 + // Assert: provenance.entry(worldline, t) exists for t in 0..10 // Recompute the Merkle chain to verify stored commit_hashes match let mut verify_store = initial_store; let mut verify_parents: Vec = Vec::new(); for tick in 0..10u64 { - let triplet = provenance - .expected(worldline_id, tick) - .expect("expected should exist for tick"); + let entry = provenance + .entry(worldline_id, tick) + .expect("entry 
should exist for tick"); + let triplet = entry.expected; // Recompute commit_hash from scratch to verify Merkle chain integrity - let patch = provenance - .patch(worldline_id, tick) - .expect("patch should exist"); + let patch = entry.patch.expect("patch should exist"); patch .apply_to_store(&mut verify_store) .expect("apply should succeed"); @@ -785,11 +783,12 @@ fn writer_play_advances_and_records_outputs() { verify_parents = vec![expected_commit]; } - // Assert: provenance.outputs(worldline, t) contains expected values + // Assert: provenance.entry(worldline, t).outputs contains expected values for tick in 0..10u64 { let outputs = provenance - .outputs(worldline_id, tick) - .expect("outputs should exist for tick"); + .entry(worldline_id, tick) + .expect("outputs should exist for tick") + .outputs; assert_eq!(outputs.len(), 1, "should have 1 output for tick {tick}"); assert_eq!( @@ -852,18 +851,20 @@ fn truth_frames_are_cursor_addressed_and_authoritative() { // publish_truth queries prov_tick = cursor.tick - 1 = 2 let expected_triplet_3 = provenance - .expected(worldline_id, 2) - .expect("expected triplet for prov_tick 2 should exist"); + .entry(worldline_id, 2) + .expect("expected triplet for prov_tick 2 should exist") + .expected; assert_eq!( receipt_3.commit_hash, expected_triplet_3.commit_hash, - "receipt commit_hash should match provenance.expected(worldline, 2)" + "receipt commit_hash should match provenance.entry(worldline, 2).expected" ); - // Verify frame values match provenance.outputs(worldline, 2) (prov_tick = cursor.tick - 1) + // Verify frame values match provenance.entry(worldline, 2).outputs (prov_tick = cursor.tick - 1) let frames_3 = sink.collect_frames(session_id); let recorded_outputs_3 = provenance - .outputs(worldline_id, 2) - .expect("outputs for prov_tick 2 should exist"); + .entry(worldline_id, 2) + .expect("outputs for prov_tick 2 should exist") + .outputs; let position_output_3 = recorded_outputs_3 .iter() @@ -905,18 +906,20 @@ fn 
truth_frames_are_cursor_addressed_and_authoritative() { // publish_truth queries prov_tick = cursor.tick - 1 = 6 let expected_triplet_7 = provenance - .expected(worldline_id, 6) - .expect("expected triplet for prov_tick 6 should exist"); + .entry(worldline_id, 6) + .expect("expected triplet for prov_tick 6 should exist") + .expected; assert_eq!( receipt_7.commit_hash, expected_triplet_7.commit_hash, - "receipt commit_hash should match provenance.expected(worldline, 6)" + "receipt commit_hash should match provenance.entry(worldline, 6).expected" ); - // Verify frame values match provenance.outputs(worldline, 6) (prov_tick = cursor.tick - 1) + // Verify frame values match provenance.entry(worldline, 6).outputs (prov_tick = cursor.tick - 1) let frames_7 = sink.collect_frames(session_id); let recorded_outputs_7 = provenance - .outputs(worldline_id, 6) - .expect("outputs for prov_tick 6 should exist"); + .entry(worldline_id, 6) + .expect("outputs for prov_tick 6 should exist") + .outputs; let position_output_7 = recorded_outputs_7 .iter() diff --git a/crates/warp-core/tests/playback_cursor_tests.rs b/crates/warp-core/tests/playback_cursor_tests.rs index 45f9d853..629d254c 100644 --- a/crates/warp-core/tests/playback_cursor_tests.rs +++ b/crates/warp-core/tests/playback_cursor_tests.rs @@ -8,8 +8,8 @@ mod common; use common::{ - create_add_node_patch, create_initial_store, setup_worldline_with_ticks, test_cursor_id, - test_warp_id, test_worldline_id, + append_fixture_entry, create_add_node_patch, create_initial_store, setup_worldline_with_ticks, + test_cursor_id, test_warp_id, test_worldline_id, }; use warp_core::{ compute_commit_hash_v2, compute_state_root_for_warp_store, CursorRole, Hash, HashTriplet, @@ -69,8 +69,7 @@ fn cursor_seek_fails_on_corrupt_patch_or_hash_mismatch() { commit_hash, }; - provenance - .append(worldline_id, patch, triplet, vec![]) + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, vec![]) .expect("append should succeed"); 
parents = vec![commit_hash]; @@ -382,8 +381,7 @@ fn duplicate_worldline_registration_is_idempotent() { patch_digest: patch.patch_digest, commit_hash, }; - provenance - .append(worldline_id, patch, triplet, vec![]) + append_fixture_entry(&mut provenance, worldline_id, patch, triplet, vec![]) .expect("append should succeed"); // Verify history length is 1 diff --git a/crates/warp-core/tests/slice_theorem_proof.rs b/crates/warp-core/tests/slice_theorem_proof.rs index b2894d61..08e0152c 100644 --- a/crates/warp-core/tests/slice_theorem_proof.rs +++ b/crates/warp-core/tests/slice_theorem_proof.rs @@ -25,7 +25,7 @@ mod common; use std::panic::{catch_unwind, AssertUnwindSafe}; -use common::XorShift64; +use common::{append_fixture_entry, XorShift64}; use warp_core::{ compute_commit_hash_v2, compute_state_root_for_warp_store, HashTriplet, LocalProvenanceStore, WorldlineTickHeaderV1, WorldlineTickPatchV1, @@ -610,8 +610,7 @@ fn phase_2_and_3_playback_replay_matches_execution() { commit_hash, }; - provenance - .append(worldline_id, wl_patch, triplet, vec![]) + append_fixture_entry(&mut provenance, worldline_id, wl_patch, triplet, vec![]) .expect("append"); parents = vec![commit_hash]; } @@ -836,9 +835,7 @@ fn phase_6_semantic_correctness_dependent_chain() { commit_hash, }; - provenance - .append(worldline_id, wl_patch, triplet, vec![]) - .expect("append"); + append_fixture_entry(&mut provenance, worldline_id, wl_patch, triplet, vec![]).expect("append"); let mut cursor = PlaybackCursor::new( cursor_id, diff --git a/crates/warp-wasm/src/lib.rs b/crates/warp-wasm/src/lib.rs index 4f896f9f..ec537464 100644 --- a/crates/warp-wasm/src/lib.rs +++ b/crates/warp-wasm/src/lib.rs @@ -186,18 +186,15 @@ where match build_kernel_head(make_kernel) { Ok((kernel, head)) => { let envelope = OkEnvelope::new(&head); - match echo_wasm_abi::encode_cbor(&envelope) { - Ok(bytes) => { - install_kernel(Box::new(kernel)); - bytes_to_uint8array(&bytes) - } - Err(_) => { - clear_kernel(); - 
encode_err_raw( - kernel_port::error_codes::CODEC_ERROR, - "failed to encode response", - ) - } + if let Ok(bytes) = echo_wasm_abi::encode_cbor(&envelope) { + install_kernel(Box::new(kernel)); + bytes_to_uint8array(&bytes) + } else { + clear_kernel(); + encode_err_raw( + kernel_port::error_codes::CODEC_ERROR, + "failed to encode response", + ) } } Err(err) => encode_err(&err), @@ -614,7 +611,11 @@ mod init_tests { assert!(with_kernel_ref(|k| k.get_head()).is_ok()); clear_kernel(); - let err = with_kernel_ref(|k| k.get_head()).unwrap_err(); + let result = with_kernel_ref(|k| k.get_head()); + assert!(result.is_err()); + let Err(err) = result else { + unreachable!("get_head should fail after clear_kernel"); + }; assert_eq!(err.code, kernel_port::error_codes::NOT_INITIALIZED); } @@ -623,12 +624,17 @@ mod init_tests { clear_kernel(); install_kernel(Box::new(StubKernel)); let result = build_kernel_head(|| Err(warp_kernel::KernelInitError::NonFreshEngine)); - match result { - Ok(_) => panic!("build_kernel_head unexpectedly succeeded"), - Err(err) => assert_eq!(err.code, kernel_port::error_codes::ENGINE_ERROR), - } + assert!(result.is_err()); + let Err(err) = result else { + unreachable!("build_kernel_head unexpectedly succeeded"); + }; + assert_eq!(err.code, kernel_port::error_codes::ENGINE_ERROR); - let err = with_kernel_ref(|k| k.get_head()).unwrap_err(); + let result = with_kernel_ref(|k| k.get_head()); + assert!(result.is_err()); + let Err(err) = result else { + unreachable!("get_head should fail after init failure"); + }; assert_eq!(err.code, kernel_port::error_codes::NOT_INITIALIZED); } } diff --git a/crates/warp-wasm/src/warp_kernel.rs b/crates/warp-wasm/src/warp_kernel.rs index 0b9c2870..3ec4048b 100644 --- a/crates/warp-wasm/src/warp_kernel.rs +++ b/crates/warp-wasm/src/warp_kernel.rs @@ -16,9 +16,9 @@ use echo_wasm_abi::kernel_port::{ use echo_wasm_abi::unpack_intent_v1; use warp_core::{ make_head_id, make_intent_kind, make_node_id, make_type_id, Engine, 
EngineBuilder, GraphStore, - IngressDisposition, IngressEnvelope, IngressTarget, NodeRecord, PlaybackMode, RuntimeError, - SchedulerCoordinator, SchedulerKind, WorldlineId, WorldlineRuntime, WorldlineState, - WorldlineStateError, WriterHead, WriterHeadKey, + HistoryError, IngressDisposition, IngressEnvelope, IngressTarget, NodeRecord, PlaybackMode, + ProvenanceService, RuntimeError, SchedulerCoordinator, SchedulerKind, WorldlineId, + WorldlineRuntime, WorldlineState, WorldlineStateError, WriterHead, WriterHeadKey, }; /// Error returned when a [`WarpKernel`] cannot be initialized from a caller-supplied engine. @@ -28,6 +28,8 @@ pub enum KernelInitError { NonFreshEngine, /// The engine's backing state does not satisfy [`WorldlineState`] invariants. WorldlineState(WorldlineStateError), + /// Provenance registration failed while installing the default worldline. + Provenance(HistoryError), /// Runtime registration failed while installing the default worldline/head. Runtime(RuntimeError), } @@ -37,6 +39,7 @@ impl fmt::Display for KernelInitError { match self { Self::NonFreshEngine => write!(f, "WarpKernel::with_engine requires a fresh engine"), Self::WorldlineState(err) => err.fmt(f), + Self::Provenance(err) => err.fmt(f), Self::Runtime(err) => err.fmt(f), } } @@ -56,6 +59,12 @@ impl From for KernelInitError { } } +impl From for KernelInitError { + fn from(value: HistoryError) -> Self { + Self::Provenance(value) + } +} + /// App-agnostic kernel wrapping a `warp-core::Engine`. /// /// Constructed via [`WarpKernel::new`] (default empty engine) or @@ -63,6 +72,7 @@ impl From for KernelInitError { pub struct WarpKernel { engine: Engine, runtime: WorldlineRuntime, + provenance: ProvenanceService, default_worldline: WorldlineId, /// Whether materialization output has been drained since the last step. /// Prevents returning stale data on consecutive drain calls. 
@@ -115,10 +125,10 @@ impl WarpKernel { let root = engine.root_key(); let default_worldline = WorldlineId(root.warp_id.0); let mut runtime = WorldlineRuntime::new(); - runtime.register_worldline( - default_worldline, - WorldlineState::try_from(engine.state().clone())?, - )?; + let default_state = WorldlineState::try_from(engine.state().clone())?; + let mut provenance = ProvenanceService::new(); + provenance.register_worldline(default_worldline, &default_state)?; + runtime.register_worldline(default_worldline, default_state)?; runtime.register_writer_head(WriterHead::with_routing( WriterHeadKey { worldline_id: default_worldline, @@ -133,6 +143,7 @@ impl WarpKernel { Ok(Self { engine, runtime, + provenance, default_worldline, drained: true, registry, @@ -141,11 +152,10 @@ impl WarpKernel { /// Build a [`HeadInfo`] from the current engine snapshot. fn head_info(&self) -> HeadInfo { - let frontier = self - .runtime - .worldlines() - .get(&self.default_worldline) - .expect("default worldline must exist"); + let frontier = match self.runtime.worldlines().get(&self.default_worldline) { + Some(frontier) => frontier, + None => unreachable!("default worldline must exist"), + }; let snap = frontier .state() .last_snapshot() @@ -212,11 +222,15 @@ impl KernelPort for WarpKernel { // Phase 3 exposes only the default worldline/default writer through // the WASM ABI, so one coordinator pass can produce at most one // committed head step here. 
- let records = SchedulerCoordinator::super_tick(&mut self.runtime, &mut self.engine) - .map_err(|e| AbiError { - code: error_codes::ENGINE_ERROR, - message: e.to_string(), - })?; + let records = SchedulerCoordinator::super_tick( + &mut self.runtime, + &mut self.provenance, + &mut self.engine, + ) + .map_err(|e| AbiError { + code: error_codes::ENGINE_ERROR, + message: e.to_string(), + })?; if records.is_empty() { break; } @@ -243,13 +257,11 @@ impl KernelPort for WarpKernel { } self.drained = true; - let finalized = self - .runtime - .worldlines() - .get(&self.default_worldline) - .expect("default worldline must exist") - .state() - .last_materialization(); + let frontier = match self.runtime.worldlines().get(&self.default_worldline) { + Some(frontier) => frontier, + None => unreachable!("default worldline must exist"), + }; + let finalized = frontier.state().last_materialization(); let channels: Vec = finalized .iter() .map(|ch| ChannelData { @@ -270,11 +282,10 @@ impl KernelPort for WarpKernel { code: error_codes::INVALID_TICK, message: format!("tick {tick} exceeds addressable range"), })?; - let frontier = self - .runtime - .worldlines() - .get(&self.default_worldline) - .expect("default worldline must exist"); + let frontier = match self.runtime.worldlines().get(&self.default_worldline) { + Some(frontier) => frontier, + None => unreachable!("default worldline must exist"), + }; let snap = self .engine .snapshot_at_state(frontier.state(), tick_index) From 3418c36d056230460544cdc888e8d489dd9b8e4e Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 09:47:33 -0700 Subject: [PATCH 02/18] feat(warp-core): extend phase 4 provenance service --- crates/warp-core/src/provenance_store.rs | 156 +++++++++++++++++++++++ crates/warp-core/tests/inbox.rs | 70 +++++++++- 2 files changed, 224 insertions(+), 2 deletions(-) diff --git a/crates/warp-core/src/provenance_store.rs b/crates/warp-core/src/provenance_store.rs index a9337283..a1ee7d57 100644 --- 
a/crates/warp-core/src/provenance_store.rs +++ b/crates/warp-core/src/provenance_store.rs @@ -806,6 +806,54 @@ impl ProvenanceService { ) } + /// Records a checkpoint for a worldline. + /// + /// # Errors + /// + /// Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. + pub fn add_checkpoint( + &mut self, + worldline_id: WorldlineId, + checkpoint: CheckpointRef, + ) -> Result<(), HistoryError> { + self.store.add_checkpoint(worldline_id, checkpoint) + } + + /// Creates a checkpoint at the given tick using the provided state. + /// + /// # Errors + /// + /// Returns [`HistoryError::WorldlineNotFound`] if the worldline hasn't been registered. + pub fn checkpoint( + &mut self, + worldline_id: WorldlineId, + tick: u64, + state: &GraphStore, + ) -> Result { + self.store.checkpoint(worldline_id, tick, state) + } + + /// Returns the largest checkpoint with `checkpoint.tick < tick`, if any. + #[must_use] + pub fn checkpoint_before(&self, worldline_id: WorldlineId, tick: u64) -> Option { + self.store.checkpoint_before(worldline_id, tick) + } + + /// Creates a new worldline that is a prefix-copy of the source up to `fork_tick`. + /// + /// # Errors + /// + /// Returns [`HistoryError`] if the source is missing, the target exists, or + /// `fork_tick` is out of range. + pub fn fork( + &mut self, + source: WorldlineId, + fork_tick: u64, + new_id: WorldlineId, + ) -> Result<(), HistoryError> { + self.store.fork(source, fork_tick, new_id) + } + /// Returns the deterministic tip ref for a worldline, if any. 
/// /// # Errors @@ -1407,6 +1455,114 @@ mod tests { service.validate_btr(&btr).unwrap(); } + #[test] + fn build_btr_preserves_parent_refs_and_head_attribution() { + let mut service = ProvenanceService::new(); + let w = test_worldline_id(); + let state = WorldlineState::empty(); + service.register_worldline(w, &state).unwrap(); + + let first = test_entry(0); + let first_ref = first.as_ref(); + let second = ProvenanceEntry::local_commit( + w, + 1, + 1, + test_head_key(), + vec![first_ref], + test_triplet(1), + test_patch(1), + vec![(make_channel_id("test:ok"), b"ok".to_vec())], + Vec::new(), + ); + + service.append_local_commit(first).unwrap(); + service.append_local_commit(second.clone()).unwrap(); + + let btr = service.build_btr(w, 0, 2, 9, b"auth".to_vec()).unwrap(); + assert_eq!(btr.logical_counter, 9); + assert_eq!(btr.payload.entries[1].head_key, Some(test_head_key())); + assert_eq!(btr.payload.entries[1].parents, vec![first_ref]); + assert_eq!(btr.payload.entries[1], second); + } + + #[test] + fn service_fork_copies_entry_prefix_and_checkpoints() { + let mut service = ProvenanceService::new(); + let source = test_worldline_id(); + let target = WorldlineId([99u8; 32]); + let state = WorldlineState::empty(); + + service.register_worldline(source, &state).unwrap(); + service.append_local_commit(test_entry(0)).unwrap(); + service.append_local_commit(test_entry(1)).unwrap(); + service + .add_checkpoint( + source, + CheckpointRef { + tick: 1, + state_hash: [1u8; 32], + }, + ) + .unwrap(); + + service.fork(source, 0, target).unwrap(); + + assert_eq!(service.len(target).unwrap(), 1); + assert_eq!(service.entry(target, 0).unwrap().expected, test_triplet(0)); + assert!(service.checkpoint_before(target, 1).is_none()); + } + + #[test] + fn btr_validation_rejects_non_contiguous_ticks() { + let payload = BtrPayload { + worldline_id: test_worldline_id(), + start_tick: 0, + entries: vec![test_entry(0), test_entry(2)], + }; + assert!(matches!( + payload.validate(), + 
Err(BtrError::NonContiguousTicks { + expected: 1, + got: 2 + }) + )); + } + + #[test] + fn validate_btr_rejects_bad_input_boundary_hash() { + let mut service = ProvenanceService::new(); + let w = test_worldline_id(); + let state = WorldlineState::empty(); + service.register_worldline(w, &state).unwrap(); + service.append_local_commit(test_entry(0)).unwrap(); + + let mut btr = service.build_btr(w, 0, 1, 3, b"auth".to_vec()).unwrap(); + btr.input_boundary_hash = [9u8; 32]; + + assert!(matches!( + service.validate_btr(&btr), + Err(BtrError::InputBoundaryHashMismatch { .. }) + )); + } + + #[test] + fn validate_btr_rejects_bad_output_boundary_hash() { + let mut service = ProvenanceService::new(); + let w = test_worldline_id(); + let state = WorldlineState::empty(); + service.register_worldline(w, &state).unwrap(); + service.append_local_commit(test_entry(0)).unwrap(); + + let mut btr = service.build_btr(w, 0, 1, 3, b"auth".to_vec()).unwrap(); + btr.output_boundary_hash = [7u8; 32]; + + assert!(matches!( + service.validate_btr(&btr), + Err(BtrError::OutputBoundaryHashMismatch { .. 
}) + )); + } + #[test] fn btr_validation_rejects_mixed_worldlines() { let entry = ProvenanceEntry::local_commit( diff --git a/crates/warp-core/tests/inbox.rs b/crates/warp-core/tests/inbox.rs index 3bf7f04f..fccf5019 100644 --- a/crates/warp-core/tests/inbox.rs +++ b/crates/warp-core/tests/inbox.rs @@ -6,8 +6,9 @@ use warp_core::{ make_head_id, make_intent_kind, make_node_id, make_type_id, Engine, EngineBuilder, GraphStore, InboxAddress, InboxPolicy, IngressDisposition, IngressEnvelope, IngressTarget, NodeId, - NodeRecord, PlaybackMode, ProvenanceService, SchedulerCoordinator, SchedulerKind, WorldlineId, - WorldlineRuntime, WorldlineState, WriterHead, WriterHeadKey, + NodeRecord, PlaybackMode, ProvenanceEventKind, ProvenanceService, ProvenanceStore, + SchedulerCoordinator, SchedulerKind, WorldlineId, WorldlineRuntime, WorldlineState, + WorldlineTickPatchV1, WriterHead, WriterHeadKey, }; fn wl(n: u8) -> WorldlineId { @@ -240,3 +241,68 @@ fn runtime_commit_patch_replays_to_post_state() { "runtime tick patch must replay to the committed post-state" ); } + +#[test] +fn runtime_commit_provenance_matches_worldline_state_mirror() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + let head_key = register_head(&mut runtime, worldline_id, "default", None, true); + + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test/runtime"), + b"mirror-consistency".to_vec(), + )) + .unwrap(); + + let mut provenance = mirrored_provenance(&runtime); + let records = + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); + assert_eq!(records.len(), 1); + + let frontier = runtime.worldlines().get(&worldline_id).unwrap(); + let state = frontier.state(); + let (snapshot, _receipt, patch) = state.tick_history().last().unwrap().clone(); + let entry = 
provenance.entry(worldline_id, 0).unwrap(); + let expected_outputs = state + .last_materialization() + .iter() + .map(|channel| (channel.channel, channel.data.clone())) + .collect::>(); + + assert_eq!(entry.worldline_id, worldline_id); + assert_eq!(entry.worldline_tick, 0); + assert_eq!(entry.global_tick, runtime.global_tick()); + assert_eq!(entry.head_key, Some(head_key)); + assert!(matches!(entry.event_kind, ProvenanceEventKind::LocalCommit)); + assert!( + entry.parents.is_empty(), + "first local commit should be parentless" + ); + assert_eq!(entry.expected.state_root, snapshot.state_root); + assert_eq!(entry.expected.patch_digest, snapshot.patch_digest); + assert_eq!(entry.expected.commit_hash, snapshot.hash); + assert_eq!(entry.outputs, expected_outputs); + + let expected_patch = WorldlineTickPatchV1 { + header: warp_core::WorldlineTickHeaderV1 { + global_tick: runtime.global_tick(), + policy_id: patch.policy_id(), + rule_pack_id: patch.rule_pack_id(), + plan_digest: snapshot.plan_digest, + decision_digest: snapshot.decision_digest, + rewrites_digest: snapshot.rewrites_digest, + }, + warp_id: snapshot.root.warp_id, + ops: patch.ops().to_vec(), + in_slots: patch.in_slots().to_vec(), + out_slots: patch.out_slots().to_vec(), + patch_digest: patch.digest(), + }; + assert_eq!(entry.patch, Some(expected_patch)); +} From 470c8e4eae818bce03decd3d9fd54f75f419a072 Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 13:39:45 -0700 Subject: [PATCH 03/18] feat(warp-core): add explicit observation contract --- CHANGELOG.md | 17 + crates/echo-wasm-abi/src/kernel_port.rs | 188 +++- crates/warp-core/Cargo.toml | 2 +- crates/warp-core/README.md | 9 + crates/warp-core/src/lib.rs | 6 + crates/warp-core/src/observation.rs | 913 ++++++++++++++++++ crates/warp-wasm/src/lib.rs | 19 +- crates/warp-wasm/src/warp_kernel.rs | 369 +++++-- .../ADR-0011-explicit-observation-contract.md | 226 +++++ docs/plans/adr-0008-and-0009.md | 125 ++- 
.../SPEC-0004-worldlines-playback-truthbus.md | 7 + docs/spec/SPEC-0005-provenance-payload.md | 4 + docs/spec/SPEC-0009-wasm-abi-v1.md | 110 ++- 13 files changed, 1872 insertions(+), 123 deletions(-) create mode 100644 crates/warp-core/src/observation.rs create mode 100644 docs/adr/ADR-0011-explicit-observation-contract.md diff --git a/CHANGELOG.md b/CHANGELOG.md index c0869ce9..f786d382 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,23 @@ ## Unreleased +### feat(warp-core): close Phase 4 and pivot reads to observe + +- **Changed** Phase 4 provenance/BTR work is now the documented substrate + baseline: provenance is entry-based, parent refs are stored explicitly, and + the standalone `ProvenanceService` owns authoritative worldline history. +- **Added** `ObservationService::observe(...)` as the canonical internal read + path with explicit worldline, coordinate, frame, and projection semantics. +- **Added** deterministic observation artifacts and error mapping: + `INVALID_WORLDLINE`, `INVALID_TICK`, `UNSUPPORTED_FRAME_PROJECTION`, + `UNSUPPORTED_QUERY`, and `OBSERVATION_UNAVAILABLE`. +- **Changed** `WarpKernel` and the WASM ABI now expose `observe(...)`, while + `get_head`, `snapshot_at`, `execute_query`, and `drain_view_ops` are thin + one-phase adapters over the observation contract. +- **Changed** `drain_view_ops()` is now legacy adapter/debug behavior only: it + reads recorded truth through `observe(...)` and tracks only adapter-local + drain state instead of mutating runtime-owned materialization state. 
+ ### fix(warp-core): close final Phase 3 PR review threads - **Fixed** `Engine::commit_with_state()` now restores both the engine-owned diff --git a/crates/echo-wasm-abi/src/kernel_port.rs b/crates/echo-wasm-abi/src/kernel_port.rs index d169adf4..97d13c89 100644 --- a/crates/echo-wasm-abi/src/kernel_port.rs +++ b/crates/echo-wasm-abi/src/kernel_port.rs @@ -48,14 +48,24 @@ pub mod error_codes { pub const INVALID_INTENT: u32 = 2; /// An internal engine error occurred during processing. pub const ENGINE_ERROR: u32 = 3; - /// The requested tick index is out of bounds. - pub const INVALID_TICK: u32 = 4; + /// Legacy snapshot/history tick index is out of bounds. + pub const LEGACY_INVALID_TICK: u32 = 4; /// The requested operation is not yet supported by this kernel. pub const NOT_SUPPORTED: u32 = 5; /// CBOR encoding or decoding failed. pub const CODEC_ERROR: u32 = 6; /// The provided payload bytes were invalid or corrupted. pub const INVALID_PAYLOAD: u32 = 7; + /// The requested worldline is not registered. + pub const INVALID_WORLDLINE: u32 = 8; + /// The requested observation tick is not available. + pub const INVALID_TICK: u32 = 9; + /// The requested frame/projection pairing is invalid. + pub const UNSUPPORTED_FRAME_PROJECTION: u32 = 10; + /// Query observation is not implemented yet. + pub const UNSUPPORTED_QUERY: u32 = 11; + /// The requested observation cannot be produced at this coordinate. + pub const OBSERVATION_UNAVAILABLE: u32 = 12; } // --------------------------------------------------------------------------- @@ -115,6 +125,169 @@ pub struct ChannelData { pub data: Vec, } +/// Coordinate selector for an observation request. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ObservationCoordinate { + /// Worldline to observe. + pub worldline_id: Vec, + /// Requested coordinate within the worldline. + pub at: ObservationAt, +} + +/// Requested position within a worldline. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum ObservationAt { + /// Observe the current frontier. + Frontier, + /// Observe a specific committed historical tick. + Tick { + /// Zero-based historical tick index. + tick: u64, + }, +} + +/// Declared semantic frame for an observation. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ObservationFrame { + /// Commit-boundary metadata and snapshots. + CommitBoundary, + /// Recorded truth emitted by committed history. + RecordedTruth, + /// Query-shaped observation frame. + QueryView, +} + +/// Requested observation projection. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum ObservationProjection { + /// Head metadata at the resolved coordinate. + Head, + /// Snapshot metadata at the resolved coordinate. + Snapshot, + /// Recorded truth channel payloads. + TruthChannels { + /// Optional channel filter. `None` means all recorded channels. + channels: Option>>, + }, + /// Query payload placeholder. + Query { + /// Stable query identifier. + query_id: u32, + /// Canonical vars payload bytes. + vars_bytes: Vec, + }, +} + +/// Canonical observation request DTO. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ObservationRequest { + /// Requested worldline coordinate. + pub coordinate: ObservationCoordinate, + /// Declared read frame. + pub frame: ObservationFrame, + /// Requested projection within that frame. + pub projection: ObservationProjection, +} + +/// Resolved coordinate returned with every observation artifact. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ResolvedObservationCoordinate { + /// Observation contract version. + pub observation_version: u32, + /// Worldline actually observed. 
+ pub worldline_id: Vec, + /// Original coordinate selector from the request. + pub requested_at: ObservationAt, + /// Concrete resolved committed tick. + pub resolved_tick: u64, + /// Canonical state root at the resolved coordinate. + pub state_root: Vec, + /// Canonical commit hash at the resolved coordinate. + pub commit_hash: Vec, +} + +/// Minimal head observation payload. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct HeadObservation { + /// Current committed tick count at the observed frontier. + pub tick: u64, + /// Graph-only state hash (32 bytes). + pub state_root: Vec, + /// Canonical commit hash (32 bytes). + pub commit_id: Vec, +} + +/// Minimal historical snapshot payload. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct SnapshotObservation { + /// Historical tick index being observed. + pub tick: u64, + /// Graph-only state hash (32 bytes). + pub state_root: Vec, + /// Canonical commit hash (32 bytes). + pub commit_id: Vec, +} + +/// Observation payload variants returned by the kernel. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum ObservationPayload { + /// Head payload. + Head { + /// Head observation. + head: HeadObservation, + }, + /// Snapshot payload. + Snapshot { + /// Snapshot observation. + snapshot: SnapshotObservation, + }, + /// Recorded truth payload. + TruthChannels { + /// Recorded channel payloads. + channels: Vec, + }, + /// Query payload. + QueryBytes { + /// Raw query result bytes. + data: Vec, + }, +} + +/// Canonical hash input for an observation artifact. +/// +/// This excludes `artifact_hash` itself so kernels can compute the hash over +/// the resolved coordinate, frame, projection, and canonical payload bytes. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ObservationHashInput { + /// Resolved coordinate metadata. 
+    pub resolved: ResolvedObservationCoordinate,
+    /// Declared semantic frame.
+    pub frame: ObservationFrame,
+    /// Declared projection.
+    pub projection: ObservationProjection,
+    /// Observation payload.
+    pub payload: ObservationPayload,
+}
+
+/// Full observation artifact returned by `observe(...)`.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct ObservationArtifact {
+    /// Resolved coordinate metadata.
+    pub resolved: ResolvedObservationCoordinate,
+    /// Declared semantic frame.
+    pub frame: ObservationFrame,
+    /// Declared projection.
+    pub projection: ObservationProjection,
+    /// Canonical artifact hash.
+    pub artifact_hash: Vec<u8>,
+    /// Observation payload.
+    pub payload: ObservationPayload,
+}
+
 /// Response from [`KernelPort::drain_view_ops`].
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct DrainResponse {
@@ -234,6 +407,17 @@ pub trait KernelPort {
     /// after stepping. A budget of 0 is a no-op that returns the current head.
     fn step(&mut self, budget: u32) -> Result<StepResponse, AbiError>;
 
+    /// Observe a worldline at an explicit coordinate and frame.
+    ///
+    /// The default implementation reports that the observation contract is not
+    /// supported by this kernel implementation.
+    fn observe(&self, _request: ObservationRequest) -> Result<ObservationArtifact, AbiError> {
+        Err(AbiError {
+            code: error_codes::NOT_SUPPORTED,
+            message: "observe is not supported by this kernel".into(),
+        })
+    }
+
     /// Drain materialized ViewOps channels since the last drain.
     ///
     /// Returns finalized channel data.
Calling drain twice without an diff --git a/crates/warp-core/Cargo.toml b/crates/warp-core/Cargo.toml index 10050568..41397104 100644 --- a/crates/warp-core/Cargo.toml +++ b/crates/warp-core/Cargo.toml @@ -23,13 +23,13 @@ ciborium = "0.2" rustc-hash = "2.1.1" serde = { version = "1.0", features = ["derive"], optional = true } libm = "0.2" +echo-wasm-abi = { workspace = true } [dev-dependencies] serde = { version = "1.0", features = ["derive"] } serde-value = "0.7" proptest = { version = "1.5" } echo-dry-tests = { workspace = true } -echo-wasm-abi = { workspace = true } [features] default = [] diff --git a/crates/warp-core/README.md b/crates/warp-core/README.md index 5d4b6697..890f0ea2 100644 --- a/crates/warp-core/README.md +++ b/crates/warp-core/README.md @@ -31,8 +31,17 @@ The `warp-core` crate also contains a small “website kernel spike” used by t - runnable writer heads advance in canonical `(worldline_id, head_id)` order, - commits run against the shared `WorldlineState` frontier for that worldline, - empty inboxes do not advance frontier ticks. +- `ObservationService::observe(...)` is the canonical read path: + - every read names an explicit worldline, coordinate, frame, and projection, + - commit-boundary reads and recorded-truth reads share one deterministic + artifact model, + - observation is read-only and does not mutate runtime, provenance, inboxes, + or compatibility mirrors. - The runtime/kernel production path no longer uses `sim/inbox`, `edge:pending`, or `Engine::dispatch_next_intent(...)`. +- Legacy read surfaces such as `get_head()`, `snapshot_at()`, and + `drain_view_ops()` now exist only as one-phase adapters above `observe(...)` + and are scheduled for deletion at the start of Phase 6 / ABI v2. - `Engine::ingest_intent(intent_bytes)` and `Engine::ingest_inbox_event(seq, payload)` remain legacy compatibility helpers for isolated tests and older spike call sites. 
diff --git a/crates/warp-core/src/lib.rs b/crates/warp-core/src/lib.rs index abdc71c1..663e51c7 100644 --- a/crates/warp-core/src/lib.rs +++ b/crates/warp-core/src/lib.rs @@ -95,6 +95,7 @@ mod ident; pub mod inbox; /// Materialization subsystem for deterministic channel-based output. pub mod materialization; +mod observation; /// Parallel execution module. /// /// Provides both serial and parallel execution strategies for rewrite rules, @@ -190,6 +191,11 @@ pub use playback::{ // --- Session types --- pub use playback::{SessionId, ViewSession}; // --- Truth delivery --- +pub use observation::{ + HeadObservation, ObservationArtifact, ObservationAt, ObservationCoordinate, ObservationError, + ObservationFrame, ObservationPayload, ObservationProjection, ObservationProjectionKind, + ObservationRequest, ObservationService, ResolvedObservationCoordinate, WorldlineSnapshot, +}; pub use playback::{CursorReceipt, TruthFrame, TruthSink}; pub use provenance_store::{ BoundaryTransitionRecord, BtrError, BtrPayload, CheckpointRef, HistoryError, diff --git a/crates/warp-core/src/observation.rs b/crates/warp-core/src/observation.rs new file mode 100644 index 00000000..a13ffca1 --- /dev/null +++ b/crates/warp-core/src/observation.rs @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Explicit observation contract for worldline reads. +//! +//! Phase 5 makes observation the single canonical internal read path. Every +//! meaningful read names: +//! +//! - a worldline, +//! - a coordinate, +//! - a semantic frame, +//! - and a projection. +//! +//! Observation is strictly read-only. It never advances runtime state, drains +//! inboxes, rewrites provenance, or mutates compatibility mirrors. 
+ +use blake3::Hasher; +use echo_wasm_abi::kernel_port as abi; +use thiserror::Error; + +use crate::coordinator::WorldlineRuntime; +use crate::engine_impl::Engine; +use crate::ident::Hash; +use crate::materialization::ChannelId; +use crate::provenance_store::{ProvenanceService, ProvenanceStore}; +use crate::snapshot::Snapshot; +use crate::worldline::WorldlineId; + +const OBSERVATION_VERSION: u32 = 1; +const OBSERVATION_ARTIFACT_DOMAIN: &[u8] = b"echo:observation-artifact:v1\0"; + +/// Coordinate selector for an observation request. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ObservationCoordinate { + /// Worldline to observe. + pub worldline_id: WorldlineId, + /// Requested coordinate within the worldline. + pub at: ObservationAt, +} + +/// Requested position within a worldline. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ObservationAt { + /// Observe the current worldline frontier. + Frontier, + /// Observe a specific committed historical tick. + Tick(u64), +} + +impl ObservationAt { + fn to_abi(self) -> abi::ObservationAt { + match self { + Self::Frontier => abi::ObservationAt::Frontier, + Self::Tick(tick) => abi::ObservationAt::Tick { tick }, + } + } +} + +/// Semantic frame declared by an observation request. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ObservationFrame { + /// Read commit-boundary state metadata. + CommitBoundary, + /// Read recorded truth from provenance outputs. + RecordedTruth, + /// Read query-shaped projections. + QueryView, +} + +impl ObservationFrame { + fn to_abi(self) -> abi::ObservationFrame { + match self { + Self::CommitBoundary => abi::ObservationFrame::CommitBoundary, + Self::RecordedTruth => abi::ObservationFrame::RecordedTruth, + Self::QueryView => abi::ObservationFrame::QueryView, + } + } +} + +/// Coarse projection kind used by the validity matrix and deterministic errors. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ObservationProjectionKind { + /// Head metadata projection. 
+ Head, + /// Snapshot metadata projection. + Snapshot, + /// Recorded truth channels projection. + TruthChannels, + /// Query payload projection. + Query, +} + +impl ObservationProjectionKind { + fn to_abi(self, projection: &ObservationProjection) -> abi::ObservationProjection { + match (self, projection) { + (Self::Head, ObservationProjection::Head) => abi::ObservationProjection::Head, + (Self::Snapshot, ObservationProjection::Snapshot) => { + abi::ObservationProjection::Snapshot + } + (Self::TruthChannels, ObservationProjection::TruthChannels { channels }) => { + abi::ObservationProjection::TruthChannels { + channels: channels.as_ref().map(|ids| { + ids.iter() + .map(|channel| channel.0.to_vec()) + .collect::>() + }), + } + } + ( + Self::Query, + ObservationProjection::Query { + query_id, + vars_bytes, + }, + ) => abi::ObservationProjection::Query { + query_id: *query_id, + vars_bytes: vars_bytes.clone(), + }, + _ => unreachable!("projection kind and projection must agree"), + } + } +} + +/// Requested projection within a frame. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ObservationProjection { + /// Head metadata projection. + Head, + /// Snapshot metadata projection. + Snapshot, + /// Recorded truth channels projection. + TruthChannels { + /// Optional channel filter. `None` means all recorded channels. + channels: Option>, + }, + /// Query payload placeholder. + Query { + /// Stable query identifier. + query_id: u32, + /// Canonical vars payload bytes. + vars_bytes: Vec, + }, +} + +impl ObservationProjection { + /// Returns the coarse projection kind used for validation and error reporting. + #[must_use] + pub fn kind(&self) -> ObservationProjectionKind { + match self { + Self::Head => ObservationProjectionKind::Head, + Self::Snapshot => ObservationProjectionKind::Snapshot, + Self::TruthChannels { .. } => ObservationProjectionKind::TruthChannels, + Self::Query { .. 
} => ObservationProjectionKind::Query, + } + } + + fn to_abi(&self) -> abi::ObservationProjection { + self.kind().to_abi(self) + } +} + +/// Canonical observation request. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ObservationRequest { + /// Worldline coordinate being observed. + pub coordinate: ObservationCoordinate, + /// Declared semantic frame. + pub frame: ObservationFrame, + /// Requested projection within that frame. + pub projection: ObservationProjection, +} + +/// Fully resolved coordinate returned with every observation. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ResolvedObservationCoordinate { + /// Observation contract version. + pub observation_version: u32, + /// Worldline that was actually observed. + pub worldline_id: WorldlineId, + /// Original coordinate selector from the request. + pub requested_at: ObservationAt, + /// Concrete resolved tick. + pub resolved_tick: u64, + /// Canonical state root at the resolved coordinate. + pub state_root: Hash, + /// Canonical commit hash at the resolved coordinate. + pub commit_hash: Hash, +} + +impl ResolvedObservationCoordinate { + fn to_abi(&self) -> abi::ResolvedObservationCoordinate { + abi::ResolvedObservationCoordinate { + observation_version: self.observation_version, + worldline_id: self.worldline_id.0.to_vec(), + requested_at: self.requested_at.to_abi(), + resolved_tick: self.resolved_tick, + state_root: self.state_root.to_vec(), + commit_hash: self.commit_hash.to_vec(), + } + } +} + +/// Minimal frontier/head observation payload. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeadObservation { + /// Observed tick. + pub tick: u64, + /// Canonical state root at that tick. + pub state_root: Hash, + /// Canonical commit hash at that tick. 
+ pub commit_hash: Hash, +} + +impl HeadObservation { + fn to_abi(&self) -> abi::HeadObservation { + abi::HeadObservation { + tick: self.tick, + state_root: self.state_root.to_vec(), + commit_id: self.commit_hash.to_vec(), + } + } +} + +/// Minimal historical snapshot observation payload. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct WorldlineSnapshot { + /// Observed historical tick. + pub tick: u64, + /// Canonical state root at that tick. + pub state_root: Hash, + /// Canonical commit hash at that tick. + pub commit_hash: Hash, +} + +impl WorldlineSnapshot { + fn to_abi(&self) -> abi::SnapshotObservation { + abi::SnapshotObservation { + tick: self.tick, + state_root: self.state_root.to_vec(), + commit_id: self.commit_hash.to_vec(), + } + } +} + +/// Observation payload variants. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ObservationPayload { + /// Head metadata. + Head(HeadObservation), + /// Historical snapshot metadata. + Snapshot(WorldlineSnapshot), + /// Recorded truth payloads in channel-id order. + TruthChannels(Vec<(ChannelId, Vec)>), + /// Query result bytes. + QueryBytes(Vec), +} + +impl ObservationPayload { + fn to_abi(&self) -> abi::ObservationPayload { + match self { + Self::Head(head) => abi::ObservationPayload::Head { + head: head.to_abi(), + }, + Self::Snapshot(snapshot) => abi::ObservationPayload::Snapshot { + snapshot: snapshot.to_abi(), + }, + Self::TruthChannels(channels) => abi::ObservationPayload::TruthChannels { + channels: channels + .iter() + .map(|(channel, data)| abi::ChannelData { + channel_id: channel.0.to_vec(), + data: data.clone(), + }) + .collect(), + }, + Self::QueryBytes(data) => abi::ObservationPayload::QueryBytes { data: data.clone() }, + } + } +} + +/// Full observation artifact with deterministic identity. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ObservationArtifact { + /// Resolved coordinate metadata. + pub resolved: ResolvedObservationCoordinate, + /// Declared semantic frame. 
+ pub frame: ObservationFrame, + /// Declared projection. + pub projection: ObservationProjection, + /// Deterministic artifact hash. + pub artifact_hash: Hash, + /// Observation payload. + pub payload: ObservationPayload, +} + +impl ObservationArtifact { + /// Converts this artifact into the shared ABI DTO shape. + #[must_use] + pub fn to_abi(&self) -> abi::ObservationArtifact { + abi::ObservationArtifact { + resolved: self.resolved.to_abi(), + frame: self.frame.to_abi(), + projection: self.projection.to_abi(), + artifact_hash: self.artifact_hash.to_vec(), + payload: self.payload.to_abi(), + } + } +} + +/// Deterministic observation failures. +#[derive(Clone, Debug, PartialEq, Eq, Error)] +pub enum ObservationError { + /// The requested worldline is not registered. + #[error("invalid worldline: {0:?}")] + InvalidWorldline(WorldlineId), + /// The requested historical tick is not available. + #[error("invalid tick {tick} for worldline {worldline_id:?}")] + InvalidTick { + /// Worldline that was targeted. + worldline_id: WorldlineId, + /// Requested tick. + tick: u64, + }, + /// The frame/projection pairing is not valid in v1. + #[error("unsupported frame/projection pairing: {frame:?} + {projection:?}")] + UnsupportedFrameProjection { + /// Declared frame. + frame: ObservationFrame, + /// Requested projection kind. + projection: ObservationProjectionKind, + }, + /// Query observation is not implemented yet. + #[error("query observation is not supported in phase 5")] + UnsupportedQuery, + /// The requested observation cannot be produced at this coordinate. + #[error("observation unavailable for worldline {worldline_id:?} at {at:?}")] + ObservationUnavailable { + /// Worldline that was targeted. + worldline_id: WorldlineId, + /// Requested coordinate. + at: ObservationAt, + }, + /// Canonical artifact encoding failed. + #[error("observation artifact encoding failed: {0}")] + CodecFailure(String), +} + +/// Immutable observation service. 
+pub struct ObservationService;
+
+impl ObservationService {
+    /// Observe a worldline at an explicit coordinate and frame.
+    ///
+    /// The runtime, provenance store, and engine are borrowed immutably. This
+    /// method never mutates live frontier state or recorded history.
+    ///
+    /// # Errors
+    ///
+    /// Returns [`ObservationError`] for invalid worldlines/ticks, unsupported
+    /// frame/projection pairings, unsupported query requests, or unavailable
+    /// recorded truth.
+    pub fn observe(
+        runtime: &WorldlineRuntime,
+        provenance: &ProvenanceService,
+        engine: &Engine,
+        request: ObservationRequest,
+    ) -> Result<ObservationArtifact, ObservationError> {
+        let worldline_id = request.coordinate.worldline_id;
+        if runtime.worldlines().get(&worldline_id).is_none() {
+            return Err(ObservationError::InvalidWorldline(worldline_id));
+        }
+        Self::validate_frame_projection(request.frame, &request.projection)?;
+        if matches!(request.frame, ObservationFrame::QueryView) {
+            return Err(ObservationError::UnsupportedQuery);
+        }
+
+        let resolved = Self::resolve_coordinate(runtime, provenance, engine, &request)?;
+        let payload = match (&request.frame, &request.projection) {
+            (ObservationFrame::CommitBoundary, ObservationProjection::Head) => {
+                ObservationPayload::Head(HeadObservation {
+                    tick: resolved.resolved_tick,
+                    state_root: resolved.state_root,
+                    commit_hash: resolved.commit_hash,
+                })
+            }
+            (ObservationFrame::CommitBoundary, ObservationProjection::Snapshot) => {
+                ObservationPayload::Snapshot(WorldlineSnapshot {
+                    tick: resolved.resolved_tick,
+                    state_root: resolved.state_root,
+                    commit_hash: resolved.commit_hash,
+                })
+            }
+            (
+                ObservationFrame::RecordedTruth,
+                ObservationProjection::TruthChannels { channels },
+            ) => {
+                let entry = provenance
+                    .entry(worldline_id, resolved.resolved_tick)
+                    .map_err(|_| ObservationError::ObservationUnavailable {
+                        worldline_id,
+                        at: request.coordinate.at,
+                    })?;
+                let outputs = match channels {
+                    Some(filter) => entry
+                        .outputs
+                        .into_iter()
+                        .filter(|(channel, _)| filter.contains(channel))
+                        .collect(),
+                    None => entry.outputs,
+                };
+                ObservationPayload::TruthChannels(outputs)
+            }
+            (ObservationFrame::QueryView, ObservationProjection::Query { .. }) => {
+                return Err(ObservationError::UnsupportedQuery);
+            }
+            _ => unreachable!("validity matrix must reject unsupported combinations"),
+        };
+
+        let artifact_hash =
+            Self::compute_artifact_hash(&resolved, request.frame, &request.projection, &payload)?;
+        Ok(ObservationArtifact {
+            resolved,
+            frame: request.frame,
+            projection: request.projection,
+            artifact_hash,
+            payload,
+        })
+    }
+
+    fn validate_frame_projection(
+        frame: ObservationFrame,
+        projection: &ObservationProjection,
+    ) -> Result<(), ObservationError> {
+        let projection_kind = projection.kind();
+        let valid = matches!(
+            (frame, projection_kind),
+            (
+                ObservationFrame::CommitBoundary,
+                ObservationProjectionKind::Head | ObservationProjectionKind::Snapshot
+            ) | (
+                ObservationFrame::RecordedTruth,
+                ObservationProjectionKind::TruthChannels
+            ) | (
+                ObservationFrame::QueryView,
+                ObservationProjectionKind::Query
+            )
+        );
+        if valid {
+            Ok(())
+        } else {
+            Err(ObservationError::UnsupportedFrameProjection {
+                frame,
+                projection: projection_kind,
+            })
+        }
+    }
+
+    fn resolve_coordinate(
+        runtime: &WorldlineRuntime,
+        provenance: &ProvenanceService,
+        engine: &Engine,
+        request: &ObservationRequest,
+    ) -> Result<ResolvedObservationCoordinate, ObservationError> {
+        let worldline_id = request.coordinate.worldline_id;
+        let frontier = runtime
+            .worldlines()
+            .get(&worldline_id)
+            .ok_or(ObservationError::InvalidWorldline(worldline_id))?;
+
+        match (request.frame, request.coordinate.at) {
+            (ObservationFrame::CommitBoundary, ObservationAt::Frontier) => {
+                let snapshot = frontier
+                    .state()
+                    .last_snapshot()
+                    .cloned()
+                    .unwrap_or_else(|| engine.snapshot_for_state(frontier.state()));
+                Ok(Self::resolved_commit_boundary(
+                    worldline_id,
+                    request.coordinate.at,
+                    frontier.frontier_tick(),
+                    &snapshot,
+                ))
+            }
+            (ObservationFrame::CommitBoundary,
ObservationAt::Tick(tick)) => { + let entry = provenance + .entry(worldline_id, tick) + .map_err(|_| ObservationError::InvalidTick { worldline_id, tick })?; + Ok(ResolvedObservationCoordinate { + observation_version: OBSERVATION_VERSION, + worldline_id, + requested_at: request.coordinate.at, + resolved_tick: tick, + state_root: entry.expected.state_root, + commit_hash: entry.expected.commit_hash, + }) + } + (ObservationFrame::RecordedTruth, ObservationAt::Frontier) => { + let Some(resolved_tick) = frontier.frontier_tick().checked_sub(1) else { + return Err(ObservationError::ObservationUnavailable { + worldline_id, + at: request.coordinate.at, + }); + }; + let entry = provenance.entry(worldline_id, resolved_tick).map_err(|_| { + ObservationError::ObservationUnavailable { + worldline_id, + at: request.coordinate.at, + } + })?; + Ok(ResolvedObservationCoordinate { + observation_version: OBSERVATION_VERSION, + worldline_id, + requested_at: request.coordinate.at, + resolved_tick, + state_root: entry.expected.state_root, + commit_hash: entry.expected.commit_hash, + }) + } + (ObservationFrame::RecordedTruth, ObservationAt::Tick(tick)) => { + let entry = provenance + .entry(worldline_id, tick) + .map_err(|_| ObservationError::InvalidTick { worldline_id, tick })?; + Ok(ResolvedObservationCoordinate { + observation_version: OBSERVATION_VERSION, + worldline_id, + requested_at: request.coordinate.at, + resolved_tick: tick, + state_root: entry.expected.state_root, + commit_hash: entry.expected.commit_hash, + }) + } + (ObservationFrame::QueryView, _) => Err(ObservationError::UnsupportedQuery), + } + } + + fn resolved_commit_boundary( + worldline_id: WorldlineId, + requested_at: ObservationAt, + resolved_tick: u64, + snapshot: &Snapshot, + ) -> ResolvedObservationCoordinate { + ResolvedObservationCoordinate { + observation_version: OBSERVATION_VERSION, + worldline_id, + requested_at, + resolved_tick, + state_root: snapshot.state_root, + commit_hash: snapshot.hash, + } + } + + fn 
compute_artifact_hash( + resolved: &ResolvedObservationCoordinate, + frame: ObservationFrame, + projection: &ObservationProjection, + payload: &ObservationPayload, + ) -> Result { + let input = abi::ObservationHashInput { + resolved: resolved.to_abi(), + frame: frame.to_abi(), + projection: projection.to_abi(), + payload: payload.to_abi(), + }; + let bytes = echo_wasm_abi::encode_cbor(&input) + .map_err(|err| ObservationError::CodecFailure(err.to_string()))?; + let mut hasher = Hasher::new(); + hasher.update(OBSERVATION_ARTIFACT_DOMAIN); + hasher.update(&bytes); + Ok(hasher.finalize().into()) + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use crate::coordinator::WorldlineRuntime; + use crate::head::{make_head_id, WriterHead, WriterHeadKey}; + use crate::head_inbox::{make_intent_kind, InboxPolicy, IngressEnvelope, IngressTarget}; + use crate::ident::{make_node_id, make_type_id, WarpId}; + use crate::materialization::make_channel_id; + use crate::receipt::TickReceipt; + use crate::record::NodeRecord; + use crate::tick_patch::{TickCommitStatus, WarpTickPatchV1}; + use crate::worldline::{HashTriplet, WorldlineTickHeaderV1, WorldlineTickPatchV1}; + use crate::{ + EngineBuilder, GraphStore, PlaybackMode, ProvenanceEntry, SchedulerCoordinator, + WorldlineState, + }; + + fn wl(n: u8) -> WorldlineId { + WorldlineId([n; 32]) + } + + fn empty_runtime_fixture() -> (Engine, WorldlineRuntime, ProvenanceService, WorldlineId) { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + + let engine = EngineBuilder::new(store, root).workers(1).build(); + let default_worldline = WorldlineId(engine.root_key().warp_id.0); + let mut runtime = WorldlineRuntime::new(); + let default_state = WorldlineState::try_from(engine.state().clone()).unwrap(); + let mut provenance = ProvenanceService::new(); + provenance + .register_worldline(default_worldline, 
&default_state) + .unwrap(); + runtime + .register_worldline(default_worldline, default_state) + .unwrap(); + runtime + .register_writer_head(WriterHead::with_routing( + WriterHeadKey { + worldline_id: default_worldline, + head_id: make_head_id("default"), + }, + PlaybackMode::Play, + InboxPolicy::AcceptAll, + None, + true, + )) + .unwrap(); + (engine, runtime, provenance, default_worldline) + } + + fn one_commit_fixture() -> (Engine, WorldlineRuntime, ProvenanceService, WorldlineId) { + let (mut engine, mut runtime, mut provenance, worldline_id) = empty_runtime_fixture(); + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("echo.intent/test"), + b"hello".to_vec(), + )) + .unwrap(); + SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); + (engine, runtime, provenance, worldline_id) + } + + fn recorded_truth_fixture() -> (Engine, WorldlineRuntime, ProvenanceService, WorldlineId) { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let engine = EngineBuilder::new(store, root).workers(1).build(); + let worldline_id = wl(7); + let mut state = WorldlineState::empty(); + let snapshot = engine.snapshot_for_state(&state); + state.tick_history.push(( + snapshot.clone(), + TickReceipt::new(snapshot.tx, Vec::new(), Vec::new()), + WarpTickPatchV1::new( + crate::POLICY_ID_NO_POLICY_V0, + crate::blake3_empty(), + TickCommitStatus::Committed, + Vec::new(), + Vec::new(), + Vec::new(), + ), + )); + state.last_snapshot = Some(snapshot.clone()); + let mut runtime = WorldlineRuntime::new(); + runtime + .register_worldline(worldline_id, state.clone()) + .unwrap(); + runtime + .register_writer_head(WriterHead::with_routing( + WriterHeadKey { + worldline_id, + head_id: make_head_id("default"), + }, + PlaybackMode::Play, + InboxPolicy::AcceptAll, + None, + true, + )) + .unwrap(); + 
let mut provenance = ProvenanceService::new(); + provenance.register_worldline(worldline_id, &state).unwrap(); + let channel = make_channel_id("test:truth"); + provenance + .append_local_commit(ProvenanceEntry::local_commit( + worldline_id, + 0, + 1, + WriterHeadKey { + worldline_id, + head_id: make_head_id("default"), + }, + Vec::new(), + HashTriplet { + state_root: snapshot.state_root, + patch_digest: snapshot.patch_digest, + commit_hash: snapshot.hash, + }, + WorldlineTickPatchV1 { + header: WorldlineTickHeaderV1 { + global_tick: 1, + policy_id: crate::POLICY_ID_NO_POLICY_V0, + rule_pack_id: crate::blake3_empty(), + plan_digest: snapshot.plan_digest, + decision_digest: snapshot.decision_digest, + rewrites_digest: snapshot.rewrites_digest, + }, + warp_id: WarpId(root.0), + ops: Vec::new(), + in_slots: Vec::new(), + out_slots: Vec::new(), + patch_digest: snapshot.patch_digest, + }, + vec![(channel, b"truth".to_vec())], + Vec::new(), + )) + .unwrap(); + (engine, runtime, provenance, worldline_id) + } + + #[test] + fn validity_matrix_accepts_only_centralized_pairs() { + let truth = ObservationProjection::TruthChannels { channels: None }; + let query = ObservationProjection::Query { + query_id: 7, + vars_bytes: Vec::new(), + }; + + assert!(ObservationService::validate_frame_projection( + ObservationFrame::CommitBoundary, + &ObservationProjection::Head, + ) + .is_ok()); + assert!(ObservationService::validate_frame_projection( + ObservationFrame::CommitBoundary, + &ObservationProjection::Snapshot, + ) + .is_ok()); + assert!(ObservationService::validate_frame_projection( + ObservationFrame::RecordedTruth, + &truth, + ) + .is_ok()); + assert!( + ObservationService::validate_frame_projection(ObservationFrame::QueryView, &query,) + .is_ok() + ); + + assert!(matches!( + ObservationService::validate_frame_projection( + ObservationFrame::RecordedTruth, + &ObservationProjection::Head, + ), + Err(ObservationError::UnsupportedFrameProjection { .. 
}) + )); + assert!(matches!( + ObservationService::validate_frame_projection(ObservationFrame::CommitBoundary, &truth,), + Err(ObservationError::UnsupportedFrameProjection { .. }) + )); + } + + #[test] + fn frontier_head_matches_live_frontier_snapshot() { + let (engine, runtime, provenance, worldline_id) = empty_runtime_fixture(); + let artifact = ObservationService::observe( + &runtime, + &provenance, + &engine, + ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::CommitBoundary, + projection: ObservationProjection::Head, + }, + ) + .unwrap(); + + let frontier = runtime.worldlines().get(&worldline_id).unwrap(); + let snapshot = engine.snapshot_for_state(frontier.state()); + assert_eq!(artifact.resolved.resolved_tick, frontier.frontier_tick()); + assert_eq!(artifact.resolved.state_root, snapshot.state_root); + assert_eq!(artifact.resolved.commit_hash, snapshot.hash); + } + + #[test] + fn recorded_truth_frontier_without_commits_is_unavailable() { + let (engine, runtime, provenance, worldline_id) = empty_runtime_fixture(); + let err = ObservationService::observe( + &runtime, + &provenance, + &engine, + ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::RecordedTruth, + projection: ObservationProjection::TruthChannels { channels: None }, + }, + ) + .unwrap_err(); + assert_eq!( + err, + ObservationError::ObservationUnavailable { + worldline_id, + at: ObservationAt::Frontier, + } + ); + } + + #[test] + fn recorded_truth_reads_recorded_outputs_only() { + let (engine, runtime, provenance, worldline_id) = recorded_truth_fixture(); + let artifact = ObservationService::observe( + &runtime, + &provenance, + &engine, + ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::RecordedTruth, + projection: 
ObservationProjection::TruthChannels { channels: None }, + }, + ) + .unwrap(); + let channels = if let ObservationPayload::TruthChannels(channels) = artifact.payload { + channels + } else { + Vec::new() + }; + assert_eq!(channels.len(), 1); + assert_eq!(channels[0].1, b"truth".to_vec()); + } + + #[test] + fn identical_requests_produce_stable_artifact_hashes() { + let (engine, runtime, provenance, worldline_id) = one_commit_fixture(); + let request = ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::CommitBoundary, + projection: ObservationProjection::Head, + }; + let first = + ObservationService::observe(&runtime, &provenance, &engine, request.clone()).unwrap(); + let second = ObservationService::observe(&runtime, &provenance, &engine, request).unwrap(); + assert_eq!(first.artifact_hash, second.artifact_hash); + assert_eq!(first.to_abi(), second.to_abi()); + } + + #[test] + fn observation_is_zero_write_for_runtime_and_provenance() { + let (engine, runtime, provenance, worldline_id) = one_commit_fixture(); + let runtime_before = runtime.clone(); + let provenance_before = provenance.clone(); + + let artifact = ObservationService::observe( + &runtime, + &provenance, + &engine, + ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id, + at: ObservationAt::Tick(0), + }, + frame: ObservationFrame::CommitBoundary, + projection: ObservationProjection::Snapshot, + }, + ) + .unwrap(); + + assert_eq!(artifact.resolved.resolved_tick, 0); + let frontier_after = runtime.worldlines().get(&worldline_id).unwrap(); + let frontier_before = runtime_before.worldlines().get(&worldline_id).unwrap(); + assert_eq!(runtime.global_tick(), runtime_before.global_tick()); + assert_eq!( + frontier_after.frontier_tick(), + frontier_before.frontier_tick() + ); + assert_eq!( + frontier_after.state().current_tick(), + frontier_before.state().current_tick() + ); + assert_eq!( + frontier_after + 
.state() + .last_snapshot() + .map(|snapshot| snapshot.hash), + frontier_before + .state() + .last_snapshot() + .map(|snapshot| snapshot.hash) + ); + assert_eq!( + provenance.len(worldline_id).unwrap(), + provenance_before.len(worldline_id).unwrap() + ); + assert_eq!( + provenance.entry(worldline_id, 0).unwrap(), + provenance_before.entry(worldline_id, 0).unwrap() + ); + } +} diff --git a/crates/warp-wasm/src/lib.rs b/crates/warp-wasm/src/lib.rs index ec537464..57520121 100644 --- a/crates/warp-wasm/src/lib.rs +++ b/crates/warp-wasm/src/lib.rs @@ -30,7 +30,7 @@ use wasm_bindgen::JsValue; #[cfg(feature = "engine")] use echo_wasm_abi::kernel_port::HeadInfo; use echo_wasm_abi::kernel_port::{ - self, AbiError, ErrEnvelope, KernelPort, OkEnvelope, RawBytesResponse, + self, AbiError, ErrEnvelope, KernelPort, ObservationRequest, OkEnvelope, RawBytesResponse, }; use std::cell::RefCell; @@ -282,6 +282,23 @@ pub fn execute_query(query_id: u32, vars_bytes: &[u8]) -> Uint8Array { encode_result(result) } +/// Observe a worldline at an explicit coordinate, frame, and projection. +/// +/// The request bytes must decode as canonical-CBOR `ObservationRequest`. +#[wasm_bindgen] +pub fn observe(request_bytes: &[u8]) -> Uint8Array { + let request = match echo_wasm_abi::decode_cbor::(request_bytes) { + Ok(request) => request, + Err(err) => { + return encode_err(&AbiError { + code: kernel_port::error_codes::INVALID_PAYLOAD, + message: format!("invalid observation request payload: {err}"), + }) + } + }; + encode_result(with_kernel_ref(|k| k.observe(request))) +} + /// Replay to a specific tick and return the snapshot. /// /// Returns CBOR-encoded `{ ok: true, data: }` or error envelope. 
diff --git a/crates/warp-wasm/src/warp_kernel.rs b/crates/warp-wasm/src/warp_kernel.rs index 3ec4048b..1211cb9e 100644 --- a/crates/warp-wasm/src/warp_kernel.rs +++ b/crates/warp-wasm/src/warp_kernel.rs @@ -11,14 +11,18 @@ use std::fmt; use echo_wasm_abi::kernel_port::{ error_codes, AbiError, ChannelData, DispatchResponse, DrainResponse, HeadInfo, KernelPort, + ObservationArtifact as AbiObservationArtifact, ObservationFrame as AbiObservationFrame, + ObservationProjection as AbiObservationProjection, ObservationRequest as AbiObservationRequest, RegistryInfo, StepResponse, ABI_VERSION, }; use echo_wasm_abi::unpack_intent_v1; use warp_core::{ make_head_id, make_intent_kind, make_node_id, make_type_id, Engine, EngineBuilder, GraphStore, - HistoryError, IngressDisposition, IngressEnvelope, IngressTarget, NodeRecord, PlaybackMode, - ProvenanceService, RuntimeError, SchedulerCoordinator, SchedulerKind, WorldlineId, - WorldlineRuntime, WorldlineState, WorldlineStateError, WriterHead, WriterHeadKey, + HistoryError, IngressDisposition, IngressEnvelope, IngressTarget, NodeRecord, ObservationAt, + ObservationCoordinate, ObservationError, ObservationFrame, ObservationPayload, + ObservationProjection, ObservationRequest, ObservationService, PlaybackMode, ProvenanceService, + RuntimeError, SchedulerCoordinator, SchedulerKind, WorldlineId, WorldlineRuntime, + WorldlineState, WorldlineStateError, WriterHead, WriterHeadKey, }; /// Error returned when a [`WarpKernel`] cannot be initialized from a caller-supplied engine. @@ -74,9 +78,11 @@ pub struct WarpKernel { runtime: WorldlineRuntime, provenance: ProvenanceService, default_worldline: WorldlineId, - /// Whether materialization output has been drained since the last step. - /// Prevents returning stale data on consecutive drain calls. - drained: bool, + /// Latest committed tick returned by the legacy `drain_view_ops()` adapter. + /// + /// This bookkeeping belongs to the compatibility layer only. 
It does not + /// mutate runtime, provenance, or engine-owned worldline state. + last_drained_commit_tick: Option, /// Registry metadata (injected at construction, immutable after). registry: RegistryInfo, } @@ -145,28 +151,150 @@ impl WarpKernel { runtime, provenance, default_worldline, - drained: true, + last_drained_commit_tick: None, registry, }) } - /// Build a [`HeadInfo`] from the current engine snapshot. - fn head_info(&self) -> HeadInfo { - let frontier = match self.runtime.worldlines().get(&self.default_worldline) { - Some(frontier) => frontier, - None => unreachable!("default worldline must exist"), + fn parse_worldline_id(bytes: &[u8]) -> Result { + let hash: [u8; 32] = bytes.try_into().map_err(|_| AbiError { + code: error_codes::INVALID_WORLDLINE, + message: format!("worldline id must be exactly 32 bytes, got {}", bytes.len()), + })?; + Ok(WorldlineId(hash)) + } + + fn parse_channel_ids( + channels: Option<&Vec>>, + ) -> Result>, AbiError> { + channels + .map(|ids| { + ids.iter() + .map(|bytes| { + let hash: [u8; 32] = bytes.as_slice().try_into().map_err(|_| AbiError { + code: error_codes::INVALID_PAYLOAD, + message: format!( + "channel id must be exactly 32 bytes, got {}", + bytes.len() + ), + })?; + Ok(warp_core::TypeId(hash)) + }) + .collect::, _>>() + }) + .transpose() + } + + fn map_observation_error(err: ObservationError) -> AbiError { + match err { + ObservationError::InvalidWorldline(worldline_id) => AbiError { + code: error_codes::INVALID_WORLDLINE, + message: format!("invalid worldline: {worldline_id:?}"), + }, + ObservationError::InvalidTick { worldline_id, tick } => AbiError { + code: error_codes::INVALID_TICK, + message: format!("invalid tick {tick} for worldline {worldline_id:?}"), + }, + ObservationError::UnsupportedFrameProjection { frame, projection } => AbiError { + code: error_codes::UNSUPPORTED_FRAME_PROJECTION, + message: format!( + "unsupported frame/projection pairing: {frame:?} + {projection:?}" + ), + }, + 
ObservationError::UnsupportedQuery => AbiError { + code: error_codes::UNSUPPORTED_QUERY, + message: "query observation is not supported by this kernel".into(), + }, + ObservationError::ObservationUnavailable { worldline_id, at } => AbiError { + code: error_codes::OBSERVATION_UNAVAILABLE, + message: format!( + "observation unavailable for worldline {worldline_id:?} at {at:?}" + ), + }, + ObservationError::CodecFailure(message) => AbiError { + code: error_codes::CODEC_ERROR, + message, + }, + } + } + + fn to_core_request(request: AbiObservationRequest) -> Result { + let worldline_id = Self::parse_worldline_id(&request.coordinate.worldline_id)?; + let at = match request.coordinate.at { + echo_wasm_abi::kernel_port::ObservationAt::Frontier => ObservationAt::Frontier, + echo_wasm_abi::kernel_port::ObservationAt::Tick { tick } => ObservationAt::Tick(tick), + }; + let frame = match request.frame { + AbiObservationFrame::CommitBoundary => ObservationFrame::CommitBoundary, + AbiObservationFrame::RecordedTruth => ObservationFrame::RecordedTruth, + AbiObservationFrame::QueryView => ObservationFrame::QueryView, + }; + let projection = match request.projection { + AbiObservationProjection::Head => ObservationProjection::Head, + AbiObservationProjection::Snapshot => ObservationProjection::Snapshot, + AbiObservationProjection::TruthChannels { channels } => { + ObservationProjection::TruthChannels { + channels: Self::parse_channel_ids(channels.as_ref())?, + } + } + AbiObservationProjection::Query { + query_id, + vars_bytes, + } => ObservationProjection::Query { + query_id, + vars_bytes, + }, }; - let snap = frontier - .state() - .last_snapshot() - .cloned() - .unwrap_or_else(|| self.engine.snapshot_for_state(frontier.state())); - HeadInfo { - tick: frontier.frontier_tick(), - state_root: snap.state_root.to_vec(), - commit_id: snap.hash.to_vec(), + Ok(ObservationRequest { + coordinate: ObservationCoordinate { worldline_id, at }, + frame, + projection, + }) + } + + fn observe_core( 
+ &self, + request: ObservationRequest, + ) -> Result { + ObservationService::observe(&self.runtime, &self.provenance, &self.engine, request) + .map_err(Self::map_observation_error) + } + + fn head_info_from_observation( + artifact: warp_core::ObservationArtifact, + ) -> Result { + match artifact.payload { + ObservationPayload::Head(head) => Ok(HeadInfo { + tick: head.tick, + state_root: head.state_root.to_vec(), + commit_id: head.commit_hash.to_vec(), + }), + _ => Err(AbiError { + code: error_codes::ENGINE_ERROR, + message: "observe returned non-head payload for head adapter".into(), + }), } } + + fn snapshot_bytes_from_observation( + artifact: warp_core::ObservationArtifact, + ) -> Result, AbiError> { + let ObservationPayload::Snapshot(snapshot) = artifact.payload else { + return Err(AbiError { + code: error_codes::ENGINE_ERROR, + message: "observe returned non-snapshot payload for snapshot adapter".into(), + }); + }; + let head = HeadInfo { + tick: snapshot.tick, + state_root: snapshot.state_root.to_vec(), + commit_id: snapshot.commit_hash.to_vec(), + }; + echo_wasm_abi::encode_cbor(&head).map_err(|e| AbiError { + code: error_codes::CODEC_ERROR, + message: e.to_string(), + }) + } } impl KernelPort for WarpKernel { @@ -212,7 +340,7 @@ impl KernelPort for WarpKernel { if budget == 0 { return Ok(StepResponse { ticks_executed: 0, - head: self.head_info(), + head: self.get_head()?, }); } @@ -238,71 +366,107 @@ impl KernelPort for WarpKernel { { ticks_executed += records.len() as u32; } - self.drained = false; } Ok(StepResponse { ticks_executed, - head: self.head_info(), + head: self.get_head()?, }) } + fn observe(&self, request: AbiObservationRequest) -> Result { + let request = Self::to_core_request(request)?; + Ok(self.observe_core(request)?.to_abi()) + } + fn drain_view_ops(&mut self) -> Result { - // If already drained since the last step, return empty to avoid - // returning stale data (the engine doesn't clear last_materialization). 
- if self.drained { + let artifact = match self.observe_core(ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id: self.default_worldline, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::RecordedTruth, + projection: ObservationProjection::TruthChannels { channels: None }, + }) { + Ok(artifact) => artifact, + Err(AbiError { + code: error_codes::OBSERVATION_UNAVAILABLE, + .. + }) => { + return Ok(DrainResponse { + channels: Vec::new(), + }) + } + Err(err) => return Err(err), + }; + + if self.last_drained_commit_tick == Some(artifact.resolved.resolved_tick) { return Ok(DrainResponse { channels: Vec::new(), }); } - self.drained = true; - - let frontier = match self.runtime.worldlines().get(&self.default_worldline) { - Some(frontier) => frontier, - None => unreachable!("default worldline must exist"), + self.last_drained_commit_tick = Some(artifact.resolved.resolved_tick); + + let channels = match artifact.payload { + ObservationPayload::TruthChannels(channels) => channels + .into_iter() + .map(|(channel, data)| ChannelData { + channel_id: channel.0.to_vec(), + data, + }) + .collect(), + _ => { + return Err(AbiError { + code: error_codes::ENGINE_ERROR, + message: "observe returned non-truth payload for drain adapter".into(), + }) + } }; - let finalized = frontier.state().last_materialization(); - let channels: Vec = finalized - .iter() - .map(|ch| ChannelData { - channel_id: ch.channel.0.to_vec(), - data: ch.data.clone(), - }) - .collect(); Ok(DrainResponse { channels }) } fn get_head(&self) -> Result { - Ok(self.head_info()) + Self::head_info_from_observation(self.observe_core(ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id: self.default_worldline, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::CommitBoundary, + projection: ObservationProjection::Head, + })?) 
} - fn snapshot_at(&mut self, tick: u64) -> Result, AbiError> { - let tick_index = usize::try_from(tick).map_err(|_| AbiError { - code: error_codes::INVALID_TICK, - message: format!("tick {tick} exceeds addressable range"), + fn execute_query(&self, query_id: u32, vars_bytes: &[u8]) -> Result, AbiError> { + let artifact = self.observe_core(ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id: self.default_worldline, + at: ObservationAt::Frontier, + }, + frame: ObservationFrame::QueryView, + projection: ObservationProjection::Query { + query_id, + vars_bytes: vars_bytes.to_vec(), + }, })?; - let frontier = match self.runtime.worldlines().get(&self.default_worldline) { - Some(frontier) => frontier, - None => unreachable!("default worldline must exist"), - }; - let snap = self - .engine - .snapshot_at_state(frontier.state(), tick_index) - .map_err(|e| AbiError { - code: error_codes::INVALID_TICK, - message: e.to_string(), - })?; - let head = HeadInfo { - tick, - state_root: snap.state_root.to_vec(), - commit_id: snap.hash.to_vec(), + let ObservationPayload::QueryBytes(data) = artifact.payload else { + return Err(AbiError { + code: error_codes::ENGINE_ERROR, + message: "observe returned non-query payload for query adapter".into(), + }); }; + Ok(data) + } - echo_wasm_abi::encode_cbor(&head).map_err(|e| AbiError { - code: error_codes::CODEC_ERROR, - message: e.to_string(), - }) + fn snapshot_at(&mut self, tick: u64) -> Result, AbiError> { + Self::snapshot_bytes_from_observation(self.observe_core(ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id: self.default_worldline, + at: ObservationAt::Tick(tick), + }, + frame: ObservationFrame::CommitBoundary, + projection: ObservationProjection::Snapshot, + })?) 
} fn registry_info(&self) -> RegistryInfo { @@ -314,7 +478,15 @@ impl KernelPort for WarpKernel { #[allow(clippy::unwrap_used)] mod tests { use super::*; - use echo_wasm_abi::pack_intent_v1; + use echo_wasm_abi::{ + kernel_port::{ + ObservationAt as AbiObservationAt, ObservationCoordinate as AbiObservationCoordinate, + ObservationFrame as AbiObservationFrame, ObservationPayload as AbiObservationPayload, + ObservationProjection as AbiObservationProjection, + ObservationRequest as AbiObservationRequest, + }, + pack_intent_v1, + }; #[test] fn new_kernel_has_zero_tick() { @@ -409,7 +581,7 @@ mod tests { fn execute_query_returns_not_supported() { let kernel = WarpKernel::new().unwrap(); let err = kernel.execute_query(0, &[]).unwrap_err(); - assert_eq!(err.code, error_codes::NOT_SUPPORTED); + assert_eq!(err.code, error_codes::UNSUPPORTED_QUERY); } #[test] @@ -483,6 +655,71 @@ mod tests { assert_eq!(err.code, error_codes::NOT_SUPPORTED); } + #[test] + fn observe_frontier_head_matches_get_head_adapter() { + let kernel = WarpKernel::new().unwrap(); + let artifact = kernel + .observe(AbiObservationRequest { + coordinate: AbiObservationCoordinate { + worldline_id: kernel.default_worldline.0.to_vec(), + at: AbiObservationAt::Frontier, + }, + frame: AbiObservationFrame::CommitBoundary, + projection: AbiObservationProjection::Head, + }) + .unwrap(); + let head = kernel.get_head().unwrap(); + + let AbiObservationPayload::Head { head: observed } = artifact.payload else { + panic!("expected head observation payload"); + }; + assert_eq!(observed.tick, head.tick); + assert_eq!(observed.state_root, head.state_root); + assert_eq!(observed.commit_id, head.commit_id); + } + + #[test] + fn observe_snapshot_matches_snapshot_at_adapter() { + let mut kernel = WarpKernel::new().unwrap(); + let intent = pack_intent_v1(1, b"hello").unwrap(); + kernel.dispatch_intent(&intent).unwrap(); + kernel.step(1).unwrap(); + + let artifact = kernel + .observe(AbiObservationRequest { + coordinate: 
AbiObservationCoordinate { + worldline_id: kernel.default_worldline.0.to_vec(), + at: AbiObservationAt::Tick { tick: 0 }, + }, + frame: AbiObservationFrame::CommitBoundary, + projection: AbiObservationProjection::Snapshot, + }) + .unwrap(); + let bytes = kernel.snapshot_at(0).unwrap(); + let head: HeadInfo = echo_wasm_abi::decode_cbor(&bytes).unwrap(); + + let AbiObservationPayload::Snapshot { snapshot } = artifact.payload else { + panic!("expected snapshot observation payload"); + }; + assert_eq!(snapshot.tick, head.tick); + assert_eq!(snapshot.state_root, head.state_root); + assert_eq!(snapshot.commit_id, head.commit_id); + } + + #[test] + fn drain_view_ops_is_read_only_adapter() { + let mut kernel = WarpKernel::new().unwrap(); + let intent = pack_intent_v1(1, b"hello").unwrap(); + kernel.dispatch_intent(&intent).unwrap(); + kernel.step(1).unwrap(); + + let head_before = kernel.get_head().unwrap(); + let _ = kernel.drain_view_ops().unwrap(); + let head_after = kernel.get_head().unwrap(); + + assert_eq!(head_before, head_after); + } + #[test] fn registry_info_has_abi_version() { let kernel = WarpKernel::new().unwrap(); diff --git a/docs/adr/ADR-0011-explicit-observation-contract.md b/docs/adr/ADR-0011-explicit-observation-contract.md new file mode 100644 index 00000000..52e5c49e --- /dev/null +++ b/docs/adr/ADR-0011-explicit-observation-contract.md @@ -0,0 +1,226 @@ + + + +# ADR-0011: Explicit Observation Contract + +- **Status:** Proposed +- **Date:** 2026-03-15 +- **Amends:** ADR-0008, ADR-0010 +- **Related:** ADR-0009 + +## Context + +Echo's write path is already worldline-native: + +- runtime ingress is explicit, +- provenance is entry-based, +- parent refs are stored rather than reconstructed, +- replay is grounded in recorded history, +- BTRs exist as deterministic contiguous provenance containers. + +The read path still lags behind that architecture. 
Snapshot, head, truth-drain, +and query-shaped operations are currently exposed as separate surfaces with +different implicit coordinate stories. That leaves too much hidden: + +- which worldline is being read, +- which historical coordinate is being observed, +- whether the read is a commit-boundary view or recorded-truth view, +- whether the read is reconstructive or current-frontier, +- and which parts of the runtime are allowed to mutate as a side effect. + +The system already knows it lives on worldlines. Reads must stop pretending +otherwise. + +## Decision + +### 1. Observation is the canonical read contract + +Echo reads are observations of a worldline at a coordinate under a declared +frame and projection. + +The canonical internal entrypoint is: + +```rust +observe(request: ObservationRequest) -> Result +``` + +All meaningful reads must flow through this path. + +### 2. Observation is explicit about coordinate, frame, and projection + +The v1 observation request surface is: + +```rust +pub struct ObservationCoordinate { + pub worldline_id: WorldlineId, + pub at: ObservationAt, +} + +pub enum ObservationAt { + Frontier, + Tick(u64), +} + +pub enum ObservationFrame { + CommitBoundary, + RecordedTruth, + QueryView, +} + +pub enum ObservationProjection { + Head, + Snapshot, + TruthChannels { channels: Option> }, + Query { query_id: u32, vars_bytes: Vec }, +} +``` + +The frame/projection validity matrix is closed and centralized: + +- `CommitBoundary` → `Head`, `Snapshot` +- `RecordedTruth` → `TruthChannels` +- `QueryView` → `Query` +- all other combinations fail with deterministic `UnsupportedFrameProjection` + +### 3. Observation is read-only by construction + +Observation must not mutate: + +- runtime frontier ticks, +- inbox state, +- committed-ingress ledgers, +- provenance history, +- worldline mirrors such as `tick_history`, `last_snapshot`, or recorded + materialization fields. 
+ +Implementations should prefer immutable borrows all the way down: + +- `&WorldlineRuntime` +- `&ProvenanceService` +- `&Engine` + +If a helper cannot be expressed without mutation, it does not belong in this +phase. + +### 4. Recorded truth means recorded truth + +`RecordedTruth` observations read recorded outputs from provenance/history. + +They do not re-run engine logic, recompute materialization, or synthesize truth +from current state under another name. + +### 5. Resolved coordinates and artifact identity are first-class + +Every observation returns explicit resolved coordinate metadata: + +```rust +pub struct ResolvedObservationCoordinate { + pub observation_version: u32, + pub worldline_id: WorldlineId, + pub requested_at: ObservationAt, + pub resolved_tick: u64, + pub state_root: Hash, + pub commit_hash: Hash, +} +``` + +The observation artifact is identity-bearing: + +```rust +pub struct ObservationArtifact { + pub resolved: ResolvedObservationCoordinate, + pub frame: ObservationFrame, + pub projection: ObservationProjection, + pub artifact_hash: Hash, + pub payload: ObservationPayload, +} +``` + +### 6. Canonical serialization and hashing are normative + +Observation artifact identity uses the repository's canonical CBOR rules. + +`artifact_hash` is defined as: + +```text +blake3("echo:observation-artifact:v1\0" || canonical_cbor(hash_input)) +``` + +Where `hash_input` includes: + +- observation version, +- resolved coordinate, +- frame, +- projection, +- canonical payload bytes, + +and excludes `artifact_hash` itself. + +Map-order dependence or serializer-specific field-order behavior is forbidden. + +### 7. Query is reserved but intentionally unsupported + +The only valid query-shaped pairing in v1 is: + +- `QueryView + Query { ... }` + +That pairing is still allowed to fail with deterministic `UnsupportedQuery` +until real query support exists. + +No future query API may bypass `observe(...)`. + +### 8. 
Compatibility is one phase only + +The internal read pivot is a hard break. + +Externally, one adapter phase is allowed: + +- `get_head()` lowers to `observe(Frontier, CommitBoundary, Head)` +- `snapshot_at(t)` lowers to `observe(Tick(t), CommitBoundary, Snapshot)` +- `execute_query(...)` lowers to `observe(..., QueryView, Query { ... })` +- `drain_view_ops()` is a legacy adapter over `RecordedTruth` + +`drain_view_ops()` is legacy/debug-only in this phase. It must not gain new +product semantics. + +At the start of Phase 6: + +- `get_head` +- `snapshot_at` +- `drain_view_ops` +- `execute_query` +- `render_snapshot` + +are removed from the public boundary, and the WASM ABI version is bumped to 2 +before other Phase 6 work proceeds. + +## Consequences + +### Positive + +- Reads become explicit about worldline and time. +- One canonical read path replaces divergent implicit read semantics. +- Historical and current observations can share one deterministic identity model. +- Recorded truth becomes a real read contract rather than a side effect of + mutable drain plumbing. + +### Negative + +- Kernel and ABI adapters must be rewritten now instead of later. +- Some existing cursor/session helpers remain as accelerators but lose their + status as the conceptual public read model. +- A compatibility layer still exists for one phase and must be actively + deleted on schedule. + +## Non-Goals + +This ADR does not introduce: + +- rich observer profiles, +- governance or aperture-rights systems, +- translation-cost / observer-geometry machinery, +- multi-worldline coordinate models, +- implicit continuation from historical reads, +- `fork_from_observation(...)` itself. + +Those remain later work. 
diff --git a/docs/plans/adr-0008-and-0009.md b/docs/plans/adr-0008-and-0009.md index 10aea308..2fb56dae 100644 --- a/docs/plans/adr-0008-and-0009.md +++ b/docs/plans/adr-0008-and-0009.md @@ -4,10 +4,12 @@ # Implementation Plan: ADR-0008 and ADR-0009 -- **Status:** Living implementation plan; Phases 0-3 implemented +- **Status:** Living implementation plan; Phases 0-4 implemented - **Date:** 2026-03-12 - **Primary ADRs:** ADR-0008 (Worldline Runtime Model), ADR-0009 (Inter-Worldline Communication) -- **Companion ADR in this change set:** `docs/adr/ADR-0010-observational-seek-and-administrative-rewind.md` +- **Companion ADRs in this change set:** + - `docs/adr/ADR-0010-observational-seek-and-administrative-rewind.md` + - `docs/adr/ADR-0011-explicit-observation-contract.md` ## Purpose @@ -511,6 +513,8 @@ pub struct HeadInbox { ## Phase 4: Provenance Entry Model and DAG Parents +**Status:** Implemented on 2026-03-15 + **Goal** Make provenance structurally ready for local commits, cross-worldline messages, @@ -549,47 +553,120 @@ pub trait ProvenanceStore { - Replay through `parents()` reproduces expected hash triplets - Head attribution survives round-trip -## Phase 5: Observation APIs, Snapshot, Fork, and Administrative Rewind +## Phase 5: Observation Coordinates and Explicit Read Contract **Goal** -Replace ambiguous global rewind behavior with explicit APIs that respect -head-local observation and worldline isolation. +Make reads honest about time and worldline position by replacing ad hoc +snapshot/head/query surfaces with one canonical observation contract. 
**Deliverables** -New or revised APIs on the coordinator/runtime surface: +New canonical read types and entrypoint: ```rust -pub fn seek_reader(key: ReaderHeadKey, target_tick: WorldlineTick) -> Result<...>; -pub fn jump_to_frontier(key: PlaybackHeadKey) -> Result<...>; -pub fn snapshot_at(worldline: WorldlineId, tick: WorldlineTick) -> Result; -pub fn fork(source: WorldlineId, at: WorldlineTick, target: WorldlineId) -> Result<...>; +pub struct ObservationCoordinate { + pub worldline_id: WorldlineId, + pub at: ObservationAt, +} + +pub enum ObservationAt { + Frontier, + Tick(u64), +} + +pub enum ObservationFrame { + CommitBoundary, + RecordedTruth, + QueryView, +} + +pub enum ObservationProjection { + Head, + Snapshot, + TruthChannels { channels: Option> }, + Query { query_id: u32, vars_bytes: Vec }, +} + +pub struct ObservationRequest { + pub coordinate: ObservationCoordinate, + pub frame: ObservationFrame, + pub projection: ObservationProjection, +} + +pub struct ResolvedObservationCoordinate { + pub observation_version: u32, + pub worldline_id: WorldlineId, + pub requested_at: ObservationAt, + pub resolved_tick: u64, + pub state_root: Hash, + pub commit_hash: Hash, +} + +pub struct ObservationArtifact { + pub resolved: ResolvedObservationCoordinate, + pub frame: ObservationFrame, + pub projection: ObservationProjection, + pub artifact_hash: Hash, + pub payload: ObservationPayload, +} -#[cfg(any(test, feature = "admin-rewind"))] -pub fn rewind_worldline(worldline: WorldlineId, target_tick: WorldlineTick) -> Result<...>; +pub fn observe(request: ObservationRequest) -> Result; ``` ### Design Notes -- `seek_reader(...)` is observational and head-local. -- Writers do not rewind shared frontier state as a side effect of `seek`. -- `snapshot_at(...)` gives Janus/testing a clean read-only historical view. -- `fork(...)` reconstructs state at the fork tick from provenance and creates a new live worldline. 
-- `rewind_worldline(...)` is an explicit administrative/testing tool, not the default playback API. +- Every meaningful read names four things explicitly: + - worldline + - coordinate + - frame + - projection +- `observe(...)` is the only canonical internal read path. +- Observation is strictly read-only: + - no frontier movement + - no inbox draining + - no committed-ingress mutation + - no provenance writes +- `RecordedTruth` reads recorded outputs only. It does not re-run engine logic. +- The frame/projection validity matrix is centralized: + - `CommitBoundary` → `Head`, `Snapshot` + - `RecordedTruth` → `TruthChannels` + - `QueryView` → `Query` + - all other combinations fail deterministically +- `artifact_hash` is: + - `blake3("echo:observation-artifact:v1\0" || canonical_cbor(hash_input))` +- Query support remains stubbed behind deterministic `UnsupportedQuery` errors + until a real query implementation exists. ### Migration -- `Engine::jump_to_tick()` becomes deprecated. -- Existing callers move to `snapshot_at`, `seek_reader`, or `fork`. +- Internally, all new read logic lowers to `observe(...)`. +- Externally, one adapter phase remains in `warp-wasm` only: + - `get_head()` lowers to `observe(Frontier, CommitBoundary, Head)` + - `snapshot_at(t)` lowers to `observe(Tick(t), CommitBoundary, Snapshot)` + - `execute_query(...)` lowers to `observe(..., QueryView, Query { ... })` + - `drain_view_ops()` becomes a legacy adapter over recorded-truth observation +- Phase 6 entry deletes: + - `get_head` + - `snapshot_at` + - `drain_view_ops` + - `execute_query` + - `render_snapshot` +- Phase 6 bumps the ABI version to 2 before other Phase 6 work proceeds. 
**Tests** -- Seeking a reader does not mutate any frontier state -- `snapshot_at(worldline, t)` matches replay at `t` -- Fork at tick 5 produces a new independent worldline with identical prefix state -- Rewind is unavailable without the explicit admin/testing gate -- Rewinding one worldline does not mutate another +- Same `ObservationRequest` at the same resolved coordinate yields identical + payload bytes and identical `artifact_hash` +- `get_head()` adapter is byte-for-byte compatible with the current `HeadInfo` + response +- `snapshot_at(t)` adapter is byte-for-byte compatible with the current + historical snapshot response +- `RecordedTruth` observations reproduce recorded outputs byte-for-byte +- Observation never mutates frontier ticks, inbox state, committed ingress, + provenance length, `tick_history`, `last_snapshot`, or materialization mirrors +- Invalid worldline/tick, invalid frame/projection pairs, unsupported query, + and unavailable recorded truth return deterministic errors/codes ## Phase 6: Split `worldline_tick` and `global_tick` diff --git a/docs/spec/SPEC-0004-worldlines-playback-truthbus.md b/docs/spec/SPEC-0004-worldlines-playback-truthbus.md index 560fc70f..41bcd47e 100644 --- a/docs/spec/SPEC-0004-worldlines-playback-truthbus.md +++ b/docs/spec/SPEC-0004-worldlines-playback-truthbus.md @@ -13,6 +13,8 @@ ## 0) Doctrine - **Worldline is the boundary.** (U0 + per-warp tick patches + expected hashes + recorded outputs) +- **Observation is the public read contract.** Playback/session helpers are implementation + tools and compatibility aids; they are not the semantic center of reads anymore. - **PlaybackCursor is a viewpoint.** (materialize any tick without mutating head) - **Clients are dumb.** They render authoritative truth frames; no rollback/diff/rebuild logic. - **Determinism is non-negotiable.** Canonical ordering → canonical bytes → canonical hashes. 
@@ -182,6 +184,11 @@ struct TruthFrame { ## 2) ProvenanceStore Seam (Wormholes-Ready) +> **Phase 5 note:** recorded truth for public reads is now surfaced through the +> explicit observation contract first. `PlaybackCursor`, `ViewSession`, and +> `TruthSink` remain useful helper types, but they no longer define the public +> read boundary of Echo. + Worldline data is accessed through an interface (local memory today, wormholes later). ```rust diff --git a/docs/spec/SPEC-0005-provenance-payload.md b/docs/spec/SPEC-0005-provenance-payload.md index 05dc8a52..62955049 100644 --- a/docs/spec/SPEC-0005-provenance-payload.md +++ b/docs/spec/SPEC-0005-provenance-payload.md @@ -17,6 +17,10 @@ Foundations) into concrete Echo types. It defines the data structures needed to answer "show me why" queries — tracing any observed state back through the causal chain of tick patches that produced it. +> **Phase 5 note:** provenance payloads and BTRs are substrate packaging and +> replay artifacts. They are not themselves the public read contract; that role +> now belongs to the explicit observation API. + ### Scope - **In scope:** Type definitions, wire format, composition rules, bridge to diff --git a/docs/spec/SPEC-0009-wasm-abi-v1.md b/docs/spec/SPEC-0009-wasm-abi-v1.md index 9bf194f0..32d948f1 100644 --- a/docs/spec/SPEC-0009-wasm-abi-v1.md +++ b/docs/spec/SPEC-0009-wasm-abi-v1.md @@ -44,20 +44,21 @@ but apps can implement `KernelPort` with any engine. All exports are `#[wasm_bindgen]` functions. Return types are CBOR-encoded `Uint8Array` unless noted otherwise. 
-| Export | Signature | Returns | -| ------------------------- | --------------------------- | --------------------------- | -| `init()` | `() → Uint8Array` | `HeadInfo` envelope | -| `dispatch_intent(bytes)` | `(&[u8]) → Uint8Array` | `DispatchResponse` envelope | -| `step(budget)` | `(u32) → Uint8Array` | `StepResponse` envelope | -| `drain_view_ops()` | `() → Uint8Array` | `DrainResponse` envelope | -| `get_head()` | `() → Uint8Array` | `HeadInfo` envelope | -| `execute_query(id, vars)` | `(u32, &[u8]) → Uint8Array` | `RawBytesResponse` envelope | -| `snapshot_at(tick)` | `(u64) → Uint8Array` | `RawBytesResponse` envelope | -| `render_snapshot(bytes)` | `(&[u8]) → Uint8Array` | `RawBytesResponse` envelope | -| `get_registry_info()` | `() → Uint8Array` | `RegistryInfo` envelope | -| `get_codec_id()` | `() → JsValue` | `string \| null` | -| `get_registry_version()` | `() → JsValue` | `string \| null` | -| `get_schema_sha256_hex()` | `() → JsValue` | `string \| null` | +| Export | Signature | Returns | +| ------------------------- | --------------------------- | ------------------------------ | +| `init()` | `() → Uint8Array` | `HeadInfo` envelope | +| `dispatch_intent(bytes)` | `(&[u8]) → Uint8Array` | `DispatchResponse` envelope | +| `step(budget)` | `(u32) → Uint8Array` | `StepResponse` envelope | +| `observe(request)` | `(&[u8]) → Uint8Array` | `ObservationArtifact` envelope | +| `drain_view_ops()` | `() → Uint8Array` | `DrainResponse` envelope | +| `get_head()` | `() → Uint8Array` | `HeadInfo` envelope | +| `execute_query(id, vars)` | `(u32, &[u8]) → Uint8Array` | `RawBytesResponse` envelope | +| `snapshot_at(tick)` | `(u64) → Uint8Array` | `RawBytesResponse` envelope | +| `render_snapshot(bytes)` | `(&[u8]) → Uint8Array` | `RawBytesResponse` envelope | +| `get_registry_info()` | `() → Uint8Array` | `RegistryInfo` envelope | +| `get_codec_id()` | `() → JsValue` | `string \| null` | +| `get_registry_version()` | `() → JsValue` | `string \| null` | +| 
`get_schema_sha256_hex()` | `() → JsValue` | `string \| null` | ## Wire Envelope @@ -81,6 +82,49 @@ integers, no tags, definite lengths). ## Response Types +### ObservationRequest + +The request payload for `observe(request)` is itself canonical-CBOR bytes that +decode to: + +- `coordinate.worldline_id: bytes(32)` +- `coordinate.at: frontier | tick` +- `frame: commit_boundary | recorded_truth | query_view` +- `projection: head | snapshot | truth_channels | query` + +This makes worldline, time, frame, and projection explicit on every read. + +### ObservationArtifact + +| Field | Type | Description | +| --------------- | ------------------------------- | ---------------------------------------------- | +| `resolved` | `ResolvedObservationCoordinate` | Explicit resolved coordinate metadata | +| `frame` | enum | Declared semantic frame | +| `projection` | enum | Declared projection | +| `artifact_hash` | bytes(32) | Canonical observation artifact hash | +| `payload` | tagged union | Head, snapshot, recorded truth, or query bytes | + +`artifact_hash` is computed as +`blake3("echo:observation-artifact:v1\0" || canonical_cbor(hash_input))`. + +### ResolvedObservationCoordinate + +| Field | Type | Description | +| --------------------- | --------- | ------------------------------------------- | +| `observation_version` | u32 | Observation contract version | +| `worldline_id` | bytes(32) | Worldline actually observed | +| `requested_at` | enum | Original coordinate selector | +| `resolved_tick` | u64 | Concrete resolved tick | +| `state_root` | bytes(32) | Canonical graph-only state hash | +| `commit_hash` | bytes(32) | Canonical commit hash at the resolved point | + +### ObservationPayload + +- `head` → `HeadObservation` +- `snapshot` → `SnapshotObservation` +- `truth_channels` → `ChannelData[]` +- `query_bytes` → raw bytes + ### HeadInfo | Field | Type | Description | @@ -137,21 +181,26 @@ envelope like all other responses. 
## Error Codes -| Code | Name | Meaning | -| ---- | ----------------- | ------------------------------ | -| 1 | `NOT_INITIALIZED` | `init()` not called | -| 2 | `INVALID_INTENT` | Malformed EINT intent envelope | -| 3 | `ENGINE_ERROR` | Internal engine failure | -| 4 | `INVALID_TICK` | Tick index out of bounds | -| 5 | `NOT_SUPPORTED` | Operation not implemented | -| 6 | `CODEC_ERROR` | CBOR encode/decode failure | -| 7 | `INVALID_PAYLOAD` | Corrupted input bytes | +| Code | Name | Meaning | +| ---- | ------------------------------ | ---------------------------------------------------------- | +| 1 | `NOT_INITIALIZED` | `init()` not called | +| 2 | `INVALID_INTENT` | Malformed EINT intent envelope | +| 3 | `ENGINE_ERROR` | Internal engine failure | +| 4 | `LEGACY_INVALID_TICK` | Legacy snapshot/history tick out of bounds | +| 5 | `NOT_SUPPORTED` | Operation not implemented | +| 6 | `CODEC_ERROR` | CBOR encode/decode failure | +| 7 | `INVALID_PAYLOAD` | Corrupted input bytes | +| 8 | `INVALID_WORLDLINE` | Requested worldline missing | +| 9 | `INVALID_TICK` | Requested observation tick missing | +| 10 | `UNSUPPORTED_FRAME_PROJECTION` | Invalid frame/projection pair | +| 11 | `UNSUPPORTED_QUERY` | Query observation not yet implemented | +| 12 | `OBSERVATION_UNAVAILABLE` | Valid request but no observation exists at that coordinate | ## Versioning Strategy - The ABI version is exposed via `RegistryInfo.abi_version` and the constant `echo_wasm_abi::kernel_port::ABI_VERSION`. -- **Additive changes** (new optional fields, new exports) do NOT bump the +- **Additive changes** (new optional fields, new exports such as `observe`) do NOT bump the ABI version. - **Breaking changes** (removed fields, changed semantics, new required fields, changed error codes) require an ABI version bump and a @@ -172,15 +221,18 @@ envelope like all other responses. silently returned empty bytes; now they return error code `1`. 3. **`dispatch_intent` returns data**. 
Previously a no-op void function; now returns `DispatchResponse` with the intent hash. -4. **`execute_query` and `render_snapshot`** return error code `5` - (`NOT_SUPPORTED`). These will be wired when the engine query dispatcher - lands. -5. **JsValue exports unchanged**: `get_codec_id`, `get_registry_version`, +4. **`observe(request)`** is the canonical read boundary. Legacy read exports + remain one-phase adapters above it. +5. **`execute_query`** currently lowers to `observe(..., query_view, query)` + and returns error code `11` (`UNSUPPORTED_QUERY`) until real query support lands. +6. **`render_snapshot`** still returns error code `5` + (`NOT_SUPPORTED`). +7. **JsValue exports unchanged**: `get_codec_id`, `get_registry_version`, `get_schema_sha256_hex` still return `JsValue` (`string | null`). ## Not Yet Implemented These are honestly reported as `NOT_SUPPORTED` (error code 5): -- `execute_query`: Engine query dispatcher not yet built. +- `execute_query`: Lowered through `observe(...)`, but real query evaluation is not yet built. - `render_snapshot`: Snapshot-to-ViewOps projection not yet built. 
From dcb5f6567836725916dffab00a4e688f673306bb Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 13:44:39 -0700 Subject: [PATCH 04/18] fix(ttd-browser): migrate to entry-based provenance --- crates/ttd-browser/src/lib.rs | 42 ++++++++++++++++++++++------------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/crates/ttd-browser/src/lib.rs b/crates/ttd-browser/src/lib.rs index c96a6d65..35628023 100644 --- a/crates/ttd-browser/src/lib.rs +++ b/crates/ttd-browser/src/lib.rs @@ -422,7 +422,8 @@ impl TtdEngine { let expected = self .provenance - .expected(cursor.worldline_id, cursor.tick - 1) + .entry(cursor.worldline_id, cursor.tick - 1) + .map(|entry| entry.expected) .map_err(|e| JsError::new(&e.to_string()))?; Ok(hash_to_uint8array(&expected.state_root)) @@ -449,7 +450,8 @@ impl TtdEngine { let expected = self .provenance - .expected(cursor.worldline_id, cursor.tick - 1) + .entry(cursor.worldline_id, cursor.tick - 1) + .map(|entry| entry.expected) .map_err(|e| JsError::new(&e.to_string()))?; Ok(hash_to_uint8array(&expected.commit_hash)) @@ -479,7 +481,8 @@ impl TtdEngine { let expected = self .provenance - .expected(cursor.worldline_id, cursor.tick - 1) + .entry(cursor.worldline_id, cursor.tick - 1) + .map(|entry| entry.expected) .map_err(|e| JsError::new(&e.to_string()))?; // Return patch_digest as a proxy; actual emissions_digest would need @@ -729,14 +732,10 @@ impl TtdEngine { let expected = self .provenance - .expected(cursor.worldline_id, cursor.tick - 1) - .map_err(|e| e.to_string())?; - - // Retrieve recorded outputs for this tick to compute emissions_digest - let outputs = self - .provenance - .outputs(cursor.worldline_id, cursor.tick - 1) + .entry(cursor.worldline_id, cursor.tick - 1) .map_err(|e| e.to_string())?; + let outputs = expected.outputs.clone(); + let expected = expected.expected; let finalized_channels: Vec = outputs .into_iter() @@ -1340,7 +1339,8 @@ mod tests { #[test] fn 
regression_commit_populates_emissions_digest() { use warp_core::{ - HashTriplet, TypeId, WarpId, WorldlineId, WorldlineTickHeaderV1, WorldlineTickPatchV1, + make_head_id, HashTriplet, ProvenanceEntry, TypeId, WarpId, WorldlineId, + WorldlineTickHeaderV1, WorldlineTickPatchV1, WriterHeadKey, }; let mut engine = TtdEngine::new(); @@ -1372,11 +1372,23 @@ mod tests { }; let outputs = vec![(TypeId([10u8; 32]), vec![1, 2, 3])]; + let head_key = WriterHeadKey { + worldline_id: wl_id, + head_id: make_head_id("ttd-browser-test"), + }; + let entry = ProvenanceEntry::local_commit( + wl_id, + 0, + 0, + head_key, + Vec::new(), + expected, + patch, + outputs, + Vec::new(), + ); - engine - .provenance - .append_with_writes(wl_id, patch, expected, outputs, vec![]) - .unwrap(); + engine.provenance.append_local_commit(entry).unwrap(); let cursor_id = engine.create_cursor(&wl_id.0).unwrap(); // Advance cursor to tick 1 so we can commit (cannot commit at tick 0) From d30d6d54cb411a9a9f5ef97ac2b563ce13b4b72a Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 14:58:47 -0700 Subject: [PATCH 05/18] fix(warp-core): enforce provenance entry invariants --- crates/warp-core/src/provenance_store.rs | 162 +++++++++++++++++- .../ADR-0011-explicit-observation-contract.md | 2 +- 2 files changed, 157 insertions(+), 7 deletions(-) diff --git a/crates/warp-core/src/provenance_store.rs b/crates/warp-core/src/provenance_store.rs index a1ee7d57..c880def5 100644 --- a/crates/warp-core/src/provenance_store.rs +++ b/crates/warp-core/src/provenance_store.rs @@ -96,12 +96,41 @@ pub enum HistoryError { head_key: WriterHeadKey, }, + /// `append_local_commit(...)` only admits local-commit provenance entries. + #[error("append_local_commit rejected non-local event kind {got:?} at tick {tick}")] + InvalidLocalCommitEventKind { + /// The entry tick. + tick: u64, + /// The unexpected event kind. 
+ got: ProvenanceEventKind, + }, + /// Parent references must already be stored in canonical commit-hash order. #[error("parent refs must be in canonical commit-hash order at tick {tick}")] NonCanonicalParents { /// The entry tick whose parent refs were non-canonical. tick: u64, }, + + /// A parent ref must resolve to an already-recorded provenance entry. + #[error("missing parent ref {parent:?} for tick {tick}")] + MissingParentRef { + /// The entry tick carrying the invalid parent ref. + tick: u64, + /// The missing parent ref. + parent: ProvenanceRef, + }, + + /// A parent ref must match the stored commit hash at its referenced coordinate. + #[error("parent ref commit hash mismatch at tick {tick}: parent {parent:?}, stored {stored_commit_hash:?}")] + ParentCommitHashMismatch { + /// The entry tick carrying the invalid parent ref. + tick: u64, + /// The provided parent ref. + parent: ProvenanceRef, + /// The stored commit hash at the referenced coordinate. + stored_commit_hash: Hash, + }, } /// Errors that can occur when constructing or validating a BTR. @@ -172,6 +201,13 @@ pub enum BtrError { /// Value carried by the BTR. got: Hash, }, + + /// A payload entry diverges from the authoritative stored history. + #[error("BTR payload entry mismatch at tick {tick}")] + EntryMismatch { + /// The mismatching entry tick. + tick: u64, + }, } /// Reference to a checkpoint within the provenance store. 
@@ -528,6 +564,7 @@ impl LocalProvenanceStore { } fn validate_local_commit_entry( + worldlines: &BTreeMap, worldline_id: WorldlineId, expected_tick: u64, entry: &ProvenanceEntry, @@ -560,15 +597,37 @@ impl LocalProvenanceStore { tick: entry.worldline_tick, }); } + if !matches!(entry.event_kind, ProvenanceEventKind::LocalCommit) { + return Err(HistoryError::InvalidLocalCommitEventKind { + tick: entry.worldline_tick, + got: entry.event_kind.clone(), + }); + } if !entry .parents .windows(2) - .all(|pair| pair[0].commit_hash <= pair[1].commit_hash) + .all(|pair| pair[0].commit_hash < pair[1].commit_hash) { return Err(HistoryError::NonCanonicalParents { tick: entry.worldline_tick, }); } + for parent in &entry.parents { + let stored = worldlines + .get(&parent.worldline_id) + .and_then(|history| history.entries.get(parent.worldline_tick as usize)) + .ok_or(HistoryError::MissingParentRef { + tick: entry.worldline_tick, + parent: *parent, + })?; + if stored.expected.commit_hash != parent.commit_hash { + return Err(HistoryError::ParentCommitHashMismatch { + tick: entry.worldline_tick, + parent: *parent, + stored_commit_hash: stored.expected.commit_hash, + }); + } + } Ok(()) } @@ -752,9 +811,14 @@ impl ProvenanceStore for LocalProvenanceStore { } fn append_local_commit(&mut self, entry: ProvenanceEntry) -> Result<(), HistoryError> { + let expected_tick = self.history(entry.worldline_id)?.entries.len() as u64; + Self::validate_local_commit_entry( + &self.worldlines, + entry.worldline_id, + expected_tick, + &entry, + )?; let history = self.history_mut(entry.worldline_id)?; - let expected_tick = history.entries.len() as u64; - Self::validate_local_commit_entry(entry.worldline_id, expected_tick, &entry)?; history.entries.push(entry); Ok(()) } @@ -977,9 +1041,8 @@ impl ProvenanceService { .store .entry(record.worldline_id, entry.worldline_tick)?; if &stored != entry { - return Err(BtrError::OutputBoundaryHashMismatch { - expected: stored.expected.state_root, - got: 
entry.expected.state_root, + return Err(BtrError::EntryMismatch { + tick: entry.worldline_tick, }); } } @@ -1265,6 +1328,73 @@ mod tests { )); } + #[test] + fn append_local_commit_rejects_non_local_event_kind() { + let mut store = LocalProvenanceStore::new(); + let w = test_worldline_id(); + store.register_worldline(w, test_warp_id()).unwrap(); + + let mut entry = test_entry(0); + entry.event_kind = ProvenanceEventKind::ConflictArtifact { + artifact_id: [9u8; 32], + }; + let result = store.append_local_commit(entry); + assert!(matches!( + result, + Err(HistoryError::InvalidLocalCommitEventKind { + tick: 0, + got: ProvenanceEventKind::ConflictArtifact { .. } + }) + )); + } + + #[test] + fn append_local_commit_rejects_missing_parent_ref() { + let mut store = LocalProvenanceStore::new(); + let w = test_worldline_id(); + store.register_worldline(w, test_warp_id()).unwrap(); + + let mut entry = test_entry(0); + let missing_parent = ProvenanceRef { + worldline_id: w, + worldline_tick: 99, + commit_hash: [7u8; 32], + }; + entry.parents = vec![missing_parent]; + + let result = store.append_local_commit(entry); + assert!(matches!( + result, + Err(HistoryError::MissingParentRef { tick: 0, parent }) if parent == missing_parent + )); + } + + #[test] + fn append_local_commit_rejects_parent_commit_hash_mismatch() { + let mut store = LocalProvenanceStore::new(); + let w = test_worldline_id(); + store.register_worldline(w, test_warp_id()).unwrap(); + store.append_local_commit(test_entry(0)).unwrap(); + + let mut entry = test_entry(1); + let bad_parent = ProvenanceRef { + worldline_id: w, + worldline_tick: 0, + commit_hash: [8u8; 32], + }; + entry.parents = vec![bad_parent]; + + let result = store.append_local_commit(entry); + assert!(matches!( + result, + Err(HistoryError::ParentCommitHashMismatch { + tick: 1, + parent, + stored_commit_hash + }) if parent == bad_parent && stored_commit_hash == test_triplet(0).commit_hash + )); + } + #[test] fn checkpoint_before() { let mut store = 
LocalProvenanceStore::new(); @@ -1563,6 +1693,26 @@ mod tests { )); } + #[test] + fn validate_btr_rejects_payload_entry_mismatch() { + let mut service = ProvenanceService::new(); + let w = test_worldline_id(); + let state = WorldlineState::empty(); + service.register_worldline(w, &state).unwrap(); + service.append_local_commit(test_entry(0)).unwrap(); + + let mut btr = service.build_btr(w, 0, 1, 3, b"auth".to_vec()).unwrap(); + btr.payload.entries[0].head_key = Some(WriterHeadKey { + worldline_id: w, + head_id: make_head_id("mismatch"), + }); + + assert!(matches!( + service.validate_btr(&btr), + Err(BtrError::EntryMismatch { tick: 0 }) + )); + } + #[test] fn btr_validation_rejects_mixed_worldlines() { let entry = ProvenanceEntry::local_commit( diff --git a/docs/adr/ADR-0011-explicit-observation-contract.md b/docs/adr/ADR-0011-explicit-observation-contract.md index 52e5c49e..8f5673a2 100644 --- a/docs/adr/ADR-0011-explicit-observation-contract.md +++ b/docs/adr/ADR-0011-explicit-observation-contract.md @@ -3,7 +3,7 @@ # ADR-0011: Explicit Observation Contract -- **Status:** Proposed +- **Status:** Implemented - **Date:** 2026-03-15 - **Amends:** ADR-0008, ADR-0010 - **Related:** ADR-0009 From 7869932c1348bc25ad439272a6677d507d496354 Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 15:16:44 -0700 Subject: [PATCH 06/18] feat(tooling): split local verification into lanes --- CHANGELOG.md | 12 ++ Makefile | 5 +- scripts/hooks/README.md | 14 +- scripts/verify-local.sh | 307 +++++++++++++++++++++++++++---- tests/hooks/test_verify_local.sh | 148 +++++++++++++++ 5 files changed, 446 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f786d382..c894e8cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,18 @@ ## Unreleased +### feat(tooling): split local verification into parallel lanes + +- **Changed** the local full verifier now runs as curated parallel lanes with + isolated `CARGO_TARGET_DIR`s for clippy, tests, rustdoc, and 
guard checks, + which cuts local wall-clock time by avoiding one giant serialized cargo + invocation. +- **Changed** staged and reduced local Rust checks now use a narrower fast-path + target surface, keeping the heaviest all-target clippy drag in CI instead of + every local iteration loop. +- **Added** `make verify-full-sequential` as an explicit fallback when the lane + runner itself needs debugging. + ### feat(warp-core): close Phase 4 and pivot reads to observe - **Changed** Phase 4 provenance/BTR work is now the documented substrate diff --git a/Makefile b/Makefile index cc8bcd86..5bf32d5a 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ SHELL := /bin/bash PORT ?= 5173 BENCH_PORT ?= 8000 -.PHONY: hooks verify-fast verify-pr verify-full docs docs-build docs-ci +.PHONY: hooks verify-fast verify-pr verify-full verify-full-sequential docs docs-build docs-ci hooks: @git config core.hooksPath .githooks @chmod +x .githooks/* 2>/dev/null || true @@ -22,6 +22,9 @@ verify-pr: verify-full: @./scripts/verify-local.sh full +verify-full-sequential: + @VERIFY_LANE_MODE=sequential ./scripts/verify-local.sh full + .PHONY: dags dags-fetch dags: @cargo xtask dags diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md index c44fce1f..7bd5171e 100644 --- a/scripts/hooks/README.md +++ b/scripts/hooks/README.md @@ -15,7 +15,15 @@ older local workflows. Both [`scripts/hooks/pre-commit`](./pre-commit) and Authoritative behavior lives in `.githooks/pre-commit` and `.githooks/pre-push`. For explicit local runs outside git hooks, prefer the -`make verify-fast`, `make verify-pr`, and `make verify-full` entry points. A -successful `make verify-full` run now shares the same success stamp as the +`make verify-fast`, `make verify-pr`, and `make verify-full` entry points. + +The local full gate now runs as curated parallel lanes with isolated +`CARGO_TARGET_DIR`s, which keeps expensive cargo invocations from serializing on +the same target lock. 
`make verify-full-sequential` remains available as a +fallback if you need to debug the lane runner itself. + +A successful `make verify-full` run still shares the same success stamp as the canonical pre-push full gate, so pushing the same `HEAD` does not rerun that -identical full verification locally. +identical full verification locally. The staged and reduced local Rust paths are +also intentionally narrower than CI: heavy all-target clippy coverage stays in +CI, while local hooks bias toward faster iteration on the current work surface. diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index 38c068ec..f5396371 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -19,6 +19,8 @@ PINNED="${PINNED:-${PINNED_FROM_FILE:-1.90.0}}" VERIFY_FORCE="${VERIFY_FORCE:-0}" STAMP_DIR="${VERIFY_STAMP_DIR:-.git/verify-local}" VERIFY_USE_NEXTEST="${VERIFY_USE_NEXTEST:-0}" +VERIFY_LANE_MODE="${VERIFY_LANE_MODE:-parallel}" +VERIFY_LANE_ROOT="${VERIFY_LANE_ROOT:-target/verify-lanes}" SECONDS=0 format_elapsed() { @@ -148,6 +150,43 @@ readonly FULL_LOCAL_TEST_PACKAGES=( "ttd-browser" ) +readonly FULL_LOCAL_CLIPPY_CORE_PACKAGES=( + "warp-core" + "warp-geom" + "warp-wasm" + "echo-wasm-abi" +) + +readonly FULL_LOCAL_CLIPPY_SUPPORT_PACKAGES=( + "echo-scene-port" + "echo-scene-codec" + "echo-graph" + "echo-ttd" + "echo-dind-harness" + "echo-dind-tests" + "ttd-browser" + "ttd-protocol-rs" + "ttd-manifest" +) + +readonly FULL_LOCAL_CLIPPY_BIN_ONLY_PACKAGES=( + "xtask" +) + +readonly FULL_LOCAL_RUSTDOC_PACKAGES=( + "warp-core" + "warp-geom" + "warp-wasm" +) + +readonly FAST_CLIPPY_LIB_ONLY_PACKAGES=( + "warp-core" + "warp-wasm" + "ttd-browser" + "echo-dind-harness" + "echo-dind-tests" +) + ensure_command() { local cmd="$1" if ! 
command -v "$cmd" >/dev/null 2>&1; then @@ -359,6 +398,7 @@ run_docs_lint() { run_targeted_checks() { local crates=("$@") local crate + local rustdoc_crates=() if [[ ${#crates[@]} -eq 0 ]]; then echo "[verify-local] no changed crates detected; running docs-only checks" @@ -370,26 +410,31 @@ run_targeted_checks() { echo "[verify-local] cargo fmt --all -- --check" cargo +"$PINNED" fmt --all -- --check - run_crate_lint_and_check "${crates[@]}" + run_crate_lint_and_check targeted "${crates[@]}" - local public_doc_crates=("warp-core" "warp-geom" "warp-wasm") - for crate in "${public_doc_crates[@]}"; do + for crate in "${FULL_LOCAL_RUSTDOC_PACKAGES[@]}"; do if printf '%s\n' "${crates[@]}" | grep -qx "$crate"; then - echo "[verify-local] rustdoc warnings gate (${crate})" - RUSTDOCFLAGS="-D warnings" cargo +"$PINNED" doc -p "$crate" --no-deps + rustdoc_crates+=("$crate") fi done + for crate in "${rustdoc_crates[@]}"; do + echo "[verify-local] rustdoc warnings gate (${crate})" + RUSTDOCFLAGS="-D warnings" cargo +"$PINNED" doc -p "$crate" --no-deps + done + for crate in "${crates[@]}"; do if [[ ! 
-f "crates/${crate}/Cargo.toml" ]]; then continue fi if use_nextest; then - echo "[verify-local] cargo nextest run -p ${crate}" - cargo +"$PINNED" nextest run -p "$crate" + echo "[verify-local] cargo nextest run -p ${crate} --lib --tests" + cargo +"$PINNED" nextest run -p "$crate" --lib --tests else - echo "[verify-local] cargo test -p ${crate}" - cargo +"$PINNED" test -p "$crate" + local -a test_args=() + mapfile -t test_args < <(targeted_test_args_for_crate "$crate") + echo "[verify-local] cargo test -p ${crate} ${test_args[*]}" + cargo +"$PINNED" test -p "$crate" "${test_args[@]}" fi done @@ -397,6 +442,8 @@ run_targeted_checks() { } run_crate_lint_and_check() { + local scope="$1" + shift local crates=("$@") local crate @@ -405,8 +452,10 @@ run_crate_lint_and_check() { echo "[verify-local] skipping ${crate}: missing crates/${crate}/Cargo.toml" >&2 continue fi - echo "[verify-local] cargo clippy -p ${crate} --all-targets" - cargo +"$PINNED" clippy -p "$crate" --all-targets -- -D warnings -D missing_docs + local -a clippy_args=() + mapfile -t clippy_args < <(clippy_target_args_for_scope "$crate" "$scope") + echo "[verify-local] cargo clippy -p ${crate} ${clippy_args[*]}" + cargo +"$PINNED" clippy -p "$crate" "${clippy_args[@]}" -- -D warnings -D missing_docs echo "[verify-local] cargo check -p ${crate}" cargo +"$PINNED" check -p "$crate" --quiet done @@ -421,7 +470,7 @@ run_pre_commit_checks() { ensure_toolchain echo "[verify-local] pre-commit verification for staged crates: ${changed_crates[*]}" - run_crate_lint_and_check "${changed_crates[@]}" + run_crate_lint_and_check pre-commit "${changed_crates[@]}" } package_args() { @@ -431,6 +480,128 @@ package_args() { done } +lane_target_dir() { + local lane="$1" + printf '%s/%s' "$VERIFY_LANE_ROOT" "$lane" +} + +lane_cargo() { + local lane="$1" + shift + mkdir -p "$VERIFY_LANE_ROOT" + CARGO_TARGET_DIR="$(lane_target_dir "$lane")" cargo +"$PINNED" "$@" +} + +should_run_parallel_lanes() { + [[ "$VERIFY_LANE_MODE" == 
"parallel" ]] +} + +run_parallel_lanes() { + local suite="$1" + shift + + local logdir + logdir="$(mktemp -d "${TMPDIR:-/tmp}/verify-local-${suite}.XXXXXX")" + local -a lane_names=() + local -a lane_funcs=() + local -a lane_pids=() + local i + + while [[ $# -gt 0 ]]; do + lane_names+=("$1") + lane_funcs+=("$2") + shift 2 + done + + echo "[verify-local] ${suite}: launching ${#lane_names[@]} local lanes" + for i in "${!lane_names[@]}"; do + ( + set -euo pipefail + "${lane_funcs[$i]}" + ) >"${logdir}/${lane_names[$i]}.log" 2>&1 & + lane_pids+=("$!") + done + + local failed=0 + local rc + set +e + for i in "${!lane_names[@]}"; do + wait "${lane_pids[$i]}" + rc=$? + if [[ $rc -ne 0 ]]; then + failed=1 + echo "[verify-local] lane failed: ${lane_names[$i]}" >&2 + fi + done + set -e + + if [[ $failed -ne 0 ]]; then + for i in "${!lane_names[@]}"; do + local logfile="${logdir}/${lane_names[$i]}.log" + if [[ ! -s "$logfile" ]]; then + continue + fi + echo + echo "--- ${lane_names[$i]} ---" >&2 + cat "$logfile" >&2 + done + rm -rf "$logdir" + exit 1 + fi + + rm -rf "$logdir" +} + +crate_supports_lib_target() { + local crate="$1" + [[ "$crate" != "xtask" ]] +} + +crate_is_fast_clippy_lib_only() { + local crate="$1" + local candidate + for candidate in "${FAST_CLIPPY_LIB_ONLY_PACKAGES[@]}"; do + if [[ "$crate" == "$candidate" ]]; then + return 0 + fi + done + return 1 +} + +clippy_target_args_for_scope() { + local crate="$1" + local scope="$2" + + if [[ "$crate" == "xtask" ]]; then + printf '%s\n' "--bins" + return + fi + + if [[ "$scope" == "full" ]]; then + printf '%s\n' "--all-targets" + return + fi + + printf '%s\n' "--lib" + if ! 
crate_is_fast_clippy_lib_only "$crate"; then + printf '%s\n' "--tests" + fi +} + +targeted_test_args_for_crate() { + local crate="$1" + + if [[ "$crate" == "xtask" ]]; then + printf '%s\n' "--bins" + return + fi + + if crate_supports_lib_target "$crate"; then + printf '%s\n' "--lib" + fi + printf '%s\n' "--tests" +} + run_pattern_guards() { ensure_command rg @@ -471,45 +642,109 @@ run_determinism_guard() { fi } -run_full_checks() { - ensure_toolchain - echo "[verify-local] critical local gate" - echo "[verify-local] cargo fmt --all -- --check" +run_full_lane_fmt() { + echo "[verify-local][fmt] cargo fmt --all -- --check" cargo +"$PINNED" fmt --all -- --check +} - local full_args=() - mapfile -t full_args < <(package_args "${FULL_LOCAL_PACKAGES[@]}") - local full_test_args=() - mapfile -t full_test_args < <(package_args "${FULL_LOCAL_TEST_PACKAGES[@]}") +run_full_lane_clippy_core() { + local args=() + mapfile -t args < <(package_args "${FULL_LOCAL_CLIPPY_CORE_PACKAGES[@]}") + echo "[verify-local][clippy-core] curated clippy on core packages" + lane_cargo "full-clippy-core" clippy "${args[@]}" --lib -- -D warnings -D missing_docs +} - echo "[verify-local] cargo clippy on critical packages" - cargo +"$PINNED" clippy "${full_args[@]}" --all-targets -- -D warnings -D missing_docs +run_full_lane_clippy_support() { + local args=() + mapfile -t args < <(package_args "${FULL_LOCAL_CLIPPY_SUPPORT_PACKAGES[@]}") + echo "[verify-local][clippy-support] curated clippy on support packages" + lane_cargo "full-clippy-support" clippy "${args[@]}" --lib --tests -- -D warnings -D missing_docs +} - echo "[verify-local] tests on critical packages (lib + integration targets)" - cargo +"$PINNED" test "${full_test_args[@]}" --lib --tests - cargo +"$PINNED" test -p warp-wasm --features engine --lib - cargo +"$PINNED" test -p echo-wasm-abi --lib - cargo +"$PINNED" test -p warp-core --lib - cargo +"$PINNED" test -p warp-core --test inbox - cargo +"$PINNED" test -p warp-core --test 
invariant_property_tests - cargo +"$PINNED" test -p warp-core --test golden_vectors_phase0 - cargo +"$PINNED" test -p warp-core --test materialization_determinism +run_full_lane_clippy_bins() { + local args=() + mapfile -t args < <(package_args "${FULL_LOCAL_CLIPPY_BIN_ONLY_PACKAGES[@]}") + echo "[verify-local][clippy-bins] curated clippy on binary-only packages" + lane_cargo "full-clippy-bins" clippy "${args[@]}" --bins -- -D warnings -D missing_docs +} + +run_full_lane_tests_support() { + local args=() + mapfile -t args < <(package_args "${FULL_LOCAL_TEST_PACKAGES[@]}") + echo "[verify-local][tests-support] critical support-package tests" + lane_cargo "full-tests-support" test "${args[@]}" --lib --tests +} - echo "[verify-local] PRNG golden regression (warp-core)" - cargo +"$PINNED" test -p warp-core --features golden_prng --test prng_golden_regression +run_full_lane_tests_runtime() { + echo "[verify-local][tests-runtime] warp-wasm + ABI runtime checks" + lane_cargo "full-tests-runtime" test -p warp-wasm --features engine --lib + lane_cargo "full-tests-runtime" test -p echo-wasm-abi --lib +} + +run_full_lane_tests_warp_core() { + echo "[verify-local][tests-warp-core] curated warp-core suite" + lane_cargo "full-tests-warp-core" test -p warp-core --lib + lane_cargo "full-tests-warp-core" test -p warp-core --test inbox + lane_cargo "full-tests-warp-core" test -p warp-core --test invariant_property_tests + lane_cargo "full-tests-warp-core" test -p warp-core --test golden_vectors_phase0 + lane_cargo "full-tests-warp-core" test -p warp-core --test materialization_determinism + lane_cargo "full-tests-warp-core" test -p warp-core --features golden_prng --test prng_golden_regression +} +run_full_lane_rustdoc() { local doc_pkg - for doc_pkg in warp-core warp-geom warp-wasm; do - echo "[verify-local] rustdoc warnings gate (${doc_pkg})" - RUSTDOCFLAGS="-D warnings" cargo +"$PINNED" doc -p "${doc_pkg}" --no-deps + for doc_pkg in "${FULL_LOCAL_RUSTDOC_PACKAGES[@]}"; do + echo 
"[verify-local][rustdoc] ${doc_pkg}" + CARGO_TARGET_DIR="$(lane_target_dir "full-rustdoc")" \ + RUSTDOCFLAGS="-D warnings" \ + cargo +"$PINNED" doc -p "${doc_pkg}" --no-deps done +} +run_full_lane_guards() { run_pattern_guards run_spdx_check run_determinism_guard run_docs_lint } +run_full_checks_sequential() { + echo "[verify-local] critical local gate" + run_full_lane_fmt + run_full_lane_clippy_core + run_full_lane_clippy_support + run_full_lane_clippy_bins + run_full_lane_tests_support + run_full_lane_tests_runtime + run_full_lane_tests_warp_core + run_full_lane_rustdoc + run_full_lane_guards +} + +run_full_checks_parallel() { + echo "[verify-local] critical local gate" + run_parallel_lanes \ + "full" \ + "fmt" run_full_lane_fmt \ + "clippy-core" run_full_lane_clippy_core \ + "clippy-support" run_full_lane_clippy_support \ + "clippy-bins" run_full_lane_clippy_bins \ + "tests-support" run_full_lane_tests_support \ + "tests-runtime" run_full_lane_tests_runtime \ + "tests-warp-core" run_full_lane_tests_warp_core \ + "rustdoc" run_full_lane_rustdoc \ + "guards" run_full_lane_guards +} + +run_full_checks() { + ensure_toolchain + if should_run_parallel_lanes; then + run_full_checks_parallel + return + fi + run_full_checks_sequential +} + run_auto_mode() { local classification="$1" local suite diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index 782f3d5a..a594282d 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -79,6 +79,95 @@ EOF rm -rf "$tmp" } +run_fake_verify() { + local mode="$1" + local changed_file="$2" + local tmp + tmp="$(mktemp -d)" + + mkdir -p "$tmp/scripts" "$tmp/bin" "$tmp/.git" + mkdir -p "$tmp/crates/warp-core/src" + cp scripts/verify-local.sh "$tmp/scripts/verify-local.sh" + chmod +x "$tmp/scripts/verify-local.sh" + + cat >"$tmp/rust-toolchain.toml" <<'EOF' +[toolchain] +channel = "1.90.0" +EOF + + cat >"$tmp/crates/warp-core/Cargo.toml" <<'EOF' +[package] +name = "warp-core" 
+version = "0.0.0" +edition = "2021" +EOF + printf '%s\n' 'pub fn anchor() {}' >"$tmp/crates/warp-core/src/lib.rs" + + cat >"$tmp/bin/cargo" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +printf '%s|%s\n' "${CARGO_TARGET_DIR:-}" "$*" >>"${VERIFY_FAKE_CARGO_LOG}" +exit 0 +EOF + cat >"$tmp/bin/rustup" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +if [[ "${1:-}" == "toolchain" && "${2:-}" == "list" ]]; then + printf '1.90.0-aarch64-apple-darwin (default)\n' + exit 0 +fi +exit 0 +EOF + cat >"$tmp/bin/rg" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +exit 1 +EOF + cat >"$tmp/bin/npx" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +exit 0 +EOF + cat >"$tmp/bin/git" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +if [[ "${1:-}" == "rev-parse" && "${2:-}" == "HEAD" ]]; then + printf 'test-head\n' + exit 0 +fi +if [[ "${1:-}" == "rev-parse" && "${2:-}" == "--short" && "${3:-}" == "HEAD" ]]; then + printf 'test-head\n' + exit 0 +fi +exit 0 +EOF + chmod +x "$tmp/bin/cargo" "$tmp/bin/rustup" "$tmp/bin/rg" "$tmp/bin/npx" "$tmp/bin/git" + + local changed + changed="$(mktemp)" + printf '%s\n' "$changed_file" >"$changed" + local cargo_log + cargo_log="$(mktemp)" + + local output + output="$( + cd "$tmp" && \ + PATH="$tmp/bin:$PATH" \ + VERIFY_FORCE=1 \ + VERIFY_STAMP_SUBJECT="test-head" \ + VERIFY_CHANGED_FILES_FILE="$changed" \ + VERIFY_FAKE_CARGO_LOG="$cargo_log" \ + ./scripts/verify-local.sh "$mode" + )" + + printf '%s\n' "$output" + echo "--- cargo-log ---" + cat "$cargo_log" + + rm -f "$changed" "$cargo_log" + rm -rf "$tmp" +} + echo "=== verify-local classification ===" docs_output="$(run_detect docs/plans/adr-0008-and-0009.md docs/ROADMAP/backlog/tooling-misc.md)" @@ -227,11 +316,18 @@ critical_crates = { if prefix.startswith("crates/") } full_packages = set(parse_array("FULL_LOCAL_PACKAGES")) +full_clippy_core = set(parse_array("FULL_LOCAL_CLIPPY_CORE_PACKAGES")) +full_clippy_support = set(parse_array("FULL_LOCAL_CLIPPY_SUPPORT_PACKAGES")) +full_clippy_bins = 
set(parse_array("FULL_LOCAL_CLIPPY_BIN_ONLY_PACKAGES")) full_test_packages = set(parse_array("FULL_LOCAL_TEST_PACKAGES")) +fast_lib_only = set(parse_array("FAST_CLIPPY_LIB_ONLY_PACKAGES")) missing_build = sorted(critical_crates - full_packages) +missing_clippy = sorted(critical_crates - (full_clippy_core | full_clippy_support | full_clippy_bins)) print("missing_build=" + ",".join(missing_build)) +print("missing_clippy=" + ",".join(missing_clippy)) print("ttd_browser_tested=" + str("ttd-browser" in full_test_packages).lower()) +print("warp_core_fast_lib_only=" + str("warp-core" in fast_lib_only).lower()) PY )" if printf '%s\n' "$coverage_output" | grep -q '^missing_build=$'; then @@ -240,12 +336,64 @@ else fail "full-critical crates must all be present in FULL_LOCAL_PACKAGES" printf '%s\n' "$coverage_output" fi +if printf '%s\n' "$coverage_output" | grep -q '^missing_clippy=$'; then + pass "every full-critical crate is covered by one of the curated local clippy lanes" +else + fail "full-critical crates must all be present in the local clippy lane package sets" + printf '%s\n' "$coverage_output" +fi if printf '%s\n' "$coverage_output" | grep -q '^ttd_browser_tested=true$'; then pass "ttd-browser is covered by the full local test lane" else fail "ttd-browser must be exercised by the full local test lane" printf '%s\n' "$coverage_output" fi +if printf '%s\n' "$coverage_output" | grep -q '^warp_core_fast_lib_only=true$'; then + pass "warp-core uses the narrowed fast local clippy scope" +else + fail "warp-core should stay in the narrowed fast local clippy package set" + printf '%s\n' "$coverage_output" +fi + +if grep -q '^verify-full-sequential:' Makefile; then + pass "Makefile exposes a sequential fallback for the parallel full verifier" +else + fail "Makefile should expose verify-full-sequential as a fallback path" +fi + +fake_full_output="$(run_fake_verify full crates/warp-core/src/lib.rs)" +if printf '%s\n' "$fake_full_output" | grep -q '\[verify-local\] full: 
launching 9 local lanes'; then + pass "full verification fans out into explicit parallel lanes" +else + fail "full verification should launch the curated local lane set" + printf '%s\n' "$fake_full_output" +fi +if printf '%s\n' "$fake_full_output" | grep -q 'target/verify-lanes/full-clippy-core'; then + pass "full verification isolates clippy into its own target dir" +else + fail "full verification should route clippy through an isolated target dir" + printf '%s\n' "$fake_full_output" +fi +if printf '%s\n' "$fake_full_output" | grep -q 'target/verify-lanes/full-tests-warp-core'; then + pass "full verification isolates warp-core tests into their own target dir" +else + fail "full verification should route warp-core tests through an isolated target dir" + printf '%s\n' "$fake_full_output" +fi + +fake_fast_output="$(run_fake_verify fast crates/warp-core/src/lib.rs)" +if printf '%s\n' "$fake_fast_output" | grep -q 'clippy -p warp-core --lib -- -D warnings -D missing_docs'; then + pass "fast verification uses the narrowed warp-core clippy scope" +else + fail "fast verification should run warp-core clippy on the narrowed local target set" + printf '%s\n' "$fake_fast_output" +fi +if printf '%s\n' "$fake_fast_output" | grep -vq 'clippy -p warp-core --all-targets'; then + pass "fast verification no longer uses warp-core all-targets clippy" +else + fail "fast verification must not fall back to warp-core all-targets clippy" + printf '%s\n' "$fake_fast_output" +fi echo "PASS: $PASS" echo "FAIL: $FAIL" From 755c2bf649d0373013f4c62602373e7d3213e8e1 Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 17:58:37 -0700 Subject: [PATCH 07/18] feat(tooling): scope local verification lanes --- CHANGELOG.md | 3 + scripts/hooks/README.md | 5 + scripts/verify-local.sh | 280 +++++++++++++++++++++++++++---- tests/hooks/test_verify_local.sh | 43 ++++- 4 files changed, 299 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c894e8cd..a20d9dfc 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,9 @@ - **Changed** staged and reduced local Rust checks now use a narrower fast-path target surface, keeping the heaviest all-target clippy drag in CI instead of every local iteration loop. +- **Changed** full local verification is now scope-aware: tooling-only full + changes stay tooling-local, while critical Rust changes run local smoke lanes + and defer exhaustive proof to CI. - **Added** `make verify-full-sequential` as an explicit fallback when the lane runner itself needs debugging. diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md index 7bd5171e..521dee6b 100644 --- a/scripts/hooks/README.md +++ b/scripts/hooks/README.md @@ -22,6 +22,11 @@ The local full gate now runs as curated parallel lanes with isolated the same target lock. `make verify-full-sequential` remains available as a fallback if you need to debug the lane runner itself. +A critical path no longer means “run the same local Rust cargo gauntlet for +every kind of full change.” Tooling-only full changes stay tooling-local, while +critical Rust changes run a local smoke lane and leave the exhaustive all-target +proof to CI. + A successful `make verify-full` run still shares the same success stamp as the canonical pre-push full gate, so pushing the same `HEAD` does not rerun that identical full verification locally. 
The staged and reduced local Rust paths are diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index f5396371..e7ebb664 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -122,6 +122,28 @@ readonly FULL_CRITICAL_EXACT=( "Makefile" ) +readonly FULL_TOOLING_PREFIXES=( + ".github/workflows/" + ".githooks/" + "scripts/" +) + +readonly FULL_TOOLING_EXACT=( + "Makefile" + "package.json" + "pnpm-lock.yaml" + "pnpm-workspace.yaml" + "deny.toml" + "audit.toml" + "det-policy.yaml" +) + +readonly FULL_BROAD_RUST_EXACT=( + "Cargo.toml" + "Cargo.lock" + "rust-toolchain.toml" +) + readonly FULL_LOCAL_PACKAGES=( "warp-core" "warp-geom" @@ -187,6 +209,18 @@ readonly FAST_CLIPPY_LIB_ONLY_PACKAGES=( "echo-dind-tests" ) +FULL_SCOPE_MODE="" +FULL_SCOPE_HAS_TOOLING=0 +FULL_SCOPE_SELECTED_CRATES=() +FULL_SCOPE_CLIPPY_CORE_PACKAGES=() +FULL_SCOPE_CLIPPY_SUPPORT_PACKAGES=() +FULL_SCOPE_CLIPPY_BIN_ONLY_PACKAGES=() +FULL_SCOPE_TEST_SUPPORT_PACKAGES=() +FULL_SCOPE_RUSTDOC_PACKAGES=() +FULL_SCOPE_RUN_WARP_CORE_SMOKE=0 +FULL_SCOPE_RUN_WARP_WASM_RUNTIME=0 +FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=0 + ensure_command() { local cmd="$1" if ! 
command -v "$cmd" >/dev/null 2>&1; then @@ -281,6 +315,46 @@ is_docs_only_path() { [[ "$file" == docs/* || "$file" == *.md ]] } +array_contains() { + local needle="$1" + shift + local item + for item in "$@"; do + if [[ "$item" == "$needle" ]]; then + return 0 + fi + done + return 1 +} + +is_tooling_full_path() { + local file="$1" + local prefix + for prefix in "${FULL_TOOLING_PREFIXES[@]}"; do + if [[ "$file" == "$prefix"* ]]; then + return 0 + fi + done + local exact + for exact in "${FULL_TOOLING_EXACT[@]}"; do + if [[ "$file" == "$exact" ]]; then + return 0 + fi + done + return 1 +} + +is_broad_rust_full_path() { + local file="$1" + local exact + for exact in "${FULL_BROAD_RUST_EXACT[@]}"; do + if [[ "$file" == "$exact" ]]; then + return 0 + fi + done + return 1 +} + classify_change_set() { local had_files=0 local classification="docs" @@ -309,6 +383,16 @@ list_changed_crates() { printf '%s\n' "$CHANGED_FILES" | sed -n 's#^crates/\([^/]*\)/.*#\1#p' | sort -u } +list_changed_critical_crates() { + local crate + while IFS= read -r crate; do + [[ -z "$crate" ]] && continue + if array_contains "$crate" "${FULL_LOCAL_PACKAGES[@]}"; then + printf '%s\n' "$crate" + fi + done < <(list_changed_crates) +} + stamp_suite_for_classification() { local classification="$1" @@ -514,6 +598,7 @@ run_parallel_lanes() { done echo "[verify-local] ${suite}: launching ${#lane_names[@]} local lanes" + echo "[verify-local] ${suite}: lanes=${lane_names[*]}" for i in "${!lane_names[@]}"; do ( set -euo pipefail @@ -602,6 +687,80 @@ targeted_test_args_for_crate() { printf '%s\n' "--tests" } +filter_package_set_by_selection() { + local selection_name="$1" + local candidate_name="$2" + local pkg + local -n selection_ref="$selection_name" + local -n candidate_ref="$candidate_name" + + for pkg in "${candidate_ref[@]}"; do + if array_contains "$pkg" "${selection_ref[@]}"; then + printf '%s\n' "$pkg" + fi + done +} + +prepare_full_scope() { + local broad_rust_change=0 + local tooling_change=0 + 
local file + + while IFS= read -r file; do + [[ -z "$file" ]] && continue + if is_broad_rust_full_path "$file"; then + broad_rust_change=1 + fi + if is_tooling_full_path "$file"; then + tooling_change=1 + fi + done <<< "${CHANGED_FILES}" + + FULL_SCOPE_HAS_TOOLING=$tooling_change + + if [[ $broad_rust_change -eq 1 ]]; then + FULL_SCOPE_MODE="broad-rust" + FULL_SCOPE_SELECTED_CRATES=("${FULL_LOCAL_PACKAGES[@]}") + else + mapfile -t FULL_SCOPE_SELECTED_CRATES < <(list_changed_critical_crates) + if [[ ${#FULL_SCOPE_SELECTED_CRATES[@]} -gt 0 ]]; then + FULL_SCOPE_MODE="targeted-rust" + else + FULL_SCOPE_MODE="tooling-only" + fi + fi + + mapfile -t FULL_SCOPE_CLIPPY_CORE_PACKAGES < <( + filter_package_set_by_selection FULL_SCOPE_SELECTED_CRATES FULL_LOCAL_CLIPPY_CORE_PACKAGES + ) + mapfile -t FULL_SCOPE_CLIPPY_SUPPORT_PACKAGES < <( + filter_package_set_by_selection FULL_SCOPE_SELECTED_CRATES FULL_LOCAL_CLIPPY_SUPPORT_PACKAGES + ) + mapfile -t FULL_SCOPE_CLIPPY_BIN_ONLY_PACKAGES < <( + filter_package_set_by_selection FULL_SCOPE_SELECTED_CRATES FULL_LOCAL_CLIPPY_BIN_ONLY_PACKAGES + ) + mapfile -t FULL_SCOPE_TEST_SUPPORT_PACKAGES < <( + filter_package_set_by_selection FULL_SCOPE_SELECTED_CRATES FULL_LOCAL_TEST_PACKAGES + ) + mapfile -t FULL_SCOPE_RUSTDOC_PACKAGES < <( + filter_package_set_by_selection FULL_SCOPE_SELECTED_CRATES FULL_LOCAL_RUSTDOC_PACKAGES + ) + + FULL_SCOPE_RUN_WARP_CORE_SMOKE=0 + FULL_SCOPE_RUN_WARP_WASM_RUNTIME=0 + FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=0 + + if array_contains "warp-core" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then + FULL_SCOPE_RUN_WARP_CORE_SMOKE=1 + fi + if array_contains "warp-wasm" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then + FULL_SCOPE_RUN_WARP_WASM_RUNTIME=1 + fi + if array_contains "echo-wasm-abi" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then + FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=1 + fi +} + run_pattern_guards() { ensure_command rg @@ -648,52 +807,80 @@ run_full_lane_fmt() { } run_full_lane_clippy_core() { + if [[ 
${#FULL_SCOPE_CLIPPY_CORE_PACKAGES[@]} -eq 0 ]]; then + echo "[verify-local][clippy-core] no selected core packages" + return + fi local args=() - mapfile -t args < <(package_args "${FULL_LOCAL_CLIPPY_CORE_PACKAGES[@]}") - echo "[verify-local][clippy-core] curated clippy on core packages" + mapfile -t args < <(package_args "${FULL_SCOPE_CLIPPY_CORE_PACKAGES[@]}") + echo "[verify-local][clippy-core] curated clippy on selected core packages" lane_cargo "full-clippy-core" clippy "${args[@]}" --lib -- -D warnings -D missing_docs } run_full_lane_clippy_support() { + if [[ ${#FULL_SCOPE_CLIPPY_SUPPORT_PACKAGES[@]} -eq 0 ]]; then + echo "[verify-local][clippy-support] no selected support packages" + return + fi local args=() - mapfile -t args < <(package_args "${FULL_LOCAL_CLIPPY_SUPPORT_PACKAGES[@]}") - echo "[verify-local][clippy-support] curated clippy on support packages" + mapfile -t args < <(package_args "${FULL_SCOPE_CLIPPY_SUPPORT_PACKAGES[@]}") + echo "[verify-local][clippy-support] curated clippy on selected support packages" lane_cargo "full-clippy-support" clippy "${args[@]}" --lib --tests -- -D warnings -D missing_docs } run_full_lane_clippy_bins() { + if [[ ${#FULL_SCOPE_CLIPPY_BIN_ONLY_PACKAGES[@]} -eq 0 ]]; then + echo "[verify-local][clippy-bins] no selected binary-only packages" + return + fi local args=() - mapfile -t args < <(package_args "${FULL_LOCAL_CLIPPY_BIN_ONLY_PACKAGES[@]}") - echo "[verify-local][clippy-bins] curated clippy on binary-only packages" + mapfile -t args < <(package_args "${FULL_SCOPE_CLIPPY_BIN_ONLY_PACKAGES[@]}") + echo "[verify-local][clippy-bins] curated clippy on selected binary-only packages" lane_cargo "full-clippy-bins" clippy "${args[@]}" --bins -- -D warnings -D missing_docs } run_full_lane_tests_support() { + if [[ ${#FULL_SCOPE_TEST_SUPPORT_PACKAGES[@]} -eq 0 ]]; then + echo "[verify-local][tests-support] no selected support-package tests" + return + fi local args=() - mapfile -t args < <(package_args 
"${FULL_LOCAL_TEST_PACKAGES[@]}") - echo "[verify-local][tests-support] critical support-package tests" + mapfile -t args < <(package_args "${FULL_SCOPE_TEST_SUPPORT_PACKAGES[@]}") + echo "[verify-local][tests-support] selected support-package tests" lane_cargo "full-tests-support" test "${args[@]}" --lib --tests } run_full_lane_tests_runtime() { - echo "[verify-local][tests-runtime] warp-wasm + ABI runtime checks" - lane_cargo "full-tests-runtime" test -p warp-wasm --features engine --lib - lane_cargo "full-tests-runtime" test -p echo-wasm-abi --lib + if [[ "$FULL_SCOPE_RUN_WARP_WASM_RUNTIME" != "1" && "$FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME" != "1" ]]; then + echo "[verify-local][tests-runtime] no selected runtime packages" + return + fi + echo "[verify-local][tests-runtime] selected runtime checks" + if [[ "$FULL_SCOPE_RUN_WARP_WASM_RUNTIME" == "1" ]]; then + lane_cargo "full-tests-runtime" test -p warp-wasm --features engine --lib + fi + if [[ "$FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME" == "1" ]]; then + lane_cargo "full-tests-runtime" test -p echo-wasm-abi --lib + fi } run_full_lane_tests_warp_core() { - echo "[verify-local][tests-warp-core] curated warp-core suite" + if [[ "$FULL_SCOPE_RUN_WARP_CORE_SMOKE" != "1" ]]; then + echo "[verify-local][tests-warp-core] warp-core not selected" + return + fi + echo "[verify-local][tests-warp-core] local warp-core smoke suite" lane_cargo "full-tests-warp-core" test -p warp-core --lib lane_cargo "full-tests-warp-core" test -p warp-core --test inbox - lane_cargo "full-tests-warp-core" test -p warp-core --test invariant_property_tests - lane_cargo "full-tests-warp-core" test -p warp-core --test golden_vectors_phase0 - lane_cargo "full-tests-warp-core" test -p warp-core --test materialization_determinism - lane_cargo "full-tests-warp-core" test -p warp-core --features golden_prng --test prng_golden_regression } run_full_lane_rustdoc() { + if [[ ${#FULL_SCOPE_RUSTDOC_PACKAGES[@]} -eq 0 ]]; then + echo "[verify-local][rustdoc] no 
selected public-doc crates" + return + fi local doc_pkg - for doc_pkg in "${FULL_LOCAL_RUSTDOC_PACKAGES[@]}"; do + for doc_pkg in "${FULL_SCOPE_RUSTDOC_PACKAGES[@]}"; do echo "[verify-local][rustdoc] ${doc_pkg}" CARGO_TARGET_DIR="$(lane_target_dir "full-rustdoc")" \ RUSTDOCFLAGS="-D warnings" \ @@ -701,6 +888,19 @@ run_full_lane_rustdoc() { done } +run_full_lane_hook_tests() { + if [[ "$FULL_SCOPE_HAS_TOOLING" != "1" ]]; then + echo "[verify-local][hook-tests] no tooling changes detected" + return + fi + if [[ ! -f tests/hooks/test_verify_local.sh ]]; then + echo "[verify-local][hook-tests] tests/hooks/test_verify_local.sh not present" + return + fi + echo "[verify-local][hook-tests] hook regression coverage" + bash tests/hooks/test_verify_local.sh +} + run_full_lane_guards() { run_pattern_guards run_spdx_check @@ -709,8 +909,9 @@ run_full_lane_guards() { } run_full_checks_sequential() { - echo "[verify-local] critical local gate" + echo "[verify-local] critical local gate (${FULL_SCOPE_MODE})" run_full_lane_fmt + run_full_lane_hook_tests run_full_lane_clippy_core run_full_lane_clippy_support run_full_lane_clippy_bins @@ -722,22 +923,41 @@ run_full_checks_sequential() { } run_full_checks_parallel() { - echo "[verify-local] critical local gate" - run_parallel_lanes \ - "full" \ - "fmt" run_full_lane_fmt \ - "clippy-core" run_full_lane_clippy_core \ - "clippy-support" run_full_lane_clippy_support \ - "clippy-bins" run_full_lane_clippy_bins \ - "tests-support" run_full_lane_tests_support \ - "tests-runtime" run_full_lane_tests_runtime \ - "tests-warp-core" run_full_lane_tests_warp_core \ - "rustdoc" run_full_lane_rustdoc \ - "guards" run_full_lane_guards + local -a lanes=("full" "fmt" run_full_lane_fmt "guards" run_full_lane_guards) + + echo "[verify-local] critical local gate (${FULL_SCOPE_MODE})" + + if [[ "$FULL_SCOPE_HAS_TOOLING" == "1" ]]; then + lanes+=("hook-tests" run_full_lane_hook_tests) + fi + if [[ ${#FULL_SCOPE_CLIPPY_CORE_PACKAGES[@]} -gt 0 ]]; then + 
lanes+=("clippy-core" run_full_lane_clippy_core) + fi + if [[ ${#FULL_SCOPE_CLIPPY_SUPPORT_PACKAGES[@]} -gt 0 ]]; then + lanes+=("clippy-support" run_full_lane_clippy_support) + fi + if [[ ${#FULL_SCOPE_CLIPPY_BIN_ONLY_PACKAGES[@]} -gt 0 ]]; then + lanes+=("clippy-bins" run_full_lane_clippy_bins) + fi + if [[ ${#FULL_SCOPE_TEST_SUPPORT_PACKAGES[@]} -gt 0 ]]; then + lanes+=("tests-support" run_full_lane_tests_support) + fi + if [[ "$FULL_SCOPE_RUN_WARP_WASM_RUNTIME" == "1" || "$FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME" == "1" ]]; then + lanes+=("tests-runtime" run_full_lane_tests_runtime) + fi + if [[ "$FULL_SCOPE_RUN_WARP_CORE_SMOKE" == "1" ]]; then + lanes+=("tests-warp-core" run_full_lane_tests_warp_core) + fi + if [[ ${#FULL_SCOPE_RUSTDOC_PACKAGES[@]} -gt 0 ]]; then + lanes+=("rustdoc" run_full_lane_rustdoc) + fi + + run_parallel_lanes "${lanes[@]}" } run_full_checks() { ensure_toolchain + prepare_full_scope if should_run_parallel_lanes; then run_full_checks_parallel return diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index a594282d..68410b6a 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -85,7 +85,7 @@ run_fake_verify() { local tmp tmp="$(mktemp -d)" - mkdir -p "$tmp/scripts" "$tmp/bin" "$tmp/.git" + mkdir -p "$tmp/scripts" "$tmp/bin" "$tmp/.git" "$tmp/tests/hooks" mkdir -p "$tmp/crates/warp-core/src" cp scripts/verify-local.sh "$tmp/scripts/verify-local.sh" chmod +x "$tmp/scripts/verify-local.sh" @@ -143,6 +143,13 @@ exit 0 EOF chmod +x "$tmp/bin/cargo" "$tmp/bin/rustup" "$tmp/bin/rg" "$tmp/bin/npx" "$tmp/bin/git" + cat >"$tmp/tests/hooks/test_verify_local.sh" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +echo "fake hook coverage" +EOF + chmod +x "$tmp/tests/hooks/test_verify_local.sh" + local changed changed="$(mktemp)" printf '%s\n' "$changed_file" >"$changed" @@ -362,12 +369,18 @@ else fi fake_full_output="$(run_fake_verify full crates/warp-core/src/lib.rs)" -if printf '%s\n' 
"$fake_full_output" | grep -q '\[verify-local\] full: launching 9 local lanes'; then +if printf '%s\n' "$fake_full_output" | grep -q '\[verify-local\] full: launching '; then pass "full verification fans out into explicit parallel lanes" else fail "full verification should launch the curated local lane set" printf '%s\n' "$fake_full_output" fi +if printf '%s\n' "$fake_full_output" | grep -q 'critical local gate (targeted-rust)'; then + pass "critical crate changes use the targeted-rust full scope" +else + fail "critical crate changes should use the targeted-rust full scope" + printf '%s\n' "$fake_full_output" +fi if printf '%s\n' "$fake_full_output" | grep -q 'target/verify-lanes/full-clippy-core'; then pass "full verification isolates clippy into its own target dir" else @@ -380,6 +393,12 @@ else fail "full verification should route warp-core tests through an isolated target dir" printf '%s\n' "$fake_full_output" fi +if printf '%s\n' "$fake_full_output" | grep -q -- '--test invariant_property_tests'; then + fail "local warp-core full verification should stay on the smoke suite" + printf '%s\n' "$fake_full_output" +else + pass "local warp-core full verification stays on the smoke suite" +fi fake_fast_output="$(run_fake_verify fast crates/warp-core/src/lib.rs)" if printf '%s\n' "$fake_fast_output" | grep -q 'clippy -p warp-core --lib -- -D warnings -D missing_docs'; then @@ -395,6 +414,26 @@ else printf '%s\n' "$fake_fast_output" fi +fake_tooling_output="$(run_fake_verify full scripts/verify-local.sh)" +if printf '%s\n' "$fake_tooling_output" | grep -q 'critical local gate (tooling-only)'; then + pass "tooling-only full verification uses the tooling-only scope" +else + fail "tooling-only full verification should stay in tooling-only scope" + printf '%s\n' "$fake_tooling_output" +fi +if printf '%s\n' "$fake_tooling_output" | grep -q 'lanes=fmt guards hook-tests'; then + pass "tooling-only full verification runs hook regression coverage" +else + fail "tooling-only 
full verification should run hook regression coverage" + printf '%s\n' "$fake_tooling_output" +fi +if printf '%s\n' "$fake_tooling_output" | grep -q 'target/verify-lanes/full-clippy-core'; then + fail "tooling-only full verification should not launch core Rust lanes" + printf '%s\n' "$fake_tooling_output" +else + pass "tooling-only full verification skips core Rust lanes" +fi + echo "PASS: $PASS" echo "FAIL: $FAIL" From c52ad3003f33f7c6cc3fec425e1bf5f83d2ed91c Mon Sep 17 00:00:00 2001 From: James Ross Date: Sun, 15 Mar 2026 18:06:16 -0700 Subject: [PATCH 08/18] feat(tooling): specialize warp-core local smoke --- CHANGELOG.md | 4 +++ scripts/hooks/README.md | 5 +++ scripts/verify-local.sh | 53 +++++++++++++++++++++++++++++++- tests/hooks/test_verify_local.sh | 44 ++++++++++++++++++++++++++ 4 files changed, 105 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a20d9dfc..2255a1b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,10 @@ - **Changed** full local verification is now scope-aware: tooling-only full changes stay tooling-local, while critical Rust changes run local smoke lanes and defer exhaustive proof to CI. +- **Changed** local `warp-core` smoke selection is now file-family aware: + default source edits stay on `--lib`, runtime/inbox files pull `inbox`, + playback files pull playback-smoke tests, and PRNG edits pull the golden + regression. - **Added** `make verify-full-sequential` as an explicit fallback when the lane runner itself needs debugging. diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md index 521dee6b..0d9c0480 100644 --- a/scripts/hooks/README.md +++ b/scripts/hooks/README.md @@ -27,6 +27,11 @@ every kind of full change.” Tooling-only full changes stay tooling-local, whil critical Rust changes run a local smoke lane and leave the exhaustive all-target proof to CI. 
+That local smoke path is also file-family aware for `warp-core`: ordinary source +edits stay on the library test lane, while runtime/inbox, playback, and PRNG +touches pull the specific extra smoke checks they need instead of one fixed +bundle every time. + A successful `make verify-full` run still shares the same success stamp as the canonical pre-push full gate, so pushing the same `HEAD` does not rerun that identical full verification locally. The staged and reduced local Rust paths are diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index e7ebb664..ba3ea006 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -220,6 +220,8 @@ FULL_SCOPE_RUSTDOC_PACKAGES=() FULL_SCOPE_RUN_WARP_CORE_SMOKE=0 FULL_SCOPE_RUN_WARP_WASM_RUNTIME=0 FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=0 +FULL_SCOPE_WARP_CORE_EXTRA_TESTS=() +FULL_SCOPE_WARP_CORE_RUN_PRNG=0 ensure_command() { local cmd="$1" @@ -327,6 +329,15 @@ array_contains() { return 1 } +append_unique() { + local value="$1" + local array_name="$2" + local -n array_ref="$array_name" + if ! 
array_contains "$value" "${array_ref[@]}"; then + array_ref+=("$value") + fi +} + is_tooling_full_path() { local file="$1" local prefix @@ -701,6 +712,37 @@ filter_package_set_by_selection() { done } +prepare_warp_core_scope() { + FULL_SCOPE_WARP_CORE_EXTRA_TESTS=() + FULL_SCOPE_WARP_CORE_RUN_PRNG=0 + + local file + while IFS= read -r file; do + [[ -z "$file" ]] && continue + case "$file" in + crates/warp-core/tests/*.rs) + append_unique "$(basename "$file" .rs)" FULL_SCOPE_WARP_CORE_EXTRA_TESTS + ;; + crates/warp-core/src/coordinator.rs|\ + crates/warp-core/src/engine_impl.rs|\ + crates/warp-core/src/head.rs|\ + crates/warp-core/src/head_inbox.rs|\ + crates/warp-core/src/worldline_state.rs|\ + crates/warp-core/src/worldline_registry.rs|\ + crates/warp-core/src/runtime*.rs) + append_unique "inbox" FULL_SCOPE_WARP_CORE_EXTRA_TESTS + ;; + crates/warp-core/src/playback.rs) + append_unique "playback_cursor_tests" FULL_SCOPE_WARP_CORE_EXTRA_TESTS + append_unique "outputs_playback_tests" FULL_SCOPE_WARP_CORE_EXTRA_TESTS + ;; + crates/warp-core/src/math/prng.rs) + FULL_SCOPE_WARP_CORE_RUN_PRNG=1 + ;; + esac + done <<< "${CHANGED_FILES}" +} + prepare_full_scope() { local broad_rust_change=0 local tooling_change=0 @@ -749,9 +791,12 @@ prepare_full_scope() { FULL_SCOPE_RUN_WARP_CORE_SMOKE=0 FULL_SCOPE_RUN_WARP_WASM_RUNTIME=0 FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=0 + FULL_SCOPE_WARP_CORE_EXTRA_TESTS=() + FULL_SCOPE_WARP_CORE_RUN_PRNG=0 if array_contains "warp-core" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then FULL_SCOPE_RUN_WARP_CORE_SMOKE=1 + prepare_warp_core_scope fi if array_contains "warp-wasm" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then FULL_SCOPE_RUN_WARP_WASM_RUNTIME=1 @@ -871,7 +916,13 @@ run_full_lane_tests_warp_core() { fi echo "[verify-local][tests-warp-core] local warp-core smoke suite" lane_cargo "full-tests-warp-core" test -p warp-core --lib - lane_cargo "full-tests-warp-core" test -p warp-core --test inbox + local test_target + for test_target in 
"${FULL_SCOPE_WARP_CORE_EXTRA_TESTS[@]}"; do + lane_cargo "full-tests-warp-core" test -p warp-core --test "$test_target" + done + if [[ "$FULL_SCOPE_WARP_CORE_RUN_PRNG" == "1" ]]; then + lane_cargo "full-tests-warp-core" test -p warp-core --features golden_prng --test prng_golden_regression + fi } run_full_lane_rustdoc() { diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index 68410b6a..7483601f 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -414,6 +414,50 @@ else printf '%s\n' "$fake_fast_output" fi +fake_warp_core_default_output="$(run_fake_verify full crates/warp-core/src/provenance_store.rs)" +if printf '%s\n' "$fake_warp_core_default_output" | grep -q 'test -p warp-core --lib'; then + pass "warp-core default smoke keeps the lib test lane" +else + fail "warp-core default smoke should keep the lib test lane" + printf '%s\n' "$fake_warp_core_default_output" +fi +if printf '%s\n' "$fake_warp_core_default_output" | grep -q -- '--test inbox'; then + fail "warp-core default smoke should not always pull inbox" + printf '%s\n' "$fake_warp_core_default_output" +else + pass "warp-core default smoke avoids inbox when the file family does not need it" +fi + +fake_warp_core_runtime_output="$(run_fake_verify full crates/warp-core/src/coordinator.rs)" +if printf '%s\n' "$fake_warp_core_runtime_output" | grep -q -- '--test inbox'; then + pass "runtime-facing warp-core changes pull the inbox smoke test" +else + fail "runtime-facing warp-core changes should pull the inbox smoke test" + printf '%s\n' "$fake_warp_core_runtime_output" +fi + +fake_warp_core_playback_output="$(run_fake_verify full crates/warp-core/src/playback.rs)" +if printf '%s\n' "$fake_warp_core_playback_output" | grep -q -- '--test playback_cursor_tests'; then + pass "playback changes pull the playback cursor smoke test" +else + fail "playback changes should pull the playback cursor smoke test" + printf '%s\n' 
"$fake_warp_core_playback_output" +fi +if printf '%s\n' "$fake_warp_core_playback_output" | grep -q -- '--test outputs_playback_tests'; then + pass "playback changes pull the outputs playback smoke test" +else + fail "playback changes should pull the outputs playback smoke test" + printf '%s\n' "$fake_warp_core_playback_output" +fi + +fake_warp_core_prng_output="$(run_fake_verify full crates/warp-core/src/math/prng.rs)" +if printf '%s\n' "$fake_warp_core_prng_output" | grep -q -- '--features golden_prng --test prng_golden_regression'; then + pass "PRNG changes pull the golden regression smoke test" +else + fail "PRNG changes should pull the golden regression smoke test" + printf '%s\n' "$fake_warp_core_prng_output" +fi + fake_tooling_output="$(run_fake_verify full scripts/verify-local.sh)" if printf '%s\n' "$fake_tooling_output" | grep -q 'critical local gate (tooling-only)'; then pass "tooling-only full verification uses the tooling-only scope" From 515e41cd3adb47afb44ade02b0790adc12c3743f Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 08:40:09 -0700 Subject: [PATCH 09/18] feat(tooling): specialize wasm local smoke --- CHANGELOG.md | 5 ++ scripts/hooks/README.md | 5 ++ scripts/verify-local.sh | 109 ++++++++++++++++++++++++++----- tests/hooks/test_verify_local.sh | 64 ++++++++++++++++++ 4 files changed, 166 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2255a1b8..c4ea32da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,11 @@ default source edits stay on `--lib`, runtime/inbox files pull `inbox`, playback files pull playback-smoke tests, and PRNG edits pull the golden regression. +- **Changed** local `warp-wasm` and `echo-wasm-abi` smoke selection is now + file-family aware too: `warp-wasm/src/lib.rs` stays on plain lib smoke, + `warp_kernel.rs` pulls the engine-enabled lane, canonical ABI work pulls only + canonical/floating-point vectors, and non-Rust crate docs no longer wake Rust + lanes at all. 
- **Added** `make verify-full-sequential` as an explicit fallback when the lane runner itself needs debugging. diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md index 0d9c0480..d3604827 100644 --- a/scripts/hooks/README.md +++ b/scripts/hooks/README.md @@ -32,6 +32,11 @@ edits stay on the library test lane, while runtime/inbox, playback, and PRNG touches pull the specific extra smoke checks they need instead of one fixed bundle every time. +The same principle now applies to the WASM boundary crates: `warp-wasm` +distinguishes plain lib work from `warp_kernel` engine work, `echo-wasm-abi` +pulls targeted canonical/codec vectors when those surfaces move, and README-only +or other non-Rust crate changes do not wake the Rust smoke lanes. + A successful `make verify-full` run still shares the same success stamp as the canonical pre-push full gate, so pushing the same `HEAD` does not rerun that identical full verification locally. The staged and reduced local Rust paths are diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index ba3ea006..1399347c 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -218,8 +218,9 @@ FULL_SCOPE_CLIPPY_BIN_ONLY_PACKAGES=() FULL_SCOPE_TEST_SUPPORT_PACKAGES=() FULL_SCOPE_RUSTDOC_PACKAGES=() FULL_SCOPE_RUN_WARP_CORE_SMOKE=0 -FULL_SCOPE_RUN_WARP_WASM_RUNTIME=0 -FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=0 +FULL_SCOPE_WARP_WASM_TEST_MODE="none" +FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB=0 +FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS=() FULL_SCOPE_WARP_CORE_EXTRA_TESTS=() FULL_SCOPE_WARP_CORE_RUN_PRNG=0 @@ -395,13 +396,19 @@ list_changed_crates() { } list_changed_critical_crates() { - local crate - while IFS= read -r crate; do - [[ -z "$crate" ]] && continue - if array_contains "$crate" "${FULL_LOCAL_PACKAGES[@]}"; then - printf '%s\n' "$crate" - fi - done < <(list_changed_crates) + local file crate + while IFS= read -r file; do + [[ -z "$file" ]] && continue + case "$file" in + 
crates/*/Cargo.toml|crates/*/build.rs|crates/*/src/*|crates/*/tests/*) + crate="$(printf '%s\n' "$file" | sed -n 's#^crates/\([^/]*\)/.*#\1#p')" + [[ -z "$crate" ]] && continue + if array_contains "$crate" "${FULL_LOCAL_PACKAGES[@]}"; then + printf '%s\n' "$crate" + fi + ;; + esac + done <<< "${CHANGED_FILES}" | sort -u } stamp_suite_for_classification() { @@ -743,6 +750,66 @@ prepare_warp_core_scope() { done <<< "${CHANGED_FILES}" } +prepare_warp_wasm_scope() { + FULL_SCOPE_WARP_WASM_TEST_MODE="none" + + if [[ "$FULL_SCOPE_MODE" == "broad-rust" ]]; then + FULL_SCOPE_WARP_WASM_TEST_MODE="engine-lib" + return + fi + + local file + while IFS= read -r file; do + [[ -z "$file" ]] && continue + case "$file" in + crates/warp-wasm/Cargo.toml|crates/warp-wasm/src/warp_kernel.rs) + FULL_SCOPE_WARP_WASM_TEST_MODE="engine-lib" + return + ;; + crates/warp-wasm/src/lib.rs) + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" == "none" ]]; then + FULL_SCOPE_WARP_WASM_TEST_MODE="plain-lib" + fi + ;; + esac + done <<< "${CHANGED_FILES}" +} + +prepare_echo_wasm_abi_scope() { + FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB=0 + FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS=() + + if [[ "$FULL_SCOPE_MODE" == "broad-rust" ]]; then + FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB=1 + return + fi + + local file test_name + while IFS= read -r file; do + [[ -z "$file" ]] && continue + case "$file" in + crates/echo-wasm-abi/Cargo.toml|\ + crates/echo-wasm-abi/src/lib.rs|\ + crates/echo-wasm-abi/src/kernel_port.rs|\ + crates/echo-wasm-abi/src/eintlog.rs|\ + crates/echo-wasm-abi/src/ttd.rs) + FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB=1 + ;; + crates/echo-wasm-abi/src/canonical.rs) + append_unique "canonical_vectors" FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS + append_unique "non_canonical_floats" FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS + ;; + crates/echo-wasm-abi/src/codec.rs) + append_unique "codec" FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS + ;; + crates/echo-wasm-abi/tests/*.rs) + test_name="$(basename "$file" .rs)" + append_unique "$test_name" 
FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS + ;; + esac + done <<< "${CHANGED_FILES}" +} + prepare_full_scope() { local broad_rust_change=0 local tooling_change=0 @@ -789,8 +856,9 @@ prepare_full_scope() { ) FULL_SCOPE_RUN_WARP_CORE_SMOKE=0 - FULL_SCOPE_RUN_WARP_WASM_RUNTIME=0 - FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=0 + FULL_SCOPE_WARP_WASM_TEST_MODE="none" + FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB=0 + FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS=() FULL_SCOPE_WARP_CORE_EXTRA_TESTS=() FULL_SCOPE_WARP_CORE_RUN_PRNG=0 @@ -799,10 +867,10 @@ prepare_full_scope() { prepare_warp_core_scope fi if array_contains "warp-wasm" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then - FULL_SCOPE_RUN_WARP_WASM_RUNTIME=1 + prepare_warp_wasm_scope fi if array_contains "echo-wasm-abi" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then - FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME=1 + prepare_echo_wasm_abi_scope fi } @@ -896,17 +964,24 @@ run_full_lane_tests_support() { } run_full_lane_tests_runtime() { - if [[ "$FULL_SCOPE_RUN_WARP_WASM_RUNTIME" != "1" && "$FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME" != "1" ]]; then + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" == "none" && "$FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB" != "1" && ${#FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS[@]} -eq 0 ]]; then echo "[verify-local][tests-runtime] no selected runtime packages" return fi echo "[verify-local][tests-runtime] selected runtime checks" - if [[ "$FULL_SCOPE_RUN_WARP_WASM_RUNTIME" == "1" ]]; then + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" == "plain-lib" ]]; then + lane_cargo "full-tests-runtime" test -p warp-wasm --lib + fi + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" == "engine-lib" ]]; then lane_cargo "full-tests-runtime" test -p warp-wasm --features engine --lib fi - if [[ "$FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME" == "1" ]]; then + if [[ "$FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB" == "1" ]]; then lane_cargo "full-tests-runtime" test -p echo-wasm-abi --lib fi + local test_target + for test_target in "${FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS[@]}"; do + lane_cargo "full-tests-runtime" 
test -p echo-wasm-abi --test "$test_target" + done } run_full_lane_tests_warp_core() { @@ -993,7 +1068,7 @@ run_full_checks_parallel() { if [[ ${#FULL_SCOPE_TEST_SUPPORT_PACKAGES[@]} -gt 0 ]]; then lanes+=("tests-support" run_full_lane_tests_support) fi - if [[ "$FULL_SCOPE_RUN_WARP_WASM_RUNTIME" == "1" || "$FULL_SCOPE_RUN_ECHO_WASM_ABI_RUNTIME" == "1" ]]; then + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" != "none" || "$FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB" == "1" || ${#FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS[@]} -gt 0 ]]; then lanes+=("tests-runtime" run_full_lane_tests_runtime) fi if [[ "$FULL_SCOPE_RUN_WARP_CORE_SMOKE" == "1" ]]; then diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index 7483601f..94df1093 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -458,6 +458,70 @@ else printf '%s\n' "$fake_warp_core_prng_output" fi +fake_warp_wasm_lib_output="$(run_fake_verify full crates/warp-wasm/src/lib.rs)" +if printf '%s\n' "$fake_warp_wasm_lib_output" | grep -q 'test -p warp-wasm --lib'; then + pass "warp-wasm lib changes use the plain lib smoke lane" +else + fail "warp-wasm lib changes should use the plain lib smoke lane" + printf '%s\n' "$fake_warp_wasm_lib_output" +fi +if printf '%s\n' "$fake_warp_wasm_lib_output" | grep -q -- '--features engine --lib'; then + fail "warp-wasm lib changes should not force the engine smoke lane" + printf '%s\n' "$fake_warp_wasm_lib_output" +else + pass "warp-wasm lib changes avoid the engine smoke lane" +fi + +fake_warp_wasm_kernel_output="$(run_fake_verify full crates/warp-wasm/src/warp_kernel.rs)" +if printf '%s\n' "$fake_warp_wasm_kernel_output" | grep -q -- 'test -p warp-wasm --features engine --lib'; then + pass "warp-kernel changes use the engine-enabled lib smoke lane" +else + fail "warp-kernel changes should use the engine-enabled lib smoke lane" + printf '%s\n' "$fake_warp_wasm_kernel_output" +fi + +fake_echo_wasm_abi_kernel_port_output="$(run_fake_verify full 
crates/echo-wasm-abi/src/kernel_port.rs)" +if printf '%s\n' "$fake_echo_wasm_abi_kernel_port_output" | grep -q -- 'test -p echo-wasm-abi --lib'; then + pass "echo-wasm-abi kernel-port changes keep the lib smoke lane" +else + fail "echo-wasm-abi kernel-port changes should keep the lib smoke lane" + printf '%s\n' "$fake_echo_wasm_abi_kernel_port_output" +fi + +fake_echo_wasm_abi_canonical_output="$(run_fake_verify full crates/echo-wasm-abi/src/canonical.rs)" +if printf '%s\n' "$fake_echo_wasm_abi_canonical_output" | grep -q -- '--test canonical_vectors'; then + pass "canonical ABI changes pull canonical vector coverage" +else + fail "canonical ABI changes should pull canonical vector coverage" + printf '%s\n' "$fake_echo_wasm_abi_canonical_output" +fi +if printf '%s\n' "$fake_echo_wasm_abi_canonical_output" | grep -q -- '--test non_canonical_floats'; then + pass "canonical ABI changes pull non-canonical float coverage" +else + fail "canonical ABI changes should pull non-canonical float coverage" + printf '%s\n' "$fake_echo_wasm_abi_canonical_output" +fi +if printf '%s\n' "$fake_echo_wasm_abi_canonical_output" | grep -q -- 'test -p echo-wasm-abi --lib'; then + fail "canonical ABI changes should not always force the lib smoke lane" + printf '%s\n' "$fake_echo_wasm_abi_canonical_output" +else + pass "canonical ABI changes avoid the generic lib smoke lane" +fi + +fake_warp_wasm_readme_output="$(run_fake_verify full crates/warp-wasm/README.md)" +if printf '%s\n' "$fake_warp_wasm_readme_output" | grep -q 'critical local gate (tooling-only)'; then + pass "non-rust critical crate docs stay off the Rust smoke lanes" +else + fail "non-rust critical crate docs should stay off the Rust smoke lanes" + printf '%s\n' "$fake_warp_wasm_readme_output" +fi +if printf '%s\n' "$fake_warp_wasm_readme_output" | grep -q 'tests-runtime'; then + fail "non-rust critical crate docs should not launch runtime smoke lanes" + printf '%s\n' "$fake_warp_wasm_readme_output" +else + pass "non-rust 
critical crate docs skip runtime smoke lanes" +fi + fake_tooling_output="$(run_fake_verify full scripts/verify-local.sh)" if printf '%s\n' "$fake_tooling_output" | grep -q 'critical local gate (tooling-only)'; then pass "tooling-only full verification uses the tooling-only scope" From a3f143a931791013017471b265d7ab2bda6913f5 Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 08:54:58 -0700 Subject: [PATCH 10/18] feat(tooling): add ultra-fast local lane --- CHANGELOG.md | 4 + Makefile | 5 +- scripts/hooks/README.md | 10 ++- scripts/verify-local.sh | 129 ++++++++++++++++++++++++++++++- tests/hooks/test_verify_local.sh | 68 ++++++++++++++++ 5 files changed, 210 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4ea32da..556808a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,10 @@ `warp_kernel.rs` pulls the engine-enabled lane, canonical ABI work pulls only canonical/floating-point vectors, and non-Rust crate docs no longer wake Rust lanes at all. +- **Added** `make verify-ultra-fast` as the shortest local edit-loop lane: + changed Rust crates get `cargo check`, critical runtime surfaces still pull + targeted smoke tests, tooling-only changes stay on a syntax/smoke path, and + clippy/rustdoc/guard scans stay on heavier local paths and CI. - **Added** `make verify-full-sequential` as an explicit fallback when the lane runner itself needs debugging. 
diff --git a/Makefile b/Makefile index 5bf32d5a..0babe7a7 100644 --- a/Makefile +++ b/Makefile @@ -7,12 +7,15 @@ SHELL := /bin/bash PORT ?= 5173 BENCH_PORT ?= 8000 -.PHONY: hooks verify-fast verify-pr verify-full verify-full-sequential docs docs-build docs-ci +.PHONY: hooks verify-ultra-fast verify-fast verify-pr verify-full verify-full-sequential docs docs-build docs-ci hooks: @git config core.hooksPath .githooks @chmod +x .githooks/* 2>/dev/null || true @echo "[hooks] Installed git hooks from .githooks (core.hooksPath)" +verify-ultra-fast: + @./scripts/verify-local.sh ultra-fast + verify-fast: @./scripts/verify-local.sh fast diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md index d3604827..31fa560a 100644 --- a/scripts/hooks/README.md +++ b/scripts/hooks/README.md @@ -15,7 +15,8 @@ older local workflows. Both [`scripts/hooks/pre-commit`](./pre-commit) and Authoritative behavior lives in `.githooks/pre-commit` and `.githooks/pre-push`. For explicit local runs outside git hooks, prefer the -`make verify-fast`, `make verify-pr`, and `make verify-full` entry points. +`make verify-ultra-fast`, `make verify-fast`, `make verify-pr`, and +`make verify-full` entry points. The local full gate now runs as curated parallel lanes with isolated `CARGO_TARGET_DIR`s, which keeps expensive cargo invocations from serializing on @@ -37,6 +38,13 @@ distinguishes plain lib work from `warp_kernel` engine work, `echo-wasm-abi` pulls targeted canonical/codec vectors when those surfaces move, and README-only or other non-Rust crate changes do not wake the Rust smoke lanes. +`make verify-ultra-fast` is now the shortest edit-loop lane. It stays +compile-first: Rust changes get `cargo check` on changed Rust crates plus the +same targeted critical smoke selection used by the full gate, while clippy, +rustdoc, guard scans, and exhaustive local proof stay on the heavier paths and +in CI. 
Tooling-only changes stay on a syntax/smoke path instead of inheriting +the full hook regression suite. + A successful `make verify-full` run still shares the same success stamp as the canonical pre-push full gate, so pushing the same `HEAD` does not rerun that identical full verification locally. The staged and reduced local Rust paths are diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index 1399347c..a82555f3 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -269,11 +269,21 @@ list_changed_index_files() { git diff --cached --name-only --diff-filter=ACMRTUXBD } +list_changed_worktree_files() { + { + git diff --name-only --diff-filter=ACMRTUXBD HEAD + git ls-files --others --exclude-standard + } | awk 'NF' | sort -u +} + mode_context() { case "$1" in pre-commit|detect-pre-commit) printf 'pre-commit\n' ;; + fast|ultra-fast) + printf 'working-tree\n' + ;; *) printf '%s\n' "$1" ;; @@ -293,6 +303,11 @@ list_changed_files() { return fi + if [[ "$context" == "working-tree" ]]; then + list_changed_worktree_files + return + fi + list_changed_branch_files } @@ -395,6 +410,20 @@ list_changed_crates() { printf '%s\n' "$CHANGED_FILES" | sed -n 's#^crates/\([^/]*\)/.*#\1#p' | sort -u } +list_changed_rust_crates() { + local file crate + while IFS= read -r file; do + [[ -z "$file" ]] && continue + case "$file" in + crates/*/Cargo.toml|crates/*/build.rs|crates/*/src/*|crates/*/tests/*) + crate="$(printf '%s\n' "$file" | sed -n 's#^crates/\([^/]*\)/.*#\1#p')" + [[ -z "$crate" ]] && continue + printf '%s\n' "$crate" + ;; + esac + done <<< "${CHANGED_FILES}" | sort -u +} + list_changed_critical_crates() { local file crate while IFS= read -r file; do @@ -564,9 +593,9 @@ run_crate_lint_and_check() { } run_pre_commit_checks() { - mapfile -t changed_crates < <(list_changed_crates) + mapfile -t changed_crates < <(list_changed_rust_crates) if [[ ${#changed_crates[@]} -eq 0 ]]; then - echo "[verify-local] pre-commit: no staged crates detected" + echo 
"[verify-local] pre-commit: no staged Rust crates detected" return fi @@ -1027,6 +1056,14 @@ run_full_lane_hook_tests() { bash tests/hooks/test_verify_local.sh } +run_ultra_fast_tooling_smoke() { + if [[ "$FULL_SCOPE_HAS_TOOLING" != "1" ]]; then + return + fi + echo "[verify-local][ultra-fast] tooling smoke" + bash -n scripts/verify-local.sh tests/hooks/test_verify_local.sh +} + run_full_lane_guards() { run_pattern_guards run_spdx_check @@ -1091,6 +1128,87 @@ run_full_checks() { run_full_checks_sequential } +run_ultra_fast_smoke() { + prepare_full_scope + + echo "[verify-local] ultra-fast critical smoke (${FULL_SCOPE_MODE})" + + if [[ "$FULL_SCOPE_HAS_TOOLING" == "1" ]]; then + run_ultra_fast_tooling_smoke + fi + + if [[ "$FULL_SCOPE_RUN_WARP_CORE_SMOKE" == "1" ]]; then + echo "[verify-local][ultra-fast] warp-core smoke" + cargo +"$PINNED" test -p warp-core --lib + local warp_core_test_target + for warp_core_test_target in "${FULL_SCOPE_WARP_CORE_EXTRA_TESTS[@]}"; do + cargo +"$PINNED" test -p warp-core --test "$warp_core_test_target" + done + if [[ "$FULL_SCOPE_WARP_CORE_RUN_PRNG" == "1" ]]; then + cargo +"$PINNED" test -p warp-core --features golden_prng --test prng_golden_regression + fi + fi + + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" == "plain-lib" ]]; then + echo "[verify-local][ultra-fast] warp-wasm plain lib smoke" + cargo +"$PINNED" test -p warp-wasm --lib + fi + if [[ "$FULL_SCOPE_WARP_WASM_TEST_MODE" == "engine-lib" ]]; then + echo "[verify-local][ultra-fast] warp-wasm engine lib smoke" + cargo +"$PINNED" test -p warp-wasm --features engine --lib + fi + if [[ "$FULL_SCOPE_ECHO_WASM_ABI_RUN_LIB" == "1" ]]; then + echo "[verify-local][ultra-fast] echo-wasm-abi lib smoke" + cargo +"$PINNED" test -p echo-wasm-abi --lib + fi + local abi_test_target + for abi_test_target in "${FULL_SCOPE_ECHO_WASM_ABI_EXTRA_TESTS[@]}"; do + echo "[verify-local][ultra-fast] echo-wasm-abi --test ${abi_test_target}" + cargo +"$PINNED" test -p echo-wasm-abi --test 
"$abi_test_target" + done +} + +run_ultra_fast_checks() { + local classification="$1" + local -a changed_crates=() + + if [[ "$classification" == "docs" ]]; then + echo "[verify-local] ultra-fast docs-only change set" + run_docs_lint + return + fi + + mapfile -t changed_crates < <(list_changed_rust_crates) + if [[ ${#changed_crates[@]} -eq 0 ]]; then + if [[ "$classification" == "full" ]]; then + run_ultra_fast_smoke + return + fi + echo "[verify-local] ultra-fast: no changed Rust crates detected" + run_docs_lint + return + fi + + ensure_toolchain + echo "[verify-local] ultra-fast verification for changed Rust crates: ${changed_crates[*]}" + echo "[verify-local] cargo fmt --all -- --check" + cargo +"$PINNED" fmt --all -- --check + + local crate + for crate in "${changed_crates[@]}"; do + if [[ ! -f "crates/${crate}/Cargo.toml" ]]; then + echo "[verify-local] skipping ${crate}: missing crates/${crate}/Cargo.toml" >&2 + continue + fi + echo "[verify-local] cargo check -p ${crate}" + cargo +"$PINNED" check -p "$crate" --quiet + done + + if [[ "$classification" == "full" ]]; then + run_ultra_fast_smoke + fi +} + run_auto_mode() { local classification="$1" local suite @@ -1147,9 +1265,12 @@ case "$MODE" in printf 'changed_crates=%s\n' "$(list_changed_crates | paste -sd, -)" ;; fast) - mapfile -t changed_crates < <(list_changed_crates) + mapfile -t changed_crates < <(list_changed_rust_crates) run_targeted_checks "${changed_crates[@]}" ;; + ultra-fast) + run_ultra_fast_checks "$CLASSIFICATION" + ;; pre-commit) if should_skip_via_stamp "$(stamp_suite_for_classification "$CLASSIFICATION")"; then echo "[verify-local] reusing cached pre-commit verification for index $(printf '%.12s' "$VERIFY_STAMP_SUBJECT")" @@ -1170,7 +1291,7 @@ case "$MODE" in write_stamp "full" ;; *) - echo "usage: scripts/verify-local.sh [detect|detect-pre-commit|fast|pre-commit|pr|full|auto|pre-push]" >&2 + echo "usage: scripts/verify-local.sh 
[detect|detect-pre-commit|ultra-fast|fast|pre-commit|pr|full|auto|pre-push]" >&2 exit 1 ;; esac diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index 94df1093..244ea7ad 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -362,6 +362,12 @@ else printf '%s\n' "$coverage_output" fi +if grep -q '^verify-ultra-fast:' Makefile; then + pass "Makefile exposes an ultra-fast edit-loop lane" +else + fail "Makefile should expose verify-ultra-fast for the shortest local loop" +fi + if grep -q '^verify-full-sequential:' Makefile; then pass "Makefile exposes a sequential fallback for the parallel full verifier" else @@ -414,6 +420,68 @@ else printf '%s\n' "$fake_fast_output" fi +fake_ultra_fast_output="$(run_fake_verify ultra-fast crates/warp-core/src/coordinator.rs)" +if printf '%s\n' "$fake_ultra_fast_output" | grep -q 'ultra-fast verification for changed Rust crates: warp-core'; then + pass "ultra-fast reports the narrowed changed Rust crate set" +else + fail "ultra-fast should report the narrowed changed Rust crate set" + printf '%s\n' "$fake_ultra_fast_output" +fi +if printf '%s\n' "$fake_ultra_fast_output" | grep -q 'cargo check -p warp-core'; then + pass "ultra-fast runs cargo check on changed Rust crates" +else + fail "ultra-fast should run cargo check on changed Rust crates" + printf '%s\n' "$fake_ultra_fast_output" +fi +if printf '%s\n' "$fake_ultra_fast_output" | grep -q -- '--test inbox'; then + pass "ultra-fast still pulls targeted runtime smoke for critical warp-core changes" +else + fail "ultra-fast should keep targeted runtime smoke for critical warp-core changes" + printf '%s\n' "$fake_ultra_fast_output" +fi +if printf '%s\n' "$fake_ultra_fast_output" | grep -q 'clippy -p warp-core'; then + fail "ultra-fast should skip clippy to stay compile-first" + printf '%s\n' "$fake_ultra_fast_output" +else + pass "ultra-fast skips clippy" +fi +if printf '%s\n' "$fake_ultra_fast_output" | grep -q 'doc -p 
warp-core'; then + fail "ultra-fast should skip rustdoc gates" + printf '%s\n' "$fake_ultra_fast_output" +else + pass "ultra-fast skips rustdoc gates" +fi + +fake_ultra_fast_warp_wasm_output="$(run_fake_verify ultra-fast crates/warp-wasm/src/warp_kernel.rs)" +if printf '%s\n' "$fake_ultra_fast_warp_wasm_output" | grep -q -- 'test -p warp-wasm --features engine --lib'; then + pass "ultra-fast preserves warp-wasm engine smoke selection" +else + fail "ultra-fast should preserve warp-wasm engine smoke selection" + printf '%s\n' "$fake_ultra_fast_warp_wasm_output" +fi + +fake_ultra_fast_readme_output="$(run_fake_verify ultra-fast crates/warp-wasm/README.md)" +if printf '%s\n' "$fake_ultra_fast_readme_output" | grep -q 'cargo check -p warp-wasm'; then + fail "ultra-fast should not wake Rust cargo for non-Rust critical crate docs" + printf '%s\n' "$fake_ultra_fast_readme_output" +else + pass "ultra-fast keeps non-Rust critical crate docs off Rust cargo" +fi + +fake_ultra_fast_tooling_output="$(run_fake_verify ultra-fast scripts/verify-local.sh)" +if printf '%s\n' "$fake_ultra_fast_tooling_output" | grep -q '\[verify-local\]\[ultra-fast\] tooling smoke'; then + pass "ultra-fast tooling changes stay on the tooling smoke lane" +else + fail "ultra-fast tooling changes should stay on the tooling smoke lane" + printf '%s\n' "$fake_ultra_fast_tooling_output" +fi +if printf '%s\n' "$fake_ultra_fast_tooling_output" | grep -q 'hook regression coverage'; then + fail "ultra-fast tooling changes should not inherit the full hook regression suite" + printf '%s\n' "$fake_ultra_fast_tooling_output" +else + pass "ultra-fast tooling changes avoid the full hook regression suite" +fi + fake_warp_core_default_output="$(run_fake_verify full crates/warp-core/src/provenance_store.rs)" if printf '%s\n' "$fake_warp_core_default_output" | grep -q 'test -p warp-core --lib'; then pass "warp-core default smoke keeps the lib test lane" From 1734b737b23b7f94a9dd18d4a1bf8d88b1b1e0f0 Mon Sep 17 00:00:00 
2001 From: James Ross Date: Mon, 16 Mar 2026 12:51:10 -0700 Subject: [PATCH 11/18] fix(warp-core): tighten fork and legacy drain semantics --- crates/warp-core/src/provenance_store.rs | 62 ++++++++++++++++- crates/warp-wasm/src/warp_kernel.rs | 84 +++++++++++++++++++----- scripts/verify-local.sh | 25 ++++++- tests/hooks/test_verify_local.sh | 22 ++++++- 4 files changed, 172 insertions(+), 21 deletions(-) diff --git a/crates/warp-core/src/provenance_store.rs b/crates/warp-core/src/provenance_store.rs index c880def5..b82bc331 100644 --- a/crates/warp-core/src/provenance_store.rs +++ b/crates/warp-core/src/provenance_store.rs @@ -758,7 +758,11 @@ impl LocalProvenanceStore { let new_history = WorldlineHistory { u0_ref: source_history.u0_ref, initial_boundary_hash: source_history.initial_boundary_hash, - entries: source_history.entries[..end_idx].to_vec(), + entries: source_history.entries[..end_idx] + .iter() + .cloned() + .map(|entry| Self::rewrite_entry_for_fork(entry, source, new_id)) + .collect(), checkpoints: source_history .checkpoints .iter() @@ -770,6 +774,25 @@ impl LocalProvenanceStore { Ok(()) } + fn rewrite_entry_for_fork( + mut entry: ProvenanceEntry, + source: WorldlineId, + new_id: WorldlineId, + ) -> ProvenanceEntry { + entry.worldline_id = new_id; + if let Some(head_key) = entry.head_key.as_mut() { + if head_key.worldline_id == source { + head_key.worldline_id = new_id; + } + } + for parent in &mut entry.parents { + if parent.worldline_id == source { + parent.worldline_id = new_id; + } + } + entry + } + /// Returns the initial boundary hash registered for this worldline. 
/// /// # Errors @@ -1472,10 +1495,37 @@ mod tests { store.fork(source, 0, target).unwrap(); assert_eq!(store.len(target).unwrap(), 1); - assert_eq!(store.entry(target, 0).unwrap().expected, test_triplet(0)); + let forked_entry = store.entry(target, 0).unwrap(); + assert_eq!(forked_entry.worldline_id, target); + assert_eq!(forked_entry.head_key.unwrap().worldline_id, target); + assert_eq!(forked_entry.expected, test_triplet(0)); assert!(store.checkpoint_before(target, 1).is_none()); } + #[test] + fn fork_rewrites_same_worldline_parent_refs_to_target_worldline() { + let mut store = LocalProvenanceStore::new(); + let source = test_worldline_id(); + let target = WorldlineId([99u8; 32]); + let warp = test_warp_id(); + + store.register_worldline(source, warp).unwrap(); + store.append_local_commit(test_entry(0)).unwrap(); + store.append_local_commit(test_entry(1)).unwrap(); + + store.fork(source, 1, target).unwrap(); + + let forked_entry = store.entry(target, 1).unwrap(); + assert_eq!( + forked_entry.parents, + vec![ProvenanceRef { + worldline_id: target, + worldline_tick: 0, + commit_hash: test_triplet(0).commit_hash, + }] + ); + } + #[test] fn append_with_writes_stores_atom_writes() { let mut store = LocalProvenanceStore::new(); @@ -1639,8 +1689,14 @@ mod tests { service.fork(source, 0, target).unwrap(); assert_eq!(service.len(target).unwrap(), 1); - assert_eq!(service.entry(target, 0).unwrap().expected, test_triplet(0)); + let forked_entry = service.entry(target, 0).unwrap(); + assert_eq!(forked_entry.worldline_id, target); + assert_eq!(forked_entry.head_key.unwrap().worldline_id, target); + assert_eq!(forked_entry.expected, test_triplet(0)); assert!(service.checkpoint_before(target, 1).is_none()); + service + .build_btr(target, 0, 1, 7, b"auth".to_vec()) + .unwrap(); } #[test] diff --git a/crates/warp-wasm/src/warp_kernel.rs b/crates/warp-wasm/src/warp_kernel.rs index 1211cb9e..c361c58a 100644 --- a/crates/warp-wasm/src/warp_kernel.rs +++ 
b/crates/warp-wasm/src/warp_kernel.rs @@ -400,28 +400,40 @@ impl KernelPort for WarpKernel { Err(err) => return Err(err), }; - if self.last_drained_commit_tick == Some(artifact.resolved.resolved_tick) { + let latest_commit_tick = artifact.resolved.resolved_tick; + if self.last_drained_commit_tick == Some(latest_commit_tick) { return Ok(DrainResponse { channels: Vec::new(), }); } - self.last_drained_commit_tick = Some(artifact.resolved.resolved_tick); - - let channels = match artifact.payload { - ObservationPayload::TruthChannels(channels) => channels - .into_iter() - .map(|(channel, data)| ChannelData { - channel_id: channel.0.to_vec(), - data, - }) - .collect(), - _ => { + let start_tick = self.last_drained_commit_tick.map_or(0, |tick| tick + 1); + let mut channels = Vec::new(); + for tick in start_tick..=latest_commit_tick { + let artifact = self.observe_core(ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id: self.default_worldline, + at: ObservationAt::Tick(tick), + }, + frame: ObservationFrame::RecordedTruth, + projection: ObservationProjection::TruthChannels { channels: None }, + })?; + + let ObservationPayload::TruthChannels(tick_channels) = artifact.payload else { return Err(AbiError { code: error_codes::ENGINE_ERROR, message: "observe returned non-truth payload for drain adapter".into(), - }) - } - }; + }); + }; + channels.extend( + tick_channels + .into_iter() + .map(|(channel, data)| ChannelData { + channel_id: channel.0.to_vec(), + data, + }), + ); + } + self.last_drained_commit_tick = Some(latest_commit_tick); Ok(DrainResponse { channels }) } @@ -487,6 +499,7 @@ mod tests { }, pack_intent_v1, }; + use warp_core::{materialization::make_channel_id, ProvenanceStore}; #[test] fn new_kernel_has_zero_tick() { @@ -720,6 +733,47 @@ mod tests { assert_eq!(head_before, head_after); } + #[test] + fn drain_view_ops_returns_all_committed_outputs_since_last_drain() { + let mut kernel = WarpKernel::new().unwrap(); + let intent_a = 
pack_intent_v1(1, b"hello").unwrap(); + let intent_b = pack_intent_v1(2, b"world").unwrap(); + kernel.dispatch_intent(&intent_a).unwrap(); + kernel.step(1).unwrap(); + kernel.dispatch_intent(&intent_b).unwrap(); + kernel.step(1).unwrap(); + + let worldline_id = kernel.default_worldline; + let frontier_state = kernel + .runtime + .worldlines() + .get(&worldline_id) + .unwrap() + .state(); + let mut provenance = ProvenanceService::new(); + provenance + .register_worldline(worldline_id, frontier_state) + .unwrap(); + + for tick in 0..kernel.provenance.len(worldline_id).unwrap() { + let mut entry = kernel.provenance.entry(worldline_id, tick).unwrap(); + entry.outputs = vec![( + make_channel_id("test:truth"), + format!("tick-{tick}").into_bytes(), + )]; + provenance.append_local_commit(entry).unwrap(); + } + kernel.provenance = provenance; + + let drain = kernel.drain_view_ops().unwrap(); + assert_eq!(drain.channels.len(), 2); + assert_eq!(drain.channels[0].data, b"tick-0".to_vec()); + assert_eq!(drain.channels[1].data, b"tick-1".to_vec()); + + let drain_again = kernel.drain_view_ops().unwrap(); + assert!(drain_again.channels.is_empty()); + } + #[test] fn registry_info_has_abi_version() { let kernel = WarpKernel::new().unwrap(); diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index a82555f3..79b8087a 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -424,6 +424,20 @@ list_changed_rust_crates() { done <<< "${CHANGED_FILES}" | sort -u } +list_changed_tooling_shell_files() { + local file + while IFS= read -r file; do + [[ -z "$file" ]] && continue + case "$file" in + .githooks/*|scripts/*.sh|scripts/hooks/*|tests/hooks/*.sh) + if [[ -f "$file" ]]; then + printf '%s\n' "$file" + fi + ;; + esac + done <<< "${CHANGED_FILES}" | sort -u +} + list_changed_critical_crates() { local file crate while IFS= read -r file; do @@ -1061,7 +1075,16 @@ run_ultra_fast_tooling_smoke() { return fi echo "[verify-local][ultra-fast] tooling smoke" - bash -n 
scripts/verify-local.sh tests/hooks/test_verify_local.sh + mapfile -t shell_files < <(list_changed_tooling_shell_files) + if [[ ${#shell_files[@]} -eq 0 ]]; then + echo "[verify-local][ultra-fast] no changed shell tooling files" + return + fi + local file + for file in "${shell_files[@]}"; do + echo "[verify-local][ultra-fast] bash -n ${file}" + bash -n "$file" + done } run_full_lane_guards() { diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index 244ea7ad..eddab1fe 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -85,7 +85,7 @@ run_fake_verify() { local tmp tmp="$(mktemp -d)" - mkdir -p "$tmp/scripts" "$tmp/bin" "$tmp/.git" "$tmp/tests/hooks" + mkdir -p "$tmp/scripts/hooks" "$tmp/bin" "$tmp/.git" "$tmp/.githooks" "$tmp/tests/hooks" mkdir -p "$tmp/crates/warp-core/src" cp scripts/verify-local.sh "$tmp/scripts/verify-local.sh" chmod +x "$tmp/scripts/verify-local.sh" @@ -148,7 +148,17 @@ EOF set -euo pipefail echo "fake hook coverage" EOF - chmod +x "$tmp/tests/hooks/test_verify_local.sh" + cat >"$tmp/.githooks/pre-push" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +echo "fake canonical pre-push" +EOF + cat >"$tmp/scripts/hooks/pre-commit" <<'EOF' +#!/usr/bin/env bash +set -euo pipefail +echo "fake legacy pre-commit shim" +EOF + chmod +x "$tmp/tests/hooks/test_verify_local.sh" "$tmp/.githooks/pre-push" "$tmp/scripts/hooks/pre-commit" local changed changed="$(mktemp)" @@ -482,6 +492,14 @@ else pass "ultra-fast tooling changes avoid the full hook regression suite" fi +fake_ultra_fast_hook_output="$(run_fake_verify ultra-fast .githooks/pre-push)" +if printf '%s\n' "$fake_ultra_fast_hook_output" | grep -q '\[verify-local\]\[ultra-fast\] bash -n \.githooks/pre-push'; then + pass "ultra-fast syntax-checks changed canonical hook entrypoints" +else + fail "ultra-fast should syntax-check changed canonical hook entrypoints" + printf '%s\n' "$fake_ultra_fast_hook_output" +fi + 
fake_warp_core_default_output="$(run_fake_verify full crates/warp-core/src/provenance_store.rs)" if printf '%s\n' "$fake_warp_core_default_output" | grep -q 'test -p warp-core --lib'; then pass "warp-core default smoke keeps the lib test lane" From f43db401ab3036e460df0df7abc2d324a814129f Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 15:40:08 -0700 Subject: [PATCH 12/18] fix(warp-core): resolve observation review issues --- crates/ttd-browser/src/lib.rs | 35 ++++-------- crates/warp-core/src/coordinator.rs | 62 +++++++++++++++++++--- crates/warp-core/src/observation.rs | 15 +++++- crates/warp-core/src/playback.rs | 5 +- crates/warp-core/src/provenance_store.rs | 59 ++++++++++++++++++++ crates/warp-core/src/worldline_registry.rs | 8 +++ crates/warp-core/tests/inbox.rs | 12 ++--- crates/warp-wasm/src/warp_kernel.rs | 45 +++++++++++++--- 8 files changed, 191 insertions(+), 50 deletions(-) diff --git a/crates/ttd-browser/src/lib.rs b/crates/ttd-browser/src/lib.rs index 35628023..50a43a9e 100644 --- a/crates/ttd-browser/src/lib.rs +++ b/crates/ttd-browser/src/lib.rs @@ -1384,7 +1384,7 @@ mod tests { Vec::new(), expected, patch, - outputs, + outputs.clone(), Vec::new(), ); @@ -1396,29 +1396,16 @@ mod tests { let tx_id = engine.begin(cursor_id).unwrap(); let receipt_bytes = engine.commit_inner(tx_id).unwrap(); + let (frame, consumed) = echo_session_proto::decode_ttdr_v2(&receipt_bytes).unwrap(); + assert_eq!(consumed, receipt_bytes.len()); - // Parse receipt to check emissions_digest. - // TTDR v2 header starts with magic "TTDR" (4 bytes) + version (2 bytes) + flags (2 bytes) - // emissions_digest is at offset 104 in the header (v2): - // magic(4) + ver(2) + flags(2) + schema(32) + wl(32) + tick(8) + commit(32) + state(32) + patch(32) = 176? - // Let's check echo-session-proto for the offset. - - // Actually, let's just assert the receipt is non-empty and trust the logic, - // or check that it's NOT all zeros at the expected position. 
- // Header: - // version: 2 - // flags: 2 - // schema_hash: 32 - // worldline_id: 32 - // tick: 8 - // commit_hash: 32 - // state_root: 32 - // patch_digest: 32 - // emissions_digest: 32 <-- offset = 2 + 2 + 32 + 32 + 8 + 32 + 32 + 32 = 172 - // Wait, TTDR v2 frame encoding might be CBOR or raw. - // echo-session-proto says it's a TtdrFrame struct. - - assert!(!receipt_bytes.is_empty()); - // If we want to be sure, we'd need to decode it. + let finalized_channels: Vec = outputs + .into_iter() + .map(|(channel, data)| FinalizedChannel { channel, data }) + .collect(); + assert_eq!( + frame.header.emissions_digest, + compute_emissions_digest(&finalized_channels) + ); } } diff --git a/crates/warp-core/src/coordinator.rs b/crates/warp-core/src/coordinator.rs index 97adfa1c..8db4b24e 100644 --- a/crates/warp-core/src/coordinator.rs +++ b/crates/warp-core/src/coordinator.rs @@ -14,10 +14,12 @@ use crate::engine_impl::{CommitOutcome, Engine, EngineError}; use crate::head::{PlaybackHeadRegistry, RunnableWriterSet, WriterHead, WriterHeadKey}; use crate::head_inbox::{InboxAddress, InboxIngestResult, IngressEnvelope, IngressTarget}; use crate::ident::Hash; -use crate::provenance_store::{HistoryError, ProvenanceEntry, ProvenanceService, ProvenanceStore}; +use crate::provenance_store::{ + HistoryError, ProvenanceCheckpoint, ProvenanceEntry, ProvenanceService, ProvenanceStore, +}; use crate::worldline::WorldlineId; use crate::worldline_registry::WorldlineRegistry; -use crate::worldline_state::WorldlineState; +use crate::worldline_state::{WorldlineFrontier, WorldlineState}; // ============================================================================= // Runtime Errors and Ingress Disposition @@ -120,6 +122,13 @@ pub struct WorldlineRuntime { public_inboxes: BTreeMap>, } +#[derive(Clone, Debug)] +struct RuntimeCheckpoint { + global_tick: u64, + heads: BTreeMap, + frontiers: BTreeMap, +} + impl WorldlineRuntime { /// Creates an empty runtime. 
#[must_use] @@ -132,6 +141,42 @@ impl WorldlineRuntime { self.runnable.rebuild(&self.heads); } + fn checkpoint_for(&self, keys: &[WriterHeadKey]) -> Result { + let mut heads = BTreeMap::new(); + let mut frontiers = BTreeMap::new(); + + for key in keys { + let head = self.heads.get(key).ok_or(RuntimeError::UnknownHead(*key))?; + heads.insert(*key, head.clone()); + if let std::collections::btree_map::Entry::Vacant(slot) = + frontiers.entry(key.worldline_id) + { + let frontier = self + .worldlines + .get(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; + slot.insert(frontier.clone()); + } + } + + Ok(RuntimeCheckpoint { + global_tick: self.global_tick, + heads, + frontiers, + }) + } + + fn restore(&mut self, checkpoint: RuntimeCheckpoint) { + self.global_tick = checkpoint.global_tick; + for head in checkpoint.heads.into_values() { + self.heads.insert(head); + } + for frontier in checkpoint.frontiers.into_values() { + self.worldlines.replace_frontier(frontier); + } + self.refresh_runnable(); + } + /// Returns the registered worldline frontiers. 
#[must_use] pub fn worldlines(&self) -> &WorldlineRegistry { @@ -350,8 +395,9 @@ impl SchedulerCoordinator { } } - let runtime_before = runtime.clone(); - let provenance_before = provenance.clone(); + let runtime_before = runtime.checkpoint_for(&keys)?; + let provenance_before: ProvenanceCheckpoint = + provenance.checkpoint_for(keys.iter().map(|key| key.worldline_id))?; for key in &keys { let admitted = runtime @@ -450,13 +496,13 @@ impl SchedulerCoordinator { let record = match outcome { Ok(Ok(record)) => record, Ok(Err(err)) => { - *runtime = runtime_before; - *provenance = provenance_before; + runtime.restore(runtime_before); + provenance.restore(&provenance_before); return Err(err); } Err(payload) => { - *runtime = runtime_before; - *provenance = provenance_before; + runtime.restore(runtime_before); + provenance.restore(&provenance_before); resume_unwind(payload); } }; diff --git a/crates/warp-core/src/observation.rs b/crates/warp-core/src/observation.rs index a13ffca1..ad56bb55 100644 --- a/crates/warp-core/src/observation.rs +++ b/crates/warp-core/src/observation.rs @@ -90,6 +90,11 @@ pub enum ObservationProjectionKind { } impl ObservationProjectionKind { + /// Converts a validated internal projection into the ABI form. + /// + /// This helper is only valid when `self` and `projection` are matching + /// variants. Reaching the fallback arm is a programmer error in the + /// observation service, not a recoverable runtime condition. 
fn to_abi(self, projection: &ObservationProjection) -> abi::ObservationProjection { match (self, projection) { (Self::Head, ObservationProjection::Head) => abi::ObservationProjection::Head, @@ -115,7 +120,15 @@ impl ObservationProjectionKind { query_id: *query_id, vars_bytes: vars_bytes.clone(), }, - _ => unreachable!("projection kind and projection must agree"), + _ => { + debug_assert!( + false, + "ObservationProjectionKind::to_abi requires matching kind/projection variants" + ); + unreachable!( + "ObservationProjectionKind::to_abi requires matching kind/projection variants" + ) + } } } } diff --git a/crates/warp-core/src/playback.rs b/crates/warp-core/src/playback.rs index 41e71109..8931155e 100644 --- a/crates/warp-core/src/playback.rs +++ b/crates/warp-core/src/playback.rs @@ -461,9 +461,8 @@ impl PlaybackCursor { .patch .ok_or(SeekError::HistoryUnavailable { tick: patch_tick })?; let expected = entry.expected; - let parents = provenance - .parents(self.worldline_id, patch_tick) - .map_err(|_| SeekError::HistoryUnavailable { tick: patch_tick })? + let parents = entry + .parents .into_iter() .map(|parent| parent.commit_hash) .collect::>(); diff --git a/crates/warp-core/src/provenance_store.rs b/crates/warp-core/src/provenance_store.rs index b82bc331..fea4d5e1 100644 --- a/crates/warp-core/src/provenance_store.rs +++ b/crates/warp-core/src/provenance_store.rs @@ -483,6 +483,18 @@ struct WorldlineHistory { checkpoints: Vec, } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +struct ProvenanceWorldlineCheckpoint { + entry_len: usize, + checkpoint_len: usize, +} + +/// Lightweight rollback marker for touched provenance worldlines. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ProvenanceCheckpoint { + worldlines: BTreeMap, +} + /// In-memory provenance store backed by `Vec`s. 
#[derive(Debug, Clone, Default)] pub struct LocalProvenanceStore { @@ -810,6 +822,35 @@ impl LocalProvenanceStore { pub fn tip_ref(&self, w: WorldlineId) -> Result, HistoryError> { Ok(self.history(w)?.entries.last().map(ProvenanceEntry::as_ref)) } + + fn checkpoint_for(&self, worldline_ids: I) -> Result + where + I: IntoIterator, + { + let mut worldlines = BTreeMap::new(); + for worldline_id in worldline_ids { + let history = self.history(worldline_id)?; + worldlines.insert( + worldline_id, + ProvenanceWorldlineCheckpoint { + entry_len: history.entries.len(), + checkpoint_len: history.checkpoints.len(), + }, + ); + } + Ok(ProvenanceCheckpoint { worldlines }) + } + + fn restore(&mut self, checkpoint: &ProvenanceCheckpoint) { + for (worldline_id, marker) in &checkpoint.worldlines { + if let Some(history) = self.worldlines.get_mut(worldline_id) { + history.entries.truncate(marker.entry_len); + history.checkpoints.truncate(marker.checkpoint_len); + } else { + debug_assert!(false, "provenance checkpoint referenced unknown worldline"); + } + } + } } impl ProvenanceStore for LocalProvenanceStore { @@ -953,6 +994,24 @@ impl ProvenanceService { self.store.tip_ref(worldline_id) } + /// Creates a lightweight rollback checkpoint for touched worldlines. + /// + /// # Errors + /// + /// Returns [`HistoryError::WorldlineNotFound`] if any listed worldline has + /// not been registered. + pub fn checkpoint_for(&self, worldline_ids: I) -> Result + where + I: IntoIterator, + { + self.store.checkpoint_for(worldline_ids) + } + + /// Restores touched worldlines to a previously captured rollback checkpoint. + pub fn restore(&mut self, checkpoint: &ProvenanceCheckpoint) { + self.store.restore(checkpoint); + } + /// Builds a contiguous BTR from the registered provenance history. 
/// /// # Errors diff --git a/crates/warp-core/src/worldline_registry.rs b/crates/warp-core/src/worldline_registry.rs index 180f0141..84ce1359 100644 --- a/crates/warp-core/src/worldline_registry.rs +++ b/crates/warp-core/src/worldline_registry.rs @@ -91,6 +91,14 @@ impl WorldlineRegistry { self.worldlines.get_mut(worldline_id) } + /// Replaces the stored frontier for a worldline. + pub(crate) fn replace_frontier( + &mut self, + frontier: WorldlineFrontier, + ) -> Option { + self.worldlines.insert(frontier.worldline_id(), frontier) + } + /// Returns the number of registered worldlines. #[must_use] pub fn len(&self) -> usize { diff --git a/crates/warp-core/tests/inbox.rs b/crates/warp-core/tests/inbox.rs index fccf5019..aec6dad0 100644 --- a/crates/warp-core/tests/inbox.rs +++ b/crates/warp-core/tests/inbox.rs @@ -62,7 +62,7 @@ fn runtime_store(runtime: &WorldlineRuntime, worldline_id: WorldlineId) -> &Grap .unwrap() } -fn mirrored_provenance(runtime: &WorldlineRuntime) -> ProvenanceService { +fn registered_worldlines_provenance(runtime: &WorldlineRuntime) -> ProvenanceService { let mut provenance = ProvenanceService::new(); for (worldline_id, frontier) in runtime.worldlines().iter() { provenance @@ -95,7 +95,7 @@ fn runtime_ingest_commits_without_legacy_graph_inbox_nodes() { } ); - let mut provenance = mirrored_provenance(&runtime); + let mut provenance = registered_worldlines_provenance(&runtime); let records = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); @@ -141,7 +141,7 @@ fn runtime_ingest_is_idempotent_per_resolved_head_after_commit() { head_key: default_key, } ); - let mut provenance = mirrored_provenance(&runtime); + let mut provenance = registered_worldlines_provenance(&runtime); SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!( @@ -193,7 +193,7 @@ fn runtime_ingest_keeps_distinct_intents_as_distinct_event_nodes() { 
runtime.ingest(intent_a.clone()).unwrap(); runtime.ingest(intent_b.clone()).unwrap(); - let mut provenance = mirrored_provenance(&runtime); + let mut provenance = registered_worldlines_provenance(&runtime); let records = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); @@ -222,7 +222,7 @@ fn runtime_commit_patch_replays_to_post_state() { )) .unwrap(); - let mut provenance = mirrored_provenance(&runtime); + let mut provenance = registered_worldlines_provenance(&runtime); let records = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); @@ -260,7 +260,7 @@ fn runtime_commit_provenance_matches_worldline_state_mirror() { )) .unwrap(); - let mut provenance = mirrored_provenance(&runtime); + let mut provenance = registered_worldlines_provenance(&runtime); let records = SchedulerCoordinator::super_tick(&mut runtime, &mut provenance, &mut engine).unwrap(); assert_eq!(records.len(), 1); diff --git a/crates/warp-wasm/src/warp_kernel.rs b/crates/warp-wasm/src/warp_kernel.rs index c361c58a..e058daa3 100644 --- a/crates/warp-wasm/src/warp_kernel.rs +++ b/crates/warp-wasm/src/warp_kernel.rs @@ -295,6 +295,16 @@ impl WarpKernel { message: e.to_string(), }) } + + fn map_legacy_snapshot_error(err: AbiError) -> AbiError { + if err.code == error_codes::INVALID_TICK { + return AbiError { + code: error_codes::LEGACY_INVALID_TICK, + message: err.message, + }; + } + err + } } impl KernelPort for WarpKernel { @@ -471,14 +481,17 @@ impl KernelPort for WarpKernel { } fn snapshot_at(&mut self, tick: u64) -> Result, AbiError> { - Self::snapshot_bytes_from_observation(self.observe_core(ObservationRequest { - coordinate: ObservationCoordinate { - worldline_id: self.default_worldline, - at: ObservationAt::Tick(tick), - }, - frame: ObservationFrame::CommitBoundary, - projection: ObservationProjection::Snapshot, - })?) 
+ let artifact = self + .observe_core(ObservationRequest { + coordinate: ObservationCoordinate { + worldline_id: self.default_worldline, + at: ObservationAt::Tick(tick), + }, + frame: ObservationFrame::CommitBoundary, + projection: ObservationProjection::Snapshot, + }) + .map_err(Self::map_legacy_snapshot_error)?; + Self::snapshot_bytes_from_observation(artifact) } fn registry_info(&self) -> RegistryInfo { @@ -601,6 +614,22 @@ mod tests { fn snapshot_at_invalid_tick_returns_error() { let mut kernel = WarpKernel::new().unwrap(); let err = kernel.snapshot_at(999).unwrap_err(); + assert_eq!(err.code, error_codes::LEGACY_INVALID_TICK); + } + + #[test] + fn observe_invalid_tick_returns_observation_error_code() { + let kernel = WarpKernel::new().unwrap(); + let err = kernel + .observe(AbiObservationRequest { + coordinate: AbiObservationCoordinate { + worldline_id: kernel.default_worldline.0.to_vec(), + at: AbiObservationAt::Tick { tick: 999 }, + }, + frame: AbiObservationFrame::CommitBoundary, + projection: AbiObservationProjection::Snapshot, + }) + .unwrap_err(); assert_eq!(err.code, error_codes::INVALID_TICK); } From 94695f7a9098f8f8d389ea7ce6f0a93ce4d77856 Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 15:40:55 -0700 Subject: [PATCH 13/18] fix(tooling): harden local verifier edge cases --- scripts/verify-local.sh | 77 +++++++++++++++++++++++++++----- tests/hooks/test_verify_local.sh | 14 +++++- 2 files changed, 80 insertions(+), 11 deletions(-) diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index 79b8087a..f927bdcb 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -349,7 +349,7 @@ append_unique() { local value="$1" local array_name="$2" local -n array_ref="$array_name" - if ! array_contains "$value" "${array_ref[@]}"; then + if ! 
array_contains "$value" ${array_ref[@]+"${array_ref[@]}"}; then array_ref+=("$value") fi } @@ -446,7 +446,7 @@ list_changed_critical_crates() { crates/*/Cargo.toml|crates/*/build.rs|crates/*/src/*|crates/*/tests/*) crate="$(printf '%s\n' "$file" | sed -n 's#^crates/\([^/]*\)/.*#\1#p')" [[ -z "$crate" ]] && continue - if array_contains "$crate" "${FULL_LOCAL_PACKAGES[@]}"; then + if array_contains "$crate" ${FULL_LOCAL_PACKAGES[@]+"${FULL_LOCAL_PACKAGES[@]}"}; then printf '%s\n' "$crate" fi ;; @@ -638,7 +638,18 @@ lane_cargo() { } should_run_parallel_lanes() { - [[ "$VERIFY_LANE_MODE" == "parallel" ]] + case "$VERIFY_LANE_MODE" in + parallel) + return 0 + ;; + sequential|serial) + return 1 + ;; + *) + echo "[verify-local] invalid VERIFY_LANE_MODE: $VERIFY_LANE_MODE" >&2 + exit 1 + ;; + esac } run_parallel_lanes() { @@ -652,6 +663,19 @@ run_parallel_lanes() { local -a lane_pids=() local i + cleanup_parallel_lanes() { + local pid + for pid in "${lane_pids[@]}"; do + kill "$pid" 2>/dev/null || true + done + for pid in "${lane_pids[@]}"; do + wait "$pid" 2>/dev/null || true + done + rm -rf "$logdir" + } + + trap 'cleanup_parallel_lanes; trap - INT TERM; exit 130' INT TERM + while [[ $# -gt 0 ]]; do lane_names+=("$1") lane_funcs+=("$2") @@ -691,16 +715,39 @@ run_parallel_lanes() { echo "--- ${lane_names[$i]} ---" >&2 cat "$logfile" >&2 done + trap - INT TERM rm -rf "$logdir" exit 1 fi + trap - INT TERM rm -rf "$logdir" } crate_supports_lib_target() { local crate="$1" - [[ "$crate" != "xtask" ]] + local crate_dir="crates/${crate}" + local manifest="${crate_dir}/Cargo.toml" + + if [[ ! -f "$manifest" ]]; then + return 1 + fi + + [[ -f "${crate_dir}/src/lib.rs" ]] && return 0 + grep -Eq '^\[lib\]' "$manifest" +} + +crate_supports_bin_target() { + local crate="$1" + local crate_dir="crates/${crate}" + local manifest="${crate_dir}/Cargo.toml" + + if [[ ! 
-f "$manifest" ]]; then + return 1 + fi + + [[ -f "${crate_dir}/src/main.rs" ]] && return 0 + grep -Eq '^\[\[bin\]\]' "$manifest" } crate_is_fast_clippy_lib_only() { @@ -728,8 +775,16 @@ clippy_target_args_for_scope() { return fi - printf '%s\n' "--lib" - if ! crate_is_fast_clippy_lib_only "$crate"; then + if crate_supports_lib_target "$crate"; then + printf '%s\n' "--lib" + elif crate_supports_bin_target "$crate"; then + printf '%s\n' "--bins" + else + printf '%s\n' "--all-targets" + return + fi + + if crate_supports_lib_target "$crate" && ! crate_is_fast_clippy_lib_only "$crate"; then printf '%s\n' "--tests" fi } @@ -744,6 +799,8 @@ targeted_test_args_for_crate() { if crate_supports_lib_target "$crate"; then printf '%s\n' "--lib" + elif crate_supports_bin_target "$crate"; then + printf '%s\n' "--bins" fi printf '%s\n' "--tests" } @@ -756,7 +813,7 @@ filter_package_set_by_selection() { local -n candidate_ref="$candidate_name" for pkg in "${candidate_ref[@]}"; do - if array_contains "$pkg" "${selection_ref[@]}"; then + if array_contains "$pkg" ${selection_ref[@]+"${selection_ref[@]}"}; then printf '%s\n' "$pkg" fi done @@ -905,14 +962,14 @@ prepare_full_scope() { FULL_SCOPE_WARP_CORE_EXTRA_TESTS=() FULL_SCOPE_WARP_CORE_RUN_PRNG=0 - if array_contains "warp-core" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then + if array_contains "warp-core" ${FULL_SCOPE_SELECTED_CRATES[@]+"${FULL_SCOPE_SELECTED_CRATES[@]}"}; then FULL_SCOPE_RUN_WARP_CORE_SMOKE=1 prepare_warp_core_scope fi - if array_contains "warp-wasm" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then + if array_contains "warp-wasm" ${FULL_SCOPE_SELECTED_CRATES[@]+"${FULL_SCOPE_SELECTED_CRATES[@]}"}; then prepare_warp_wasm_scope fi - if array_contains "echo-wasm-abi" "${FULL_SCOPE_SELECTED_CRATES[@]}"; then + if array_contains "echo-wasm-abi" ${FULL_SCOPE_SELECTED_CRATES[@]+"${FULL_SCOPE_SELECTED_CRATES[@]}"}; then prepare_echo_wasm_abi_scope fi } diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh 
index eddab1fe..5b5501ea 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -82,6 +82,7 @@ EOF run_fake_verify() { local mode="$1" local changed_file="$2" + local lane_mode="${3:-parallel}" local tmp tmp="$(mktemp -d)" @@ -171,6 +172,7 @@ EOF cd "$tmp" && \ PATH="$tmp/bin:$PATH" \ VERIFY_FORCE=1 \ + VERIFY_LANE_MODE="$lane_mode" \ VERIFY_STAMP_SUBJECT="test-head" \ VERIFY_CHANGED_FILES_FILE="$changed" \ VERIFY_FAKE_CARGO_LOG="$cargo_log" \ @@ -409,6 +411,14 @@ else fail "full verification should route warp-core tests through an isolated target dir" printf '%s\n' "$fake_full_output" fi + +fake_full_seq_output="$(run_fake_verify full crates/warp-core/src/lib.rs sequential)" +if printf '%s\n' "$fake_full_seq_output" | grep -q '\[verify-local\] full: launching '; then + fail "sequential fallback should not launch parallel lanes" + printf '%s\n' "$fake_full_seq_output" +else + pass "sequential fallback dispatches through the non-parallel runner" +fi if printf '%s\n' "$fake_full_output" | grep -q -- '--test invariant_property_tests'; then fail "local warp-core full verification should stay on the smoke suite" printf '%s\n' "$fake_full_output" @@ -615,7 +625,9 @@ else fail "tooling-only full verification should stay in tooling-only scope" printf '%s\n' "$fake_tooling_output" fi -if printf '%s\n' "$fake_tooling_output" | grep -q 'lanes=fmt guards hook-tests'; then +if printf '%s\n' "$fake_tooling_output" | grep -q 'fmt' \ + && printf '%s\n' "$fake_tooling_output" | grep -q 'guards' \ + && printf '%s\n' "$fake_tooling_output" | grep -q 'hook-tests'; then pass "tooling-only full verification runs hook regression coverage" else fail "tooling-only full verification should run hook regression coverage" From 703482c5b70dbb11c9482af6dbd9332b2f2c03c0 Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 15:41:11 -0700 Subject: [PATCH 14/18] docs: sync observation and ABI review notes --- CHANGELOG.md | 10 ++++++++-- 
docs/plans/adr-0008-and-0009.md | 12 +++++++++++- docs/spec/SPEC-0009-wasm-abi-v1.md | 6 ++++-- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 556808a4..08dd3d5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,8 @@ ### feat(warp-core): close Phase 4 and pivot reads to observe +- **Added** ADR-0011 documenting the explicit observation contract with + worldline, coordinate, frame, and projection semantics. - **Changed** Phase 4 provenance/BTR work is now the documented substrate baseline: provenance is entry-based, parent refs are stored explicitly, and the standalone `ProvenanceService` owns authoritative worldline history. @@ -44,11 +46,15 @@ `INVALID_WORLDLINE`, `INVALID_TICK`, `UNSUPPORTED_FRAME_PROJECTION`, `UNSUPPORTED_QUERY`, and `OBSERVATION_UNAVAILABLE`. - **Changed** `WarpKernel` and the WASM ABI now expose `observe(...)`, while - `get_head`, `snapshot_at`, `execute_query`, and `drain_view_ops` are thin - one-phase adapters over the observation contract. + `get_head`, `snapshot_at`, and `drain_view_ops` are thin one-phase adapters + over the observation contract. `execute_query(...)` currently lowers through + observation semantics and returns deterministic `UNSUPPORTED_QUERY` until full + query support is implemented. - **Changed** `drain_view_ops()` is now legacy adapter/debug behavior only: it reads recorded truth through `observe(...)` and tracks only adapter-local drain state instead of mutating runtime-owned materialization state. +- **Changed** `ttd-browser` migrated to the entry-based provenance API after + the Phase 4 hard cut removed the old provenance convenience methods. 
### fix(warp-core): close final Phase 3 PR review threads diff --git a/docs/plans/adr-0008-and-0009.md b/docs/plans/adr-0008-and-0009.md index 2fb56dae..4e0e284a 100644 --- a/docs/plans/adr-0008-and-0009.md +++ b/docs/plans/adr-0008-and-0009.md @@ -611,7 +611,16 @@ pub struct ObservationArtifact { pub payload: ObservationPayload, } -pub fn observe(request: ObservationRequest) -> Result; +pub struct ObservationService; + +impl ObservationService { + pub fn observe( + runtime: &WorldlineRuntime, + provenance: &ProvenanceService, + engine: &Engine, + request: ObservationRequest, + ) -> Result; +} ``` ### Design Notes @@ -1132,6 +1141,7 @@ docs/ adr-0008-and-0009.md adr/ ADR-0010-observational-seek-and-administrative-rewind.md + ADR-0011-explicit-observation-contract.md adr-exceptions.md crates/warp-core/src/ diff --git a/docs/spec/SPEC-0009-wasm-abi-v1.md b/docs/spec/SPEC-0009-wasm-abi-v1.md index 32d948f1..9ed4286d 100644 --- a/docs/spec/SPEC-0009-wasm-abi-v1.md +++ b/docs/spec/SPEC-0009-wasm-abi-v1.md @@ -232,7 +232,9 @@ envelope like all other responses. ## Not Yet Implemented -These are honestly reported as `NOT_SUPPORTED` (error code 5): +These are honestly reported as transitional, endpoint-specific errors: -- `execute_query`: Lowered through `observe(...)`, but real query evaluation is not yet built. +- `execute_query`: Lowered through `observe(...)`; returns `UNSUPPORTED_QUERY` + (error code `11`) until real query evaluation lands. - `render_snapshot`: Snapshot-to-ViewOps projection not yet built. + Returns `NOT_SUPPORTED` (error code `5`). 
From c3139c09206316151deca87773b4a769c998f445 Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 15:47:46 -0700 Subject: [PATCH 15/18] fix(warp-core): harden BTR tick arithmetic --- crates/warp-core/src/provenance_store.rs | 46 +++++++++++++++++++++--- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/crates/warp-core/src/provenance_store.rs b/crates/warp-core/src/provenance_store.rs index fea4d5e1..b1c620ef 100644 --- a/crates/warp-core/src/provenance_store.rs +++ b/crates/warp-core/src/provenance_store.rs @@ -171,6 +171,10 @@ pub enum BtrError { got: u64, }, + /// Payload tick arithmetic overflowed `u64`. + #[error("BTR payload tick arithmetic overflowed")] + TickOverflow, + /// The record worldline was not registered in the provenance service. #[error("BTR references unknown worldline: {0:?}")] UnknownWorldline(WorldlineId), @@ -343,9 +347,15 @@ pub struct BtrPayload { impl BtrPayload { /// Returns the exclusive end tick for the payload. - #[must_use] - pub fn end_tick_exclusive(&self) -> u64 { - self.start_tick + self.entries.len() as u64 + /// + /// # Errors + /// + /// Returns [`BtrError::TickOverflow`] if the payload length cannot be added + /// to `start_tick` without overflowing `u64`. + pub fn end_tick_exclusive(&self) -> Result { + self.start_tick + .checked_add(self.entries.len() as u64) + .ok_or(BtrError::TickOverflow) } /// Validates structural payload invariants. 
@@ -385,7 +395,7 @@ impl BtrPayload { got: entry.worldline_tick, }); } - expected_tick += 1; + expected_tick = expected_tick.checked_add(1).ok_or(BtrError::TickOverflow)?; } Ok(()) @@ -1774,6 +1784,34 @@ mod tests { )); } + #[test] + fn btr_validation_rejects_tick_overflow() { + let head_key = WriterHeadKey { + worldline_id: test_worldline_id(), + head_id: make_head_id("overflow"), + }; + let payload = BtrPayload { + worldline_id: test_worldline_id(), + start_tick: u64::MAX, + entries: vec![ProvenanceEntry::local_commit( + test_worldline_id(), + u64::MAX, + 0, + head_key, + Vec::new(), + test_triplet(0), + test_patch(0), + Vec::new(), + Vec::new(), + )], + }; + assert!(matches!( + payload.end_tick_exclusive(), + Err(BtrError::TickOverflow) + )); + assert!(matches!(payload.validate(), Err(BtrError::TickOverflow))); + } + #[test] fn validate_btr_rejects_bad_input_boundary_hash() { let mut service = ProvenanceService::new(); From bc12587b9afce074d9e9eb55ce8cd0b88f589bca Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 15:48:10 -0700 Subject: [PATCH 16/18] docs: sync ADR-0011 observation entrypoint --- docs/adr/ADR-0011-explicit-observation-contract.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/adr/ADR-0011-explicit-observation-contract.md b/docs/adr/ADR-0011-explicit-observation-contract.md index 8f5673a2..76e856c5 100644 --- a/docs/adr/ADR-0011-explicit-observation-contract.md +++ b/docs/adr/ADR-0011-explicit-observation-contract.md @@ -41,7 +41,12 @@ frame and projection. The canonical internal entrypoint is: ```rust -observe(request: ObservationRequest) -> Result +ObservationService::observe( + runtime: &WorldlineRuntime, + provenance: &ProvenanceService, + engine: &Engine, + request: ObservationRequest, +) -> Result ``` All meaningful reads must flow through this path. 
From 401586b8d0ba07316051a7a549821a6d74610a02 Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 18:37:10 -0700 Subject: [PATCH 17/18] fix(tooling): skip non-shell hook docs in ultra-fast smoke --- scripts/verify-local.sh | 13 ++++++++++++- tests/hooks/test_verify_local.sh | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh index f927bdcb..2dc1c25b 100755 --- a/scripts/verify-local.sh +++ b/scripts/verify-local.sh @@ -430,7 +430,7 @@ list_changed_tooling_shell_files() { [[ -z "$file" ]] && continue case "$file" in .githooks/*|scripts/*.sh|scripts/hooks/*|tests/hooks/*.sh) - if [[ -f "$file" ]]; then + if is_shell_tooling_file "$file"; then printf '%s\n' "$file" fi ;; @@ -438,6 +438,17 @@ list_changed_tooling_shell_files() { done <<< "${CHANGED_FILES}" | sort -u } +is_shell_tooling_file() { + local file="$1" + [[ -f "$file" ]] || return 1 + case "$file" in + *.sh) return 0 ;; + esac + local first_line="" + IFS= read -r first_line < "$file" || true + [[ "$first_line" =~ ^#!.*(^|[[:space:]/])(ba|z)?sh([[:space:]]|$) ]] +} + list_changed_critical_crates() { local file crate while IFS= read -r file; do diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh index 5b5501ea..de072d98 100755 --- a/tests/hooks/test_verify_local.sh +++ b/tests/hooks/test_verify_local.sh @@ -510,6 +510,20 @@ else printf '%s\n' "$fake_ultra_fast_hook_output" fi +fake_ultra_fast_hook_readme_output="$(run_fake_verify ultra-fast scripts/hooks/README.md)" +if printf '%s\n' "$fake_ultra_fast_hook_readme_output" | grep -q '\[verify-local\]\[ultra-fast\] bash -n scripts/hooks/README.md'; then + fail "ultra-fast should skip non-shell files in hook directories" + printf '%s\n' "$fake_ultra_fast_hook_readme_output" +else + pass "ultra-fast skips non-shell files in hook directories" +fi +if printf '%s\n' "$fake_ultra_fast_hook_readme_output" | grep -q '\[verify-local\]\[ultra-fast\] no 
changed shell tooling files'; then + pass "non-shell hook docs do not fabricate shell smoke targets" +else + fail "non-shell hook docs should not appear as shell tooling files" + printf '%s\n' "$fake_ultra_fast_hook_readme_output" +fi + fake_warp_core_default_output="$(run_fake_verify full crates/warp-core/src/provenance_store.rs)" if printf '%s\n' "$fake_warp_core_default_output" | grep -q 'test -p warp-core --lib'; then pass "warp-core default smoke keeps the lib test lane" From 3686e8c8eb4c29c310f02b0d5a577b98fc710136 Mon Sep 17 00:00:00 2001 From: James Ross Date: Mon, 16 Mar 2026 18:37:45 -0700 Subject: [PATCH 18/18] docs: note ultra-fast shell smoke hardening --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08dd3d5e..a84394f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,10 @@ clippy/rustdoc/guard scans stay on heavier local paths and CI. - **Added** `make verify-full-sequential` as an explicit fallback when the lane runner itself needs debugging. +- **Fixed** ultra-fast tooling smoke now detects actual shell tooling files by + extension or shebang, so extensionless hook entrypoints stay covered while + non-shell files like hook docs or timing logs do not false-fail under + `bash -n`. ### feat(warp-core): close Phase 4 and pivot reads to observe