Billy Barrow 2 дня назад
Commit
834187d3c4
100 измененных файлов с 47236 добавлено и 0 удалено
  1. 125 0
      Architecture/01-Overview.md
  2. 168 0
      Architecture/02-Namespaces.md
  3. 424 0
      Architecture/03-Core-Interfaces.md
  4. 800 0
      Architecture/04-Class-Hierarchy.md
  5. 371 0
      Architecture/05-Path-System.md
  6. 627 0
      Architecture/06-Entity-Types.md
  7. 1000 0
      Architecture/07-Storage-Layer.md
  8. 418 0
      Architecture/08-Set-Operations.md
  9. 702 0
      Architecture/09-Client-Server-Protocol.md
  10. 453 0
      Architecture/10-File-Organization.md
  11. 843 0
      Architecture/11-Indexed-Entities.md
  12. 668 0
      Architecture/12-Storage-Redesign.md
  13. 1264 0
      Architecture/13-Transaction-Batching.md
  14. 1434 0
      Architecture/14-Migration-System.md
  15. 66 0
      Architecture/README.md
  16. 397 0
      KEY-SCHEMA.md
  17. 21 0
      LICENSE
  18. 929 0
      PERF.md
  19. 359 0
      README.md
  20. 256 0
      STORAGE-BACKENDS.md
  21. 290 0
      examples/BasicUsage.vala
  22. 11 0
      examples/meson.build
  23. 32 0
      implementation_plan.md
  24. 23 0
      meson.build
  25. 1336 0
      plans/async-io-refactor-design.md
  26. 111 0
      plans/hookmanager-batch-fix.md
  27. 93 0
      plans/hookmanager-batch-optimize.md
  28. 455 0
      plans/safepath-design.md
  29. 403 0
      plans/storage-migration-plan.md
  30. 500 0
      plans/virtual-entity-resolution-design.md
  31. 307 0
      src/Core/Engine.vala
  32. 293 0
      src/Core/Entity.vala
  33. 96 0
      src/Core/EntityError.vala
  34. 523 0
      src/Core/EntityPath.vala
  35. 300 0
      src/Core/EntitySet.vala
  36. 138 0
      src/Core/EntityType.vala
  37. 134 0
      src/Core/SafePath.vala
  38. 621 0
      src/Engine/ConnectionString.vala
  39. 1323 0
      src/Engine/EmbeddedEngine.vala
  40. 403 0
      src/Engine/EmbeddedTransaction.vala
  41. 280 0
      src/Engine/EngineConfiguration.vala
  42. 232 0
      src/Engine/EngineFactory.vala
  43. 1175 0
      src/Engine/HookManager.vala
  44. 831 0
      src/Engine/RemoteEngine.vala
  45. 245 0
      src/Entities/AbstractEntity.vala
  46. 925 0
      src/Entities/Catalogue.vala
  47. 719 0
      src/Entities/Category.vala
  48. 452 0
      src/Entities/Container.vala
  49. 305 0
      src/Entities/Document.vala
  50. 1456 0
      src/Entities/Index.vala
  51. 138 0
      src/Implexus.vala
  52. 93 0
      src/Migrations/BootstrapMigration.vala
  53. 100 0
      src/Migrations/Migration.vala
  54. 62 0
      src/Migrations/MigrationError.vala
  55. 395 0
      src/Migrations/MigrationRunner.vala
  56. 316 0
      src/Migrations/MigrationStorage.vala
  57. 8 0
      src/Migrations/meson.build
  58. 1516 0
      src/Protocol/Message.vala
  59. 296 0
      src/Protocol/MessageReader.vala
  60. 276 0
      src/Protocol/MessageWriter.vala
  61. 70 0
      src/Protocol/ProtocolError.vala
  62. 457 0
      src/Server/ClientHandler.vala
  63. 419 0
      src/Server/Server.vala
  64. 243 0
      src/Storage/AsyncDbmQueue.vala
  65. 68 0
      src/Storage/Dbm.vala
  66. 64 0
      src/Storage/DbmOperation.vala
  67. 524 0
      src/Storage/ElementSerializer.vala
  68. 320 0
      src/Storage/FilesystemDbm.vala
  69. 337 0
      src/Storage/Gdbm/GdbmDbm.vala
  70. 83 0
      src/Storage/Gdbm/gdbm_wrapper.c
  71. 54 0
      src/Storage/Gdbm/gdbm_wrapper.h
  72. 178 0
      src/Storage/HighLevel/CatalogueStore.vala
  73. 212 0
      src/Storage/HighLevel/CategoryStore.vala
  74. 84 0
      src/Storage/HighLevel/ContainerStore.vala
  75. 63 0
      src/Storage/HighLevel/DocumentStore.vala
  76. 124 0
      src/Storage/HighLevel/EntityStore.vala
  77. 296 0
      src/Storage/HighLevel/IndexStore.vala
  78. 491 0
      src/Storage/Lmdb/LmdbDbm.vala
  79. 134 0
      src/Storage/LowLevel/CatalogueConfigStorage.vala
  80. 409 0
      src/Storage/LowLevel/CatalogueIndexStorage.vala
  81. 134 0
      src/Storage/LowLevel/CategoryConfigStorage.vala
  82. 320 0
      src/Storage/LowLevel/CategoryIndexStorage.vala
  83. 181 0
      src/Storage/LowLevel/ChildrenStorage.vala
  84. 143 0
      src/Storage/LowLevel/EntityMetadataStorage.vala
  85. 100 0
      src/Storage/LowLevel/PropertiesStorage.vala
  86. 587 0
      src/Storage/LowLevel/TextIndexStorage.vala
  87. 289 0
      src/Storage/LowLevel/TypeIndexStorage.vala
  88. 540 0
      src/Storage/Storage.vala
  89. 65 0
      src/Storage/StorageError.vala
  90. 147 0
      src/meson.build
  91. 450 0
      tests/Core/EntityPathTest.vala
  92. 637 0
      tests/Core/SafePathTest.vala
  93. 491 0
      tests/Engine/ConnectionStringTest.vala
  94. 3566 0
      tests/Engine/EmbeddedEngineTest.vala
  95. 1081 0
      tests/Migrations/MigrationTest.vala
  96. 571 0
      tests/Protocol/MessageTest.vala
  97. 2735 0
      tests/Storage/DbmPersistenceTest.vala
  98. 556 0
      tests/Storage/ElementSerializerTest.vala
  99. 608 0
      tests/Storage/FilesystemDbmTest.vala
  100. 868 0
      tests/Storage/GdbmDbmTest.vala

+ 125 - 0
Architecture/01-Overview.md

@@ -0,0 +1,125 @@
+# Overview
+
+## What is Implexus?
+
+Implexus is a path-based document database library and engine for Vala. It provides a hierarchical data storage system where documents are identified by application-assigned paths, similar to filesystem paths with folders.
+
+## Design Goals
+
+### 1. Path-Based Identification
+Documents are identified by paths like `/users/john/profile` rather than opaque IDs. This makes the database intuitive to use and enables hierarchical organization.
+
+### 2. Flexible Entity System
+Four entity types support different access patterns:
+- **CONTAINER**: Container for child entities (like folders)
+- **DOCUMENT**: Typed objects with properties
+- **CATEGORY**: Auto-generated categories based on expression evaluation
+- **INDEX**: Text search with dynamic container results
+
+### 3. Storage Abstraction
+DBM-style key-value storage interface allows different backends while maintaining a consistent API.
+
+### 4. Binary Serialization
+Element types are serialized to/from binary format only - no GLib.Object specific logic. This keeps serialization simple and predictable.
+
+### 5. Unified API
+Applications use the same `Engine` interface whether running in embedded mode or connecting to a remote server.
+
+### 6. Invercargill Integration
+Built on Invercargill types:
+- `Element` interface for type-safe values
+- `Properties` interface for document properties
+- `Enumerable<T>` and `Lot<T>` for collection returns
+- `Set<T>` and `ReadOnlySet<T>` for set operations
+- `Expressions` for category and index queries
+
+## Operating Modes
+
+### Embedded Mode
+The database engine runs as a library within the application process. Direct method calls, no network overhead.
+
+```vala
+var engine = new EmbeddedEngine(storage);
+var container = engine.get_root().create_container("users");
+```
+
+### Client/Server Mode
+A daemon process manages the database, clients connect via TCP. Same API through `RemoteEngine`.
+
+```vala
+var engine = new RemoteEngine.connect("localhost", 9090);
+var container = engine.get_root().create_container("users");
+```
+
+## Key Design Principles
+
+### No Libgee or GLib Collections
+Uses Invercargill data structures exclusively:
+- `Invercargill.DataStructures.Vector<T>` instead of `GLib.List<T>`
+- `Invercargill.DataStructures.Dictionary<K,V>` instead of `GLib.HashTable<K,V>`
+- `Invercargill.DataStructures.HashSet<T>` for sets
+- `Invercargill.DataStructures.Category<K,V>` for categorized storage
+
+### Return Types
+- Use `Enumerable<T>` for lazy evaluation of potentially large result sets
+- Use `Lot<T>` for materialized collections
+- Use `ReadOnlySet<T>` for set-based operations
+
+### Expression-Based Queries
+Categories and Indexes use `Invercargill.Expressions` for:
+- Property access on documents
+- Filtering and transformation
+- Text matching for indexes
+
+## High-Level Architecture
+
+```
+┌────────────────────────────────────────────────────────────────┐
+│                        Application Layer                        │
+└────────────────────────────────────────────────────────────────┘
+                                │
+                                ▼
+┌────────────────────────────────────────────────────────────────┐
+│                        Engine Interface                         │
+│                                                                 │
+│   Provides unified API for embedded and remote operation        │
+└────────────────────────────────────────────────────────────────┘
+                                │
+                ┌───────────────┴───────────────┐
+                ▼                               ▼
+┌───────────────────────────┐   ┌───────────────────────────────┐
+│      EmbeddedEngine       │   │        RemoteEngine           │
+│                           │   │                               │
+│  Direct in-process calls  │   │  TCP communication with       │
+│                           │   │  implexusd daemon             │
+└───────────────────────────┘   └───────────────────────────────┘
+                │                               │
+                └───────────────┬───────────────┘
+                                ▼
+┌────────────────────────────────────────────────────────────────┐
+│                        Entity System                            │
+│                                                                 │
+│   Entity interface with four implementations:                   │
+│   Container, Document, Category, Index                          │
+└────────────────────────────────────────────────────────────────┘
+                                │
+                                ▼
+┌────────────────────────────────────────────────────────────────┐
+│                        Storage Layer                            │
+│                                                                 │
+│   Storage interface for entity persistence                      │
+│   DBM interface for key-value storage                           │
+│   Binary serialization for Element types                        │
+└────────────────────────────────────────────────────────────────┘
+```
+
+## Relationship to Reference Projects
+
+Implexus follows patterns from:
+
+| Project | Patterns Used |
+|---------|---------------|
+| Spry/astralis | Project structure, meson build |
+| Invercargill | Element, Properties, Enumerable, Lot, Set, Expressions |
+| Invercargill-Json | Element-to-binary serialization pattern |
+| Inversion | Dependency injection for engine configuration |

+ 168 - 0
Architecture/02-Namespaces.md

@@ -0,0 +1,168 @@
+# Namespace Organization
+
+## Namespace Hierarchy
+
+Implexus uses a hierarchical namespace structure following Vala conventions:
+
+```
+Implexus                          // Root namespace
+├── Core                          // Core interfaces and types
+├── Entities                      // Entity implementations
+├── Storage                       // Storage layer
+├── Serialization                 // Binary serialization
+├── Protocol                      // Client/server protocol
+└── Server                        // Daemon implementation
+```
+
+## Namespace Details
+
+### Implexus.Core
+
+Core interfaces and enums that define the public API.
+
+| Type | Kind | Description |
+|------|------|-------------|
+| `Entity` | interface | Base interface for all database entities |
+| `EntityType` | enum | CONTAINER, DOCUMENT, CATEGORY, INDEX |
+| `Engine` | interface | Unified API for embedded and remote modes |
+| `Path` | class | Path parsing and manipulation |
+| `EntityReference` | class | Reference to an entity by path |
+| `EngineError` | errordomain | Engine operation errors |
+
+### Implexus.Entities
+
+Concrete entity implementations.
+
+| Type | Kind | Description |
+|------|------|-------------|
+| `AbstractEntity` | abstract class | Base implementation with common functionality |
+| `Container` | class | Container entity for child entities |
+| `Document` | class | Properties-based document entity |
+| `Category` | class | Expression-based auto-categorization |
+| `Index` | class | Text search with dynamic categories |
+| `IndexResult` | class | Container returned by index queries |
+| `EntitySet` | class | Set operations over entity collections |
+
+### Implexus.Storage
+
+Storage abstraction layer.
+
+| Type | Kind | Description |
+|------|------|-------------|
+| `Storage` | interface | Entity persistence operations |
+| `DBM` | interface | DBM-style key-value storage |
+| `FilesystemDbm` | class | Filesystem-based DBM implementation |
+| `StorageConfiguration` | class | Storage configuration options |
+| `Transaction` | interface | Transaction support for writes |
+
+### Implexus.Serialization
+
+Binary serialization for Element types.
+
+| Type | Kind | Description |
+|------|------|-------------|
+| `ElementWriter` | class | Writes Element values to binary |
+| `ElementReader` | class | Reads Element values from binary |
+| `EntitySerializer` | class | Serializes entities to binary |
+| `EntityDeserializer` | class | Deserializes entities from binary |
+| `SerializationFormat` | enum | Format version identifiers |
+
+### Implexus.Protocol
+
+Client/server communication protocol.
+
+| Type | Kind | Description |
+|------|------|-------------|
+| `Message` | interface | Base protocol message |
+| `Request` | interface | Client request message |
+| `Response` | interface | Server response message |
+| `MessageReader` | class | Reads messages from stream |
+| `MessageWriter` | class | Writes messages to stream |
+| `ProtocolError` | errordomain | Protocol-level errors |
+
+### Implexus.Server
+
+Daemon implementation for client/server mode.
+
+| Type | Kind | Description |
+|------|------|-------------|
+| `Server` | class | Main daemon server |
+| `ClientConnection` | class | Handles individual client |
+| `RequestHandler` | class | Processes client requests |
+| `ServerConfiguration` | class | Server configuration options |
+
+## Namespace Dependencies
+
+```mermaid
+graph TD
+    A[Implexus.Core] --> B[Invercargill]
+    C[Implexus.Entities] --> A
+    C[Implexus.Entities] --> D[Implexus.Serialization]
+    D[Implexus.Serialization] --> B
+    E[Implexus.Storage] --> A
+    E[Implexus.Storage] --> D
+    F[Implexus.Protocol] --> A
+    F[Implexus.Protocol] --> D
+    G[Implexus.Server] --> E
+    G[Implexus.Server] --> F
+    G[Implexus.Server] --> C
+```
+
+## External Dependencies
+
+| Dependency | Usage |
+|------------|-------|
+| `GLib-2.0` | Basic types, error domains |
+| `GObject-2.0` | Object system |
+| `Invercargill-1` | Element, Properties, Enumerable, Lot, Set, Expressions, DataStructures |
+
+## Naming Conventions
+
+### Interfaces
+- PascalCase names
+- No `I` prefix (Vala convention)
+- Examples: `Entity`, `Engine`, `Storage`, `DBM`
+
+### Classes
+- PascalCase names
+- Abstract classes prefixed with `Abstract`
+- Examples: `Container`, `Document`, `AbstractEntity`
+
+### Enums
+- PascalCase for type name
+- UPPER_SNAKE_CASE for values
+- Examples: `EntityType.CONTAINER`, `EntityType.DOCUMENT`
+
+### Error Domains
+- PascalCase with `Error` suffix
+- UPPER_SNAKE_CASE for codes
+- Examples: `EngineError.ENTITY_NOT_FOUND`
+
+### Delegates
+- PascalCase with descriptive suffix
+- Examples: `EntityFilter`, `EntityTransform`
+
+## Usage Example
+
+```vala
+using Implexus.Core;
+using Implexus.Entities;
+using Implexus.Storage;
+
+public int main(string[] args) {
+    // Create storage
+    var dbm = new FilesystemDbm("/path/to/database");
+    var storage = new DefaultStorage(dbm);
+    
+    // Create embedded engine
+    var engine = new EmbeddedEngine(storage);
+    
+    // Work with entities
+    var root = engine.get_root();
+    var users = root.create_container("users");
+    var doc = users.create_document("john", "UserProfile");
+    doc.set_property("email", new ValueElement("john@example.com"));
+    
+    return 0;
+}
+```

+ 424 - 0
Architecture/03-Core-Interfaces.md

@@ -0,0 +1,424 @@
+# Core Interfaces
+
+This document defines the fundamental interfaces that form Implexus's public API.
+
+## Async I/O Model
+
+All I/O operations in Implexus are asynchronous. Methods that require database access use Vala's `async` keyword and must be called with `yield`. This ensures the main loop remains responsive during database operations.
+
+**Key principles:**
+- Identity properties (path, name, entity_type, engine) are synchronous - no I/O required
+- Navigation, CRUD, and query operations are async - use `*_async` methods
+- Hooks run synchronously in the DBM worker thread context
+- Results are returned to the main loop via `Idle.add()`
+
+## EntityType Enum
+
+Defines the four types of entities in the database.
+
+```vala
+namespace Implexus.Core {
+
+public enum EntityType {
+    CONTAINER,
+    DOCUMENT,
+    CATEGORY,
+    INDEX;
+    
+    public string to_string() {
+        switch (this) {
+            case CONTAINER: return "container";
+            case DOCUMENT: return "document";
+            case CATEGORY: return "category";
+            case INDEX: return "index";
+            default: assert_not_reached();
+        }
+    }
+    
+    public static EntityType? from_string(string name) {
+        switch (name.down()) {
+            case "container": return CONTAINER;
+            case "document": return DOCUMENT;
+            case "category": return CATEGORY;
+            case "index": return INDEX;
+            default: return null;
+        }
+    }
+}
+
+} // namespace Implexus.Core
+```
+
+## Entity Interface
+
+The base interface for all database entities. All I/O operations are async.
+
+```vala
+namespace Implexus.Core {
+
+public interface Entity : Object {
+    
+    // === Identity (Synchronous - No I/O) ===
+    public abstract unowned Engine engine { get; }
+    public abstract Path path { owned get; }
+    public abstract string name { owned get; }
+    public abstract EntityType entity_type { get; }
+    public abstract string type_label { owned get; }
+    public abstract string configured_expression { owned get; }
+    public abstract string configured_type_label { owned get; }
+    
+    // === Parent/Child Navigation (Async) ===
+    public abstract async Entity? get_parent_async() throws EntityError;
+    public abstract async Invercargill.ReadOnlySet<string> get_child_names_async() throws EntityError;
+    public abstract async Entity? get_child_async(string name) throws EntityError;
+    public abstract async Entity[] get_children_async() throws EntityError;
+    
+    // === Child Management (CONTAINER only - Async) ===
+    public abstract async Entity? create_container_async(string name) throws EntityError;
+    public abstract async Entity? create_document_async(string name, string type_label) throws EntityError;
+    public abstract async Entity? create_category_async(
+        string name, 
+        string type_label, 
+        string expression
+    ) throws EntityError;
+    public abstract async Entity? create_index_async(
+        string name, 
+        string type_label, 
+        string expression
+    ) throws EntityError;
+    public abstract async Entity? create_catalogue_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws EntityError;
+    
+    // === Document Operations (DOCUMENT only - Async) ===
+    public abstract async Invercargill.Properties get_properties_async() throws EntityError;
+    public abstract async Invercargill.Element? get_entity_property_async(string name) throws EntityError;
+    public abstract async void set_entity_property_async(string name, Invercargill.Element value) throws EntityError;
+    public abstract async void remove_property_async(string name) throws EntityError;
+    
+    // === Lifecycle (Async) ===
+    public abstract async void delete_async() throws EntityError;
+    public abstract bool exists { get; }
+    
+    // === Set Operations (Async) ===
+    public abstract async EntitySet as_set_async();
+    
+    // === Signals ===
+    public signal void property_changed(string key);
+}
+
+} // namespace Implexus.Core
+```
+
+## Engine Interface
+
+Unified API for both embedded and remote operation modes. All I/O operations are async.
+
+```vala
+namespace Implexus.Core {
+
+public interface Engine : Object {
+    
+    // === Root Access (Async) ===
+    public abstract async Entity get_root_async() throws EngineError;
+    
+    // === Path-Based Access (Async) ===
+    public abstract async Entity? get_entity_async(Path path) throws EngineError;
+    public abstract async Entity? get_entity_or_null_async(Path path) throws EngineError;
+    public abstract async bool entity_exists_async(Path path) throws EngineError;
+    
+    // === Query Operations (Async) ===
+    public abstract async Entity[] query_by_type_async(string type_label) throws EngineError;
+    public abstract async Entity[] query_by_expression_async(
+        string type_label, 
+        string expression
+    ) throws EngineError;
+    
+    // === Transactions (Async) ===
+    public abstract async Transaction begin_transaction_async() throws EngineError;
+    public abstract async void commit_async() throws EngineError;
+    public abstract async void rollback_async();
+    public abstract bool in_transaction { get; }
+    
+    // === Configuration (Synchronous) ===
+    public abstract StorageConfiguration configuration { owned get; }
+    
+    // === Events ===
+    public signal void entity_created(Entity entity);
+    public signal void entity_deleted(Path path);
+    public signal void entity_modified(Entity entity);
+}
+
+} // namespace Implexus.Core
+```
+
+### Transaction Management
+
+Vala doesn't support async delegates, so there's no `with_write_transaction()` helper. Use manual begin/commit/rollback:
+
+```vala
+try {
+    yield engine.begin_transaction_async();
+    // perform operations
+    yield engine.commit_async();
+} catch (Error e) {
+    yield engine.rollback_async();
+    throw e;
+}
+```
+
+## Storage Interface
+
+Entity persistence abstraction.
+
+```vala
+namespace Implexus.Storage {
+
+public interface Storage : Object {
+    
+    // Entity Operations
+    public abstract bool has_entity(Path path);
+    public abstract uint8[]? load_entity(Path path);
+    public abstract void save_entity(Path path, uint8[] data) throws StorageError;
+    public abstract void delete_entity(Path path) throws StorageError;
+    
+    // Child Tracking
+    public abstract Invercargill.ReadOnlySet<string> get_child_names(Path parent_path);
+    public abstract void register_child(Path parent, string child_name);
+    public abstract void unregister_child(Path parent, string child_name);
+    
+    // Indexing Support
+    public abstract Invercargill.Enumerable<Path> get_paths_by_type(string type_label);
+    public abstract void register_type(Path path, string type_label);
+    public abstract void unregister_type(Path path, string type_label);
+    
+    // Transactions
+    public abstract void begin_transaction() throws StorageError;
+    public abstract void commit_transaction() throws StorageError;
+    public abstract void rollback_transaction();
+    public abstract bool in_transaction { get; }
+    
+    // Lifecycle
+    public abstract void open() throws StorageError;
+    public abstract void close();
+    public abstract void compact() throws StorageError;
+}
+
+} // namespace Implexus.Storage
+```
+
+## DBM Interface
+
+Low-level key-value storage interface with concurrent read support indicator.
+
+```vala
+namespace Implexus.Storage {
+
+public interface Dbm : Object {
+    
+    // === Concurrent Read Support ===
+    /**
+     * Whether this DBM supports concurrent read operations.
+     * - LMDB: true (MVCC allows concurrent readers)
+     * - GDBM: false (single-threaded access required)
+     * - Filesystem: false (single-threaded access required)
+     */
+    public abstract bool supports_concurrent_reads { get; }
+    
+    // === Basic Operations ===
+    public abstract bool has_key(string key);
+    public abstract Invercargill.BinaryData? @get(string key);
+    public abstract void @set(string key, Invercargill.BinaryData value) throws StorageError;
+    public abstract void delete(string key) throws StorageError;
+    
+    // === Iteration ===
+    public abstract Invercargill.Enumerable<string> keys { owned get; }
+    
+    // === Transactions ===
+    public abstract void begin_transaction() throws StorageError;
+    public abstract void commit_transaction() throws StorageError;
+    public abstract void rollback_transaction();
+    public abstract bool in_transaction { get; }
+}
+
+} // namespace Implexus.Storage
+```
+
+## Transaction Interface
+
+Transaction support for atomic operations.
+
+```vala
+namespace Implexus.Core {
+
+public interface Transaction : Object {
+    
+    public abstract bool active { get; }
+    public abstract async void commit_async() throws EngineError;
+    public abstract async void rollback_async();
+}
+
+} // namespace Implexus.Core
+```
+
+## Error Domains
+
+### EngineError
+
+```vala
+namespace Implexus.Core {
+
+public errordomain EngineError {
+    ENTITY_NOT_FOUND,
+    ENTITY_ALREADY_EXISTS,
+    INVALID_PATH,
+    INVALID_OPERATION,
+    TYPE_MISMATCH,
+    EXPRESSION_ERROR,
+    TRANSACTION_ERROR,
+    STORAGE_ERROR,
+    CONNECTION_ERROR,
+    PROTOCOL_ERROR;
+}
+
+} // namespace Implexus.Core
+```
+
+### EntityError
+
+```vala
+namespace Implexus.Core {
+
+public errordomain EntityError {
+    NOT_FOUND,
+    ALREADY_EXISTS,
+    INVALID_OPERATION,
+    TYPE_MISMATCH,
+    STORAGE_ERROR,
+    IO_ERROR;
+}
+
+} // namespace Implexus.Core
+```
+
+### StorageError
+
+```vala
+namespace Implexus.Storage {
+
+public errordomain StorageError {
+    OPEN_ERROR,
+    READ_ERROR,
+    WRITE_ERROR,
+    DELETE_ERROR,
+    TRANSACTION_ERROR,
+    CORRUPTION_ERROR,
+    DISK_FULL,
+    IO_ERROR;
+}
+
+} // namespace Implexus.Storage
+```
+
+## Interface Relationships
+
+```mermaid
+graph TB
+    subgraph Core
+        Entity[Entity Interface]
+        Engine[Engine Interface]
+        Transaction[Transaction Interface]
+        EntityType[EntityType Enum]
+    end
+    
+    subgraph Storage
+        Storage[Storage Interface]
+        Dbm[Dbm Interface]
+        AsyncDbmQueue[AsyncDbmQueue]
+        DbmOperation[DbmOperation]
+    end
+    
+    Engine --> Entity
+    Engine --> Transaction
+    Engine --> Storage
+    Storage --> Dbm
+    Storage --> AsyncDbmQueue
+    AsyncDbmQueue --> DbmOperation
+    Entity --> EntityType
+    Entity --> Engine
+```
+
+## Usage Patterns
+
+### Accessing Entities
+
+```vala
+// Via path (async)
+var entity = yield engine.get_entity_async(new Path("/users/john"));
+
+// Via navigation (async)
+var root = yield engine.get_root_async();
+var users = yield root.get_child_async("users");
+var john = yield users.get_child_async("john");
+```
+
+### Creating Entities
+
+```vala
+// Create container (async)
+var container = yield (yield engine.get_root_async()).create_container_async("app");
+
+// Create document (async)
+var doc = yield container.create_document_async("config", "AppConfig");
+yield doc.set_entity_property_async("version", new ValueElement("1.0.0"));
+
+// Create category - auto-categorize documents by property value (async)
+var category = yield container.create_category_async(
+    "by_status",
+    "Task",
+    "status"  // Expression to evaluate on each Task document
+);
+
+// Create index - text search on documents (async)
+var index = yield container.create_index_async(
+    "search",
+    "Task",
+    "description"  // Expression to search within
+);
+```
+
+### Using Transactions
+
+```vala
+try {
+    yield engine.begin_transaction_async();
+    var container = yield (yield engine.get_root_async()).create_container_async("batch");
+    var doc = yield container.create_document_async("item1", "Item");
+    yield doc.set_entity_property_async("value", new ValueElement(42));
+    
+    var doc2 = yield (yield engine.get_root_async()).get_child_async("batch");
+    doc2 = yield ((!) doc2).create_document_async("item2", "Item");
+    yield doc2.set_entity_property_async("value", new ValueElement(84));
+    
+    yield engine.commit_async();
+} catch (Error e) {
+    yield engine.rollback_async();
+    warning("Transaction failed: %s", e.message);
+}
+```
+
+### Querying Entities
+
+```vala
+// Query all entities of a type (async)
+var tasks = yield engine.query_by_type_async("Task");
+foreach (var task in tasks) {
+    var status = yield task.get_entity_property_async("status");
+    message("Task status: %s", status?.to_string() ?? "null");
+}
+
+// Query with expression filter (async)
+var active_tasks = yield engine.query_by_expression_async("Task", "status == 'active'");
+```

+ 800 - 0
Architecture/04-Class-Hierarchy.md

@@ -0,0 +1,800 @@
+# Class Hierarchy
+
+This document describes the class hierarchy and relationships between types in Implexus.
+
+## Async I/O Model
+
+All I/O operations in Implexus are asynchronous. The class hierarchy reflects this:
+
+- **Engine implementations** provide `*_async` methods for all I/O operations
+- **Entity implementations** provide `*_async` methods for navigation, CRUD, and property access
+- **Identity properties** (path, name, entity_type, engine) remain synchronous
+- **No wrapper classes** - AsyncEngine and AsyncEntity have been removed; async is built into the base interfaces
+
+## Inheritance Diagram
+
+```mermaid
+graph TB
+    subgraph Interfaces
+        EntityI[Entity Interface]
+        EngineI[Engine Interface]
+        StorageI[Storage Interface]
+        DbmI[Dbm Interface]
+        TransactionI[Transaction Interface]
+    end
+    
+    subgraph Abstract Classes
+        AbstractEntity[AbstractEntity]
+    end
+    
+    subgraph Entity Implementations
+        Container[Container]
+        Document[Document]
+        Category[Category]
+        Index[Index]
+        Catalogue[Catalogue]
+        IndexResult[IndexResult]
+    end
+    
+    subgraph Engine Implementations
+        EmbeddedEngine[EmbeddedEngine]
+        RemoteEngine[RemoteEngine]
+    end
+    
+    subgraph Storage Implementations
+        DefaultStorage[DefaultStorage]
+        AsyncDbmQueue[AsyncDbmQueue]
+        FilesystemDbm[FilesystemDbm]
+        GdbmDbm[GdbmDbm]
+        LmdbDbm[LmdbDbm]
+        EmbeddedTransaction[EmbeddedTransaction]
+    end
+    
+    EntityI --> AbstractEntity
+    AbstractEntity --> Container
+    AbstractEntity --> Document
+    AbstractEntity --> Category
+    AbstractEntity --> Index
+    AbstractEntity --> Catalogue
+    Index --> IndexResult
+    
+    EngineI --> EmbeddedEngine
+    EngineI --> RemoteEngine
+    
+    StorageI --> DefaultStorage
+    DbmI --> FilesystemDbm
+    DbmI --> GdbmDbm
+    DbmI --> LmdbDbm
+    AsyncDbmQueue --> DbmI
+    TransactionI --> EmbeddedTransaction
+```
+
+## AbstractEntity Class
+
+Base implementation providing common functionality for all entity types. All I/O operations are async.
+
+```vala
+namespace Implexus.Entities {
+
+public abstract class AbstractEntity : Object, Entity {
+    
+    // Protected fields
+    protected weak Engine _engine;
+    protected EntityPath _path;
+    
+    // === Identity (Synchronous - No I/O) ===
+    public unowned Engine engine { get { return _engine; } }
+    public EntityPath path { owned get { return _path; } }
+    public string name { owned get { return _path.name; } }
+    
+    public abstract EntityType entity_type { get; }
+    public abstract string type_label { owned get; }
+    public abstract string configured_expression { owned get; }
+    public abstract string configured_type_label { owned get; }
+    
+    // === Parent/Child Navigation (Async) ===
+    
+    public async Entity? get_parent_async() throws EntityError {
+        if (_path.is_root) return null;
+        return yield _engine.get_entity_or_null_async(_path.parent);
+    }
+    
+    public virtual async Invercargill.ReadOnlySet<string> get_child_names_async() throws EntityError { 
+        return engine.configuration.storage.get_child_names(_path);
+    }
+    
+    public virtual async Entity? get_child_async(string name) throws EntityError {
+        var child_path = _path.child(name);
+        return yield _engine.get_entity_or_null_async(child_path);
+    }
+    
+    public virtual async Entity[] get_children_async() throws EntityError {
+        var names = yield get_child_names_async();
+        var children = new Entity[0];
+        foreach (var name in names) {
+            var child = yield get_child_async(name);
+            if (child != null) {
+                children += (!) child;
+            }
+        }
+        return children;
+    }
+    
+    // === Creation methods - override in Container (Async) ===
+    
+    public virtual async Entity? create_container_async(string name) throws EntityError {
+        throw new EntityError.INVALID_OPERATION(
+            "Cannot create container on %s", entity_type.to_string()
+        );
+    }
+    
+    public virtual async Entity? create_document_async(string name, string type_label) throws EntityError {
+        throw new EntityError.INVALID_OPERATION(
+            "Cannot create document on %s", entity_type.to_string()
+        );
+    }
+    
+    public virtual async Entity? create_category_async(string name, string type_label, string expression) throws EntityError {
+        throw new EntityError.INVALID_OPERATION(
+            "Cannot create category on %s", entity_type.to_string()
+        );
+    }
+    
+    public virtual async Entity? create_index_async(string name, string type_label, string expression) throws EntityError {
+        throw new EntityError.INVALID_OPERATION(
+            "Cannot create index on %s", entity_type.to_string()
+        );
+    }
+    
+    public virtual async Entity? create_catalogue_async(string name, string type_label, string expression) throws EntityError {
+        throw new EntityError.INVALID_OPERATION(
+            "Cannot create catalogue on %s", entity_type.to_string()
+        );
+    }
+    
+    // === Document operations - override in Document (Async) ===
+    
+    public virtual async Invercargill.Properties get_properties_async() throws EntityError { 
+        throw new EntityError.INVALID_OPERATION("Not a document"); 
+    }
+    
+    public virtual async Invercargill.Element? get_entity_property_async(string name) throws EntityError {
+        throw new EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    public virtual async void set_entity_property_async(string name, Invercargill.Element value) throws EntityError {
+        throw new EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    public virtual async void remove_property_async(string name) throws EntityError {
+        throw new EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    // === Lifecycle (Async) ===
+    
+    public virtual async void delete_async() throws EntityError {
+        engine.configuration.storage.delete_entity(_path);
+        var parent = yield get_parent_async();
+        if (parent != null) {
+            engine.configuration.storage.unregister_child(parent.path, name);
+        }
+        engine.entity_deleted(_path);
+    }
+    
+    public virtual bool exists {
+        get { return engine.configuration.storage.has_entity(_path); }
+    }
+    
+    // === Set Operations (Async) ===
+    
+    public async EntitySet as_set_async() {
+        return new EntitySet(this);
+    }
+    
+    // Note: Invercargill.Element methods are inherited and implemented here
+}
+
+} // namespace Implexus.Entities
+```
+
+## Engine Implementations
+
+### EmbeddedEngine
+
+Direct in-process engine implementation with async I/O.
+
+```vala
+namespace Implexus.Engine {
+
+public class EmbeddedEngine : Object, Engine {
+    
+    private Storage _storage;
+    private Entity? _root;
+    private AsyncDbmQueue _queue;
+    private Transaction? _current_transaction;
+    
+    public EmbeddedEngine(Storage storage) {
+        _storage = storage;
+        _queue = new AsyncDbmQueue(storage.dbm);
+        _queue.start();
+    }
+    
+    // === Root Access (Async) ===
+    
+    public async Entity get_root_async() throws EngineError {
+        if (_root != null) return (!) _root;
+        _root = new Container(this, new EntityPath.root());
+        return (!) _root;
+    }
+    
+    // === Path-Based Access (Async) ===
+    
+    public async Entity? get_entity_async(EntityPath path) throws EngineError {
+        var entity = yield get_entity_or_null_async(path);
+        if (entity == null) {
+            throw new EngineError.ENTITY_NOT_FOUND("Entity not found: %s", path.to_string());
+        }
+        return entity;
+    }
+    
+    public async Entity? get_entity_or_null_async(EntityPath path) throws EngineError {
+        if (!_storage.has_entity(path)) return null;
+        return yield load_entity_async(path);
+    }
+    
+    public async bool entity_exists_async(EntityPath path) throws EngineError {
+        return _storage.has_entity(path);
+    }
+    
+    // === Query Operations (Async) ===
+    
+    public async Entity[] query_by_type_async(string type_label) throws EngineError {
+        var paths = _storage.get_paths_by_type(type_label);
+        var entities = new Entity[0];
+        foreach (var path in paths) {
+            var entity = yield get_entity_or_null_async(path);
+            if (entity != null) {
+                entities += (!) entity;
+            }
+        }
+        return entities;
+    }
+    
+    public async Entity[] query_by_expression_async(string type_label, string expression) throws EngineError {
+        var evaluator = new Invercargill.Expressions.ExpressionEvaluator();
+        var all = yield query_by_type_async(type_label);
+        var results = new Entity[0];
+        foreach (var entity in all) {
+            try {
+                var props = yield entity.get_properties_async();
+                var result = evaluator.evaluate(expression, props);
+                if (result != null && !(result is Invercargill.NullElement)) {
+                    results += entity;
+                }
+            } catch {
+                // Skip entities that fail evaluation
+            }
+        }
+        return results;
+    }
+    
+    // === Transactions (Async) ===
+    
+    public async Transaction begin_transaction_async() throws EngineError {
+        if (_current_transaction != null) {
+            throw new EngineError.TRANSACTION_ERROR("Transaction already active");
+        }
+        _storage.begin_transaction();
+        _current_transaction = new EmbeddedTransaction(() => {
+            _storage.commit_transaction();
+            _current_transaction = null;
+        }, () => {
+            _storage.rollback_transaction();
+            _current_transaction = null;
+        });
+        return _current_transaction;
+    }
+    
+    public async void commit_async() throws EngineError {
+        if (_current_transaction == null) {
+            throw new EngineError.TRANSACTION_ERROR("No transaction active");
+        }
+        yield _current_transaction.commit_async();
+    }
+    
+    public async void rollback_async() {
+        if (_current_transaction != null) {
+            yield _current_transaction.rollback_async();
+        }
+    }
+    
+    public bool in_transaction { get { return _current_transaction != null; } }
+    
+    public StorageConfiguration configuration { 
+        owned get { return new StorageConfiguration(_storage); }
+    }
+    
+    private async Entity? load_entity_async(EntityPath path) throws EngineError {
+        var data = _storage.load_entity(path);
+        if (data == null) return null;
+        
+        var deserializer = new EntityDeserializer();
+        return deserializer.deserialize((!) data, this, path);
+    }
+}
+
+} // namespace Implexus.Engine
+```
+
+### RemoteEngine
+
+Client for connecting to implexusd daemon. All operations are async over the network.
+
+```vala
+namespace Implexus.Engine {
+
+public class RemoteEngine : Object, Engine {
+    
+    private SocketConnection _connection;
+    private MessageReader _reader;
+    private MessageWriter _writer;
+    
+    public RemoteEngine.connect(string host, uint16 port) throws EngineError {
+        try {
+            var client = new SocketClient();
+            _connection = client.connect_to_host(host, port, null);
+            var stream = _connection.get_input_stream();
+            var output = _connection.get_output_stream();
+            _reader = new MessageReader(stream);
+            _writer = new MessageWriter(output);
+        } catch (Error e) {
+            throw new EngineError.CONNECTION_ERROR("Failed to connect: %s", e.message);
+        }
+    }
+    
+    public async Entity get_root_async() throws EngineError {
+        var entity = yield get_entity_or_null_async(new EntityPath.root());
+        if (entity == null) {
+            return new RemoteContainer(this, new EntityPath.root());
+        }
+        return entity;
+    }
+    
+    public async Entity? get_entity_async(EntityPath path) throws EngineError {
+        var response = yield send_request_async(new GetEntityRequest(path));
+        if (response is EntityNotFoundResponse) {
+            throw new EngineError.ENTITY_NOT_FOUND("Entity not found: %s", path.to_string());
+        }
+        return ((EntityResponse) response).entity;
+    }
+    
+    public async Entity? get_entity_or_null_async(EntityPath path) throws EngineError {
+        try {
+            return yield get_entity_async(path);
+        } catch {
+            return null;
+        }
+    }
+    
+    public async bool entity_exists_async(EntityPath path) throws EngineError {
+        var response = yield send_request_async(new EntityExistsRequest(path));
+        return ((BooleanResponse) response).value;
+    }
+    
+    // ... other async methods send protocol messages
+    
+    private async Response send_request_async(Request request) throws EngineError {
+        _writer.write(request);
+        var response = yield _reader.read_response_async();
+        if (response is ErrorResponse) {
+            throw new EngineError.PROTOCOL_ERROR(((ErrorResponse) response).message);
+        }
+        return response;
+    }
+}
+
+} // namespace Implexus.Engine
+```
+
+## Storage Implementations
+
+### DefaultStorage
+
+```vala
+namespace Implexus.Storage {
+
+public class DefaultStorage : Object, Storage {
+    
+    private Dbm _dbm;
+    private AsyncDbmQueue _queue;
+    private Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.HashSet<string>> _children;
+    private Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> _type_index;
+    
+    public DefaultStorage(Dbm dbm) {
+        _dbm = dbm;
+        _queue = new AsyncDbmQueue(dbm);
+        _children = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.HashSet<string>>();
+        _type_index = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+    }
+    
+    public bool has_entity(EntityPath path) {
+        return _dbm.has_key(entity_key(path));
+    }
+    
+    public uint8[]? load_entity(EntityPath path) {
+        var data = _dbm.get(entity_key(path));
+        return data?.to_bytes();
+    }
+    
+    public void save_entity(EntityPath path, uint8[] data) throws StorageError {
+        _dbm.set(entity_key(path), new Invercargill.BinaryData.from_bytes(data));
+    }
+    
+    public void delete_entity(EntityPath path) throws StorageError {
+        _dbm.delete(entity_key(path));
+    }
+    
+    public Invercargill.ReadOnlySet<string> get_child_names(EntityPath parent_path) {
+        var key = children_key(parent_path);
+        if (!_children.has(key)) {
+            return new Invercargill.DataStructures.HashSet<string>().as_read_only();
+        }
+        return _children.get(key).as_read_only();
+    }
+    
+    public void register_child(EntityPath parent, string child_name) {
+        var key = children_key(parent);
+        if (!_children.has(key)) {
+            _children.set(key, new Invercargill.DataStructures.HashSet<string>());
+        }
+        _children.get(key).add(child_name);
+    }
+    
+    public void unregister_child(EntityPath parent, string child_name) {
+        var key = children_key(parent);
+        if (_children.has(key)) {
+            _children.get(key).remove(child_name);
+        }
+    }
+    
+    public Invercargill.Enumerable<EntityPath> get_paths_by_type(string type_label) {
+        if (!_type_index.has(type_label)) {
+            return new Invercargill.DataStructures.Vector<EntityPath>().as_enumerable();
+        }
+        return _type_index.get(type_label)
+                          .select(path_str => new EntityPath(path_str));
+    }
+    
+    public void register_type(EntityPath path, string type_label) {
+        if (!_type_index.has(type_label)) {
+            _type_index.set(type_label, new Invercargill.DataStructures.Vector<string>());
+        }
+        _type_index.get(type_label).add(path.to_string());
+    }
+    
+    public void unregister_type(EntityPath path, string type_label) {
+        if (_type_index.has(type_label)) {
+            _type_index.get(type_label).remove_all(path.to_string());
+        }
+    }
+    
+    private string entity_key(EntityPath path) { return @"entity:$(path.to_string())"; }
+    private string children_key(EntityPath path) { return @"children:$(path.to_string())"; }
+    
+    // Transaction methods delegate to Dbm
+    public void begin_transaction() throws StorageError { _dbm.begin_transaction(); }
+    public void commit_transaction() throws StorageError { _dbm.commit_transaction(); }
+    public void rollback_transaction() { _dbm.rollback_transaction(); }
+    public bool in_transaction { get { return _dbm.in_transaction; } }
+    
+    public void open() throws StorageError { 
+        _dbm.open(); 
+        _queue.start();
+        load_indices();
+    }
+    
+    public void close() { 
+        _queue.shutdown();
+        _dbm.close(); 
+    }
+    
+    public void compact() throws StorageError { 
+        _dbm.compact(); 
+    }
+    
+    private void load_indices() {
+        // Rebuild child and type indices from entity data
+        foreach (var entry in _dbm.keys) {
+            if (entry.has_prefix("entity:")) {
+                // Parse entity and update indices
+            }
+        }
+    }
+}
+
+} // namespace Implexus.Storage
+```
+
+### FilesystemDbm
+
+Simple file-based DBM implementation (single-threaded).
+
+```vala
+namespace Implexus.Storage {
+
+public class FilesystemDbm : Object, Dbm {
+    
+    private string _path;
+    private Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData> _data;
+    private bool _open;
+    private int _transaction_depth;
+    
+    /**
+     * FilesystemDbm does not support concurrent reads.
+     * All operations go through the AsyncDbmQueue worker thread.
+     */
+    public bool supports_concurrent_reads { get { return false; } }
+    
+    public FilesystemDbm(string path) {
+        _path = path;
+        _data = new Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData>();
+        _open = false;
+        _transaction_depth = 0;
+    }
+    
+    public bool has_key(string key) {
+        ensure_open();
+        return _data.has(key);
+    }
+    
+    public Invercargill.BinaryData? @get(string key) {
+        ensure_open();
+        if (!_data.has(key)) return null;
+        return _data.get(key);
+    }
+    
+    public void @set(string key, Invercargill.BinaryData value) throws StorageError {
+        ensure_open();
+        _data.set(key, value);
+    }
+    
+    public void delete(string key) throws StorageError {
+        ensure_open();
+        _data.remove(key);
+    }
+    
+    public Invercargill.Enumerable<string> keys {
+        owned get {
+            ensure_open();
+            return _data.keys;
+        }
+    }
+    
+    public void begin_transaction() throws StorageError {
+        ensure_open();
+        _transaction_depth++;
+    }
+    
+    public void commit_transaction() throws StorageError {
+        ensure_open();
+        if (_transaction_depth > 0) {
+            _transaction_depth--;
+            if (_transaction_depth == 0) {
+                sync();
+            }
+        }
+    }
+    
+    public void rollback_transaction() {
+        if (_transaction_depth > 0) {
+            _transaction_depth--;
+            // Reload from disk to discard uncommitted in-memory changes.
+            // rollback_transaction() does not throw, so a failed reload is
+            // reported rather than propagated.
+            try {
+                load_from_disk();
+            } catch (StorageError e) {
+                warning("Rollback reload failed: %s", e.message);
+            }
+        }
+    }
+    
+    public bool in_transaction { get { return _transaction_depth > 0; } }
+    
+    public void open() throws StorageError {
+        if (_open) return;
+        
+        var file = File.new_for_path(_path);
+        if (file.query_exists()) {
+            load_from_disk();
+        } else {
+            var parent = file.get_parent();
+            if (parent != null && !parent.query_exists()) {
+                try {
+                    parent.make_directory_with_parents();
+                } catch (Error e) {
+                    throw new StorageError.WRITE_ERROR("Failed to create directory: %s", e.message);
+                }
+            }
+        }
+        _open = true;
+    }
+    
+    public void close() {
+        if (!_open) return;
+        sync();
+        _open = false;
+    }
+    
+    public void sync() throws StorageError {
+        // Write all data to disk
+        var file = File.new_for_path(_path);
+        try {
+            var stream = file.replace(null, false, FileCreateFlags.NONE);
+            var writer = new DataOutputStream(stream);
+            
+            // Write header
+            writer.write_string("IMPXDBM1\n");
+            
+            // Write entry count
+            writer.put_int64(_data.count);
+            
+            // Write entries
+            foreach (var entry in _data.entries) {
+                writer.put_int64(entry.key.length);
+                writer.write_string(entry.key);
+                writer.put_int64(entry.value.length);
+                writer.write(entry.value.to_bytes());
+            }
+        } catch (Error e) {
+            throw new StorageError.WRITE_ERROR("Failed to sync: %s", e.message);
+        }
+    }
+    
+    public void compact() throws StorageError {
+        sync();
+    }
+    
+    private void ensure_open() {
+        if (!_open) {
+            critical("DBM not open");
+        }
+    }
+    
+    private void load_from_disk() throws StorageError {
+        // Read all data from disk
+        var file = File.new_for_path(_path);
+        try {
+            var stream = new DataInputStream(file.read());
+            
+            // Read header
+            var header = stream.read_line();
+            if (header != "IMPXDBM1") {
+                throw new StorageError.CORRUPTION_ERROR("Invalid DBM file format");
+            }
+            
+            // Read entries
+            int64 count = stream.read_int64();
+            for (int64 i = 0; i < count; i++) {
+                int64 key_len = stream.read_int64();
+                uint8[] key_bytes = new uint8[key_len];
+                stream.read(key_bytes);
+                string key = (string) key_bytes;
+                
+                int64 value_len = stream.read_int64();
+                uint8[] value = new uint8[value_len];
+                stream.read(value);
+                
+                _data.set(key, new Invercargill.BinaryData.from_bytes(value));
+            }
+        } catch (Error e) {
+            throw new StorageError.READ_ERROR("Failed to load: %s", e.message);
+        }
+    }
+}
+
+} // namespace Implexus.Storage
+```
+
+### GdbmDbm
+
+GDBM-based implementation (single-threaded).
+
+```vala
+namespace Implexus.Storage {
+
+public class GdbmDbm : Object, Dbm {
+    
+    private string _path;
+    private void* _dbf;  // GDBM_FILE handle
+    
+    /**
+     * GDBM does not support concurrent reads.
+     * All operations go through the AsyncDbmQueue worker thread.
+     */
+    public bool supports_concurrent_reads { get { return false; } }
+    
+    // ... GDBM-specific implementation
+}
+
+} // namespace Implexus.Storage
+```
+
+### LmdbDbm
+
+LMDB-based implementation with concurrent read support.
+
+```vala
+namespace Implexus.Storage {
+
+public class LmdbDbm : Object, Dbm {
+    
+    private string _path;
+    private void* _env;  // MDB_env*
+    private void* _txn;  // Current transaction MDB_txn*
+    
+    /**
+     * LMDB supports concurrent reads via MVCC.
+     * Read operations can spawn their own threads,
+     * while writes go through the AsyncDbmQueue.
+     */
+    public bool supports_concurrent_reads { get { return true; } }
+    
+    // ... LMDB-specific implementation
+}
+
+} // namespace Implexus.Storage
+```
+
+## Class Summary
+
+| Class | Extends | Implements | Purpose |
+|-------|---------|------------|---------|
+| `AbstractEntity` | `Object` | `Entity` | Base entity implementation with async methods |
+| `Container` | `AbstractEntity` | - | Container for child entities |
+| `Document` | `AbstractEntity` | - | Properties-based document |
+| `Category` | `AbstractEntity` | - | Expression-based auto-categories |
+| `Index` | `AbstractEntity` | - | Text search with dynamic results |
+| `Catalogue` | `AbstractEntity` | - | Key-based document grouping |
+| `IndexResult` | `AbstractEntity` | - | Container returned by index query |
+| `EmbeddedEngine` | `Object` | `Engine` | In-process engine with async I/O |
+| `RemoteEngine` | `Object` | `Engine` | Client for daemon with async network I/O |
+| `DefaultStorage` | `Object` | `Storage` | Entity persistence |
+| `AsyncDbmQueue` | `Object` | - | Queue for async DBM operations |
+| `FilesystemDbm` | `Object` | `Dbm` | File-based key-value store (single-threaded) |
+| `GdbmDbm` | `Object` | `Dbm` | GDBM-based store (single-threaded) |
+| `LmdbDbm` | `Object` | `Dbm` | LMDB-based store (concurrent reads) |
+| `EmbeddedTransaction` | `Object` | `Transaction` | Transaction implementation |
+
+## Removed Classes
+
+The following classes have been removed as part of the async refactor:
+
+| Class | Reason for Removal |
+|-------|-------------------|
+| `AsyncEngine` | Async is now built into the base `Engine` interface |
+| `AsyncEntity` | Async is now built into the base `Entity` interface |
+| `with_write_transaction()` helper | Vala doesn't support async delegates; use manual begin/commit/rollback |
+
+## Async Method Summary
+
+### Engine Interface
+
+| Method | Description |
+|--------|-------------|
+| `get_root_async()` | Get root entity |
+| `get_entity_async(path)` | Get entity by path |
+| `get_entity_or_null_async(path)` | Get entity or null |
+| `entity_exists_async(path)` | Check entity existence |
+| `query_by_type_async(type_label)` | Query by type |
+| `query_by_expression_async(type_label, expr)` | Query with filter |
+| `begin_transaction_async()` | Start transaction |
+| `commit_async()` | Commit transaction |
+| `rollback_async()` | Rollback transaction |
+
+### Entity Interface
+
+| Method | Description |
+|--------|-------------|
+| `get_parent_async()` | Get parent entity |
+| `get_child_names_async()` | Get child names |
+| `get_child_async(name)` | Get child by name |
+| `get_children_async()` | Get all children |
+| `create_container_async(name)` | Create container child |
+| `create_document_async(name, type)` | Create document child |
+| `create_category_async(name, type, expr)` | Create category child |
+| `create_index_async(name, type, expr)` | Create index child |
+| `create_catalogue_async(name, type, expr)` | Create catalogue child |
+| `get_properties_async()` | Get document properties |
+| `get_entity_property_async(name)` | Get property value |
+| `set_entity_property_async(name, value)` | Set property value |
+| `remove_property_async(name)` | Remove property |
+| `delete_async()` | Delete entity |
+| `as_set_async()` | Create EntitySet |

+ 371 - 0
Architecture/05-Path-System.md

@@ -0,0 +1,371 @@
+# Path System
+
+This document describes how paths are parsed, resolved, and managed in Implexus.
+
+## Path Class
+
+The `Path` class represents a path to an entity in the database hierarchy.
+
+```vala
+namespace Implexus.Core {
+
+public class Path : Object, Invercargill.Element, Invercargill.Hashable, Invercargill.Equatable {
+    
+    private Invercargill.DataStructures.Vector<string> _segments;
+    
+    // Constructors
+    public Path(string path_string) {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+        parse(path_string);
+    }
+    
+    public Path.root() {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+    }
+    
+    public Path.from_segments(Invercargill.ReadOnlyCollection<string> segments) {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var seg in segments) {
+            _segments.add(seg);
+        }
+    }
+    
+    public Path.child(Path parent, string name) {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var seg in parent._segments) {
+            _segments.add(seg);
+        }
+        _segments.add(name);
+    }
+    
+    // Properties
+    public bool is_root { get { return _segments.count == 0; } }
+    
+    public string name {
+        owned get {
+            if (is_root) return "";
+            return _segments.last();
+        }
+    }
+    
+    public Path parent {
+        owned get {
+            if (is_root) return this;
+            var parent_segments = _segments.take(_segments.count - 1);
+            return new Path.from_segments(parent_segments);
+        }
+    }
+    
+    public int depth { get { return _segments.count; } }
+    
+    public Invercargill.ReadOnlyCollection<string> segments {
+        owned get { return _segments.as_read_only(); }
+    }
+    
+    // Path Operations
+    public Path child(string name) {
+        return new Path.child(this, validate_name(name));
+    }
+    
+    public Path sibling(string name) {
+        if (is_root) {
+            throw new EngineError.INVALID_PATH("Root has no siblings");
+        }
+        return parent.child(name);
+    }
+    
+    public Path ancestor(int levels) {
+        if (levels < 0 || levels > depth) {
+            throw new EngineError.INVALID_PATH("Invalid ancestor level: %d", levels);
+        }
+        var ancestor_segments = _segments.take(depth - levels);
+        return new Path.from_segments(ancestor_segments);
+    }
+    
+    public bool is_ancestor_of(Path other) {
+        if (depth >= other.depth) return false;
+        for (int i = 0; i < depth; i++) {
+            if (_segments.get(i) != other._segments.get(i)) return false;
+        }
+        return true;
+    }
+    
+    public bool is_descendant_of(Path other) {
+        return other.is_ancestor_of(this);
+    }
+    
+    public Path relative_to(Path ancestor) {
+        if (!ancestor.is_ancestor_of(this)) {
+            throw new EngineError.INVALID_PATH("%s is not an ancestor of %s", 
+                ancestor.to_string(), this.to_string());
+        }
+        var relative_segments = _segments.skip(ancestor.depth);
+        return new Path.from_segments(relative_segments);
+    }
+    
+    public Path resolve(Path relative_path) {
+        var result_segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var seg in _segments) {
+            result_segments.add(seg);
+        }
+        foreach (var seg in relative_path._segments) {
+            if (seg == "..") {
+                if (result_segments.count > 0) {
+                    result_segments.remove_at(result_segments.count - 1);
+                }
+            } else if (seg != ".") {
+                result_segments.add(seg);
+            }
+        }
+        return new Path.from_segments(result_segments);
+    }
+    
+    // String Conversion
+    public string to_string() {
+        if (is_root) return "/";
+        var builder = new StringBuilder();
+        foreach (var seg in _segments) {
+            builder.append("/");
+            builder.append(escape_segment(seg));
+        }
+        return builder.str;
+    }
+    
+    public string to_key() {
+        // Compact representation for storage keys
+        if (is_root) return "\0";
+        return string.joinv("\0", _segments.to_array());
+    }
+    
+    public static Path from_key(string key) {
+        if (key == "\0") return new Path.root();
+        var segments = key.split("\0");
+        return new Path.from_segments(new Invercargill.DataStructures.Vector<string>.from_array(segments));
+    }
+    
+    // Parsing
+    private void parse(string path_string) {
+        if (path_string == null || path_string == "") {
+            return; // Root path
+        }
+        
+        var normalized = path_string;
+        if (normalized.has_prefix("/")) {
+            normalized = normalized.substring(1);
+        }
+        if (normalized.has_suffix("/")) {
+            normalized = normalized.substring(0, normalized.length - 1);
+        }
+        
+        if (normalized == "") {
+            return; // Root path
+        }
+        
+        var parts = normalized.split("/");
+        foreach (var part in parts) {
+            if (part == "") continue;
+            _segments.add(unescape_segment(part));
+        }
+    }
+    
+    // Validation
+    private string validate_name(string name) {
+        if (name == null || name == "") {
+            throw new EngineError.INVALID_PATH("Entity name cannot be empty");
+        }
+        if (name.contains("/")) {
+            throw new EngineError.INVALID_PATH("Entity name cannot contain /: %s", name);
+        }
+        if (name == "." || name == "..") {
+            throw new EngineError.INVALID_PATH("Entity name cannot be . or ..");
+        }
+        return name;
+    }
+    
+    // Escaping for special characters in segment names
+    private string escape_segment(string segment) {
+        return segment.replace("~", "~7e")
+                      .replace("/", "~2f")
+                      .replace("\\", "~5c")
+                      .replace("\0", "~00");
+    }
+    
+    private string unescape_segment(string segment) {
+        return segment.replace("~00", "\0")
+                      .replace("~5c", "\\")
+                      .replace("~2f", "/")
+                      .replace("~7e", "~");
+    }
+    
+    // Hashable
+    public uint hash() {
+        uint h = 0;
+        foreach (var seg in _segments) {
+            h ^= str_hash(seg);
+        }
+        return h;
+    }
+    
+    // Equatable
+    public bool equals(Path other) {
+        if (depth != other.depth) return false;
+        for (int i = 0; i < depth; i++) {
+            if (_segments.get(i) != other._segments.get(i)) return false;
+        }
+        return true;
+    }
+    
+    // Element interface
+    public Type type() { return typeof(Path); }
+    public string type_name() { return "Path"; }
+    public bool is_null() { return false; }
+    public bool is_type(Type t) { return t.is_a(typeof(Path)); }
+    public bool assignable_to(Type t) { return t.is_a(typeof(Path)); }
+    public bool assignable_to_type(Type t) { return is_type(t); }
+    public T? as<T>() { return this; }
+    public T assert_as<T>() { return (T) this; }
+    public T? as_or_default<T>(T default_value) { return this; }
+    public bool try_get_as<T>(out T result) { result = this; return true; }
+    public T to_value<T>() { return (T) this; }
+    
+    // Static factory methods
+    public static Path parse(string path_string) {
+        return new Path(path_string);
+    }
+    
+    public static Path combine(Path base_path, string relative) {
+        return base_path.resolve(new Path(relative));
+    }
+}
+
+} // namespace Implexus.Core
+```
+
+## Path Resolution Flow
+
+```mermaid
+sequenceDiagram
+    participant App as Application
+    participant Engine as Engine
+    participant Storage as Storage
+    participant Cache as Entity Cache
+    
+    App->>Engine: get_entity with /users/john/profile
+    Engine->>Engine: Create Path object
+    Engine->>Storage: has_entity with path
+    Storage-->>Engine: true
+    Engine->>Cache: check cache for path
+    Cache-->>Engine: not found
+    Engine->>Storage: load_entity with path
+    Storage-->>Engine: binary data
+    Engine->>Engine: deserialize entity
+    Engine->>Cache: cache entity
+    Engine-->>App: Entity object
+```
+
+## Path Examples
+
+| Path String | Segments | is_root | depth |
+|-------------|----------|---------|-------|
+| `/` | `[]` | true | 0 |
+| `/users` | `["users"]` | false | 1 |
+| `/users/john` | `["users", "john"]` | false | 2 |
+| `/users/john/profile` | `["users", "john", "profile"]` | false | 3 |
+
+## Path Operations Examples
+
+```vala
+// Creation
+var root = new Path.root();
+var users = new Path("/users");
+var john = users.child("john");
+
+// Navigation
+assert(john.parent.equals(users));
+assert(john.name == "john");
+assert(john.depth == 2);
+
+// Relationships
+assert(users.is_ancestor_of(john));
+assert(john.is_descendant_of(users));
+assert(!john.is_ancestor_of(users));
+
+// Relative paths
+var profile = john.child("profile");
+var relative = profile.relative_to(users);
+assert(relative.to_string() == "/john/profile");
+
+// Resolution
+var resolved = users.resolve(new Path("john/profile"));
+assert(resolved.equals(profile));
+
+// With .. and .
+var complex = john.resolve(new Path("../jane"));
+assert(complex.to_string() == "/users/jane");
+```
+
+## Path Validation Rules
+
+### Valid Names
+- Non-empty strings
+- Cannot contain `/`
+- Cannot be `.` or `..`
+- Any other characters allowed (including unicode)
+
+### Invalid Names
+- Empty string `""`
+- Contains slash `"a/b"`
+- Current directory `"."`
+- Parent directory `".."`
+
+### Escaping
+Special characters in names are escaped using tilde encoding:
+
+| Character | Escaped |
+|-----------|---------|
+| `~` | `~7e` |
+| `/` | `~2f` |
+| `\` | `~5c` |
+| `\0` | `~00` |
+
+This allows names like `"a/b"` (escaped as `"a~2fb"`) to be stored safely.
+
+## Path-Based Entity Resolution
+
+The engine resolves paths to entities through the storage layer:
+
+```vala
+public Entity? resolve_path(Path path) {
+    // 1. Check cache
+    if (_cache.has(path)) {
+        return _cache.get(path);
+    }
+    
+    // 2. Load from storage
+    if (!_storage.has_entity(path)) {
+        return null;
+    }
+    
+    // 3. Deserialize
+    var data = _storage.load_entity(path);
+    var entity = _deserializer.deserialize(data, this, path);
+    
+    // 4. Cache and return
+    _cache.set(path, entity);
+    return entity;
+}
+```
+
+## Child Name Tracking
+
+Storage tracks child names for each path to enable efficient enumeration:
+
+```
+Key: children:/users
+Value: ["john", "jane", "admin"]
+
+Key: children:/users/john
+Value: ["profile", "settings"]
+```
+
+This allows `get_child_names()` to return quickly without scanning all entities.

+ 627 - 0
Architecture/06-Entity-Types.md

@@ -0,0 +1,627 @@
+# Entity Types
+
+This document details the four entity type implementations: Container, Document, Category, and Index.
+
+## Entity Type Overview
+
+```mermaid
+graph TB
+    subgraph Entity Types
+        Container[Container - Container]
+        Document[Document - Properties]
+        Category[Category - Auto Categories]
+        Index[Index - Text Search]
+    end
+    
+    subgraph Relationships
+        Container -->|contains| Container
+        Container -->|contains| Document
+        Container -->|contains| Category
+        Container -->|contains| Index
+        Category -->|generates| Container
+        Index -->|returns| IndexResult
+        IndexResult -->|contains| Document
+    end
+```
+
+## Container Entity
+
+A Container holds child entities, similar to a folder in a filesystem.
+
+### Implementation
+
+```vala
+namespace Implexus.Entities {
+
+public class Container : AbstractEntity {
+    
+    public Container(Engine engine, Path path) {
+        base(engine, path);
+    }
+    
+    public override EntityType entity_type { get { return EntityType.CONTAINER; } }
+    
+    // Child creation - only Container can create children
+    public override Entity? create_container(string name) throws EngineError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.child(name);
+        var container = new Container(_engine, child_path);
+        
+        save_entity(container);
+        register_child(name);
+        
+        _engine.entity_created(container);
+        return container;
+    }
+    
+    public override Entity? create_document(string name, string type_label) throws EngineError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.child(name);
+        var document = new Document(_engine, child_path, type_label);
+        
+        save_entity(document);
+        register_child(name);
+        register_type(type_label, child_path);
+        
+        _engine.entity_created(document);
+        return document;
+    }
+    
+    public override Entity? create_category(
+        string name, 
+        string type_label, 
+        string expression
+    ) throws EngineError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.child(name);
+        var category = new Category(_engine, child_path, type_label, expression);
+        
+        save_entity(category);
+        register_child(name);
+        
+        _engine.entity_created(category);
+        return category;
+    }
+    
+    public override Entity? create_index(
+        string name, 
+        string type_label, 
+        string expression
+    ) throws EngineError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.child(name);
+        var index = new Index(_engine, child_path, type_label, expression);
+        
+        save_entity(index);
+        register_child(name);
+        
+        _engine.entity_created(index);
+        return index;
+    }
+    
+    public override void delete() throws EngineError {
+        // Delete all children first
+        foreach (var child_name in child_names) {
+            var child = get_child(child_name);
+            if (child != null) {
+                ((!) child).delete();
+            }
+        }
+        base.delete();
+    }
+    
+    private void validate_can_create_child(string name) throws EngineError {
+        if (name == null || name == "") {
+            throw new EngineError.INVALID_PATH("Child name cannot be empty");
+        }
+        var child_path = _path.child(name);
+        if (_engine.entity_exists(child_path)) {
+            throw new EngineError.ENTITY_ALREADY_EXISTS(
+                "Entity already exists: %s", child_path.to_string()
+            );
+        }
+    }
+    
+    private void save_entity(Entity entity) throws EngineError {
+        var serializer = new EntitySerializer();
+        var data = serializer.serialize(entity);
+        _engine.configuration.storage.save_entity(entity.path, data);
+    }
+    
+    private void register_child(string name) {
+        _engine.configuration.storage.register_child(_path, name);
+    }
+    
+    private void register_type(string type_label, Path path) {
+        _engine.configuration.storage.register_type(path, type_label);
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+## Document Entity
+
+A Document is a typed object with properties. The type_label is application-defined and used for querying.
+
+### Implementation
+
+```vala
+namespace Implexus.Entities {
+
+public class Document : AbstractEntity {
+    
+    private string _type_label;
+    private Invercargill.DataStructures.PropertyDictionary _properties;
+    
+    public Document(Engine engine, Path path, string type_label) {
+        base(engine, path);
+        _type_label = type_label;
+        _properties = new Invercargill.DataStructures.PropertyDictionary();
+    }
+    
+    public override EntityType entity_type { get { return EntityType.DOCUMENT; } }
+    
+    public override string type_label { 
+        owned get { return _type_label; } 
+    }
+    
+    public override Invercargill.Properties properties { 
+        owned get { return _properties; } 
+    }
+    
+    public override Invercargill.Element? get_property(string name) {
+        return _properties.get(name);
+    }
+    
+    public override void set_property(string name, Invercargill.Element value) {
+        _properties.set(name, value);
+        save();
+        _engine.entity_modified(this);
+    }
+    
+    public override void remove_property(string name) {
+        _properties.remove(name);
+        save();
+        _engine.entity_modified(this);
+    }
+    
+    public override Invercargill.ReadOnlySet<string> child_names { 
+        owned get { 
+            // Documents don't have children
+            return new Invercargill.DataStructures.HashSet<string>().as_read_only();
+        }
+    }
+    
+    public override Entity? get_child(string name) {
+        return null; // Documents don't have children
+    }
+    
+    private void save() throws EngineError {
+        var serializer = new EntitySerializer();
+        var data = serializer.serialize(this);
+        _engine.configuration.storage.save_entity(_path, data);
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+### Properties Interface
+
+Documents implement `Invercargill.Properties` through `PropertyDictionary`:
+
+```vala
+// Using Invercargill.DataStructures.PropertyDictionary
+var doc = container.create_document("user1", "User");
+doc.set_property("name", new Invercargill.ValueElement("John"));
+doc.set_property("age", new Invercargill.ValueElement(30));
+doc.set_property("active", new Invercargill.ValueElement(true));
+
+// Reading properties
+var name = doc.get_property("name").to_value<string>();
+var age = doc.get_property("age").to_value<int>();
+```
+
+## Category Entity
+
+A Category automatically generates Container entities based on expression evaluation over documents of a specific type.
+
+### How It Works
+
+1. Configure a Category with a type_label and expression
+2. When a child is requested by name, the expression is evaluated on all documents of that type
+3. A Container is returned containing documents where the expression result matches the requested name
+
+### Implementation
+
+```vala
+namespace Implexus.Entities {
+
+public class Category : AbstractEntity {
+    
+    private string _type_label;
+    private string _expression;
+    private Invercargill.Expressions.Expression? _compiled_expression;
+    
+    public Category(Engine engine, Path path, string type_label, string expression) {
+        base(engine, path);
+        _type_label = type_label;
+        _expression = expression;
+        _compiled_expression = null;
+    }
+    
+    public override EntityType entity_type { get { return EntityType.CATEGORY; } }
+    
+    public override string configured_type_label { 
+        owned get { return _type_label; } 
+    }
+    
+    public override string configured_expression { 
+        owned get { return _expression; } 
+    }
+    
+    // Compile expression lazily
+    private Invercargill.Expressions.Expression get_compiled_expression() throws EngineError {
+        if (_compiled_expression == null) {
+            var parser = new Invercargill.Expressions.ExpressionParser();
+            try {
+                _compiled_expression = parser.parse(_expression);
+            } catch (Error e) {
+                throw new EngineError.EXPRESSION_ERROR(
+                    "Failed to parse expression: %s", e.message
+                );
+            }
+        }
+        return (!) _compiled_expression;
+    }
+    
+    // Child names are the unique values of the expression over all documents
+    public override Invercargill.ReadOnlySet<string> child_names { 
+        owned get {
+            var names = new Invercargill.DataStructures.HashSet<string>();
+            try {
+                var expr = get_compiled_expression();
+                var evaluator = new Invercargill.Expressions.ExpressionEvaluator();
+                
+                foreach (var doc in _engine.query_by_type(_type_label)) {
+                    var result = evaluator.evaluate(expr, doc.properties);
+                    if (result != null && !result.is_null()) {
+                        names.add(result.to_string());
+                    }
+                }
+            } catch (Error e) {
+                warning("Error evaluating category expression: %s", e.message);
+            }
+            return names.as_read_only();
+        }
+    }
+    
+    // Get child returns a Container containing matching documents
+    public override Entity? get_child(string name) {
+        var matching_docs = new Invercargill.DataStructures.Vector<Entity>();
+        
+        try {
+            var expr = get_compiled_expression();
+            var evaluator = new Invercargill.Expressions.ExpressionEvaluator();
+            
+            foreach (var doc in _engine.query_by_type(_type_label)) {
+                var result = evaluator.evaluate(expr, doc.properties);
+                if (result != null && result.to_string() == name) {
+                    matching_docs.add(doc);
+                }
+            }
+        } catch (Error e) {
+            warning("Error evaluating category expression: %s", e.message);
+            return null;
+        }
+        
+        if (matching_docs.count == 0) {
+            return null;
+        }
+        
+        // Return a virtual container containing the matching documents
+        return new CategoryContainer(_engine, _path.child(name), matching_docs);
+    }
+    
+    // Categories cannot create children
+    public override Entity? create_container(string name) throws EngineError {
+        throw new EngineError.INVALID_OPERATION("Cannot create children in a category");
+    }
+    
+    public override Invercargill.Enumerable<Entity> get_children() {
+        return child_names.select(name => get_child(name))
+                          .where(entity => entity != null)
+                          .select(entity => (!) entity);
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+### CategoryContainer
+
+A virtual container that contains documents matched by the category:
+
+```vala
+namespace Implexus.Entities {
+
+internal class CategoryContainer : AbstractEntity {
+    
+    private Invercargill.DataStructures.Vector<Entity> _documents;
+    
+    public CategoryContainer(
+        Engine engine, 
+        Path path, 
+        Invercargill.DataStructures.Vector<Entity> documents
+    ) {
+        base(engine, path);
+        _documents = documents;
+    }
+    
+    public override EntityType entity_type { get { return EntityType.CONTAINER; } }
+    
+    public override Invercargill.ReadOnlySet<string> child_names {
+        owned get {
+            var names = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var doc in _documents) {
+                names.add(doc.name);
+            }
+            return names.as_read_only();
+        }
+    }
+    
+    public override Entity? get_child(string name) {
+        foreach (var doc in _documents) {
+            if (doc.name == name) {
+                return doc;
+            }
+        }
+        return null;
+    }
+    
+    public override Invercargill.Enumerable<Entity> get_children() {
+        return _documents.as_enumerable();
+    }
+    
+    // Read-only - cannot create children
+    public override Entity? create_container(string name) throws EngineError {
+        throw new EngineError.INVALID_OPERATION("Cannot create children in a category container");
+    }
+    
+    public override void delete() throws EngineError {
+        throw new EngineError.INVALID_OPERATION("Cannot delete a category container");
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+### Category Usage Example
+
+```vala
+// Create documents with a "status" property
+var tasks = engine.get_root().create_container("tasks");
+var task1 = tasks.create_document("task1", "Task");
+task1.set_property("status", new ValueElement("pending"));
+task1.set_property("title", new ValueElement("First task"));
+
+var task2 = tasks.create_document("task2", "Task");
+task2.set_property("status", new ValueElement("done"));
+task2.set_property("title", new ValueElement("Second task"));
+
+var task3 = tasks.create_document("task3", "Task");
+task3.set_property("status", new ValueElement("pending"));
+task3.set_property("title", new ValueElement("Third task"));
+
+// Create a category that groups by status
+var by_status = tasks.create_category("by_status", "Task", "status");
+
+// Navigate the category
+var pending = by_status.get_child("pending");  // Returns Container with task1, task3
+var done = by_status.get_child("done");        // Returns Container with task2
+
+// List all status values
+foreach (var status in by_status.child_names) {
+    print("Status: %s\n", status);
+}
+```
+
+## Index Entity
+
+An Index provides text search over documents. Requesting a child returns a Container containing documents matching the search term.
+
+### Implementation
+
+```vala
+namespace Implexus.Entities {
+
+public class Index : AbstractEntity {
+    
+    private string _type_label;
+    private string _expression;
+    private Invercargill.Expressions.Expression? _compiled_expression;
+    
+    public Index(Engine engine, Path path, string type_label, string expression) {
+        base(engine, path);
+        _type_label = type_label;
+        _expression = expression;
+        _compiled_expression = null;
+    }
+    
+    public override EntityType entity_type { get { return EntityType.INDEX; } }
+    
+    public override string configured_type_label { 
+        owned get { return _type_label; } 
+    }
+    
+    public override string configured_expression { 
+        owned get { return _expression; } 
+    }
+    
+    // Index children are opaque - cannot list them
+    public override Invercargill.ReadOnlySet<string> child_names { 
+        owned get {
+            // Indexes have opaque children - return empty set
+            return new Invercargill.DataStructures.HashSet<string>().as_read_only();
+        }
+    }
+    
+    // Get child performs text search and returns a Container with results
+    public override Entity? get_child(string search_term) {
+        var matching_docs = new Invercargill.DataStructures.Vector<Entity>();
+        
+        try {
+            var expr = get_compiled_expression();
+            var evaluator = new Invercargill.Expressions.ExpressionEvaluator();
+            var search_lower = search_term.down();
+            
+            foreach (var doc in _engine.query_by_type(_type_label)) {
+                var result = evaluator.evaluate(expr, doc.properties);
+                if (result != null) {
+                    var text = result.to_string().down();
+                    if (text.contains(search_lower)) {
+                        matching_docs.add(doc);
+                    }
+                }
+            }
+        } catch (Error e) {
+            warning("Error evaluating index expression: %s", e.message);
+            return null;
+        }
+        
+        if (matching_docs.count == 0) {
+            return null;
+        }
+        
+        // Return an IndexResult - a Container containing matching documents
+        return new IndexResult(_engine, _path.child(search_term), search_term, matching_docs);
+    }
+    
+    // Indexes cannot create children
+    public override Entity? create_container(string name) throws EngineError {
+        throw new EngineError.INVALID_OPERATION("Cannot create children in an index");
+    }
+    
+    private Invercargill.Expressions.Expression get_compiled_expression() throws EngineError {
+        if (_compiled_expression == null) {
+            var parser = new Invercargill.Expressions.ExpressionParser();
+            try {
+                _compiled_expression = parser.parse(_expression);
+            } catch (Error e) {
+                throw new EngineError.EXPRESSION_ERROR(
+                    "Failed to parse expression: %s", e.message
+                );
+            }
+        }
+        return (!) _compiled_expression;
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+### IndexResult
+
+The Container returned by an Index query:
+
+```vala
+namespace Implexus.Entities {
+
+public class IndexResult : AbstractEntity {
+    
+    private string _search_term;
+    private Invercargill.DataStructures.Vector<Entity> _documents;
+    
+    public IndexResult(
+        Engine engine, 
+        Path path, 
+        string search_term,
+        Invercargill.DataStructures.Vector<Entity> documents
+    ) {
+        base(engine, path);
+        _search_term = search_term;
+        _documents = documents;
+    }
+    
+    public string search_term { get { return _search_term; } }
+    
+    public override EntityType entity_type { get { return EntityType.CONTAINER; } }
+    
+    public override Invercargill.ReadOnlySet<string> child_names {
+        owned get {
+            var names = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var doc in _documents) {
+                names.add(doc.name);
+            }
+            return names.as_read_only();
+        }
+    }
+    
+    public override Entity? get_child(string name) {
+        foreach (var doc in _documents) {
+            if (doc.name == name) {
+                return doc;
+            }
+        }
+        return null;
+    }
+    
+    public override Invercargill.Enumerable<Entity> get_children() {
+        return _documents.as_enumerable();
+    }
+    
+    // Read-only
+    public override Entity? create_container(string name) throws EngineError {
+        throw new EngineError.INVALID_OPERATION("Cannot create children in an index result");
+    }
+    
+    public override void delete() throws EngineError {
+        throw new EngineError.INVALID_OPERATION("Cannot delete an index result");
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+### Index Usage Example
+
+```vala
+// Create documents with searchable content
+var articles = engine.get_root().create_container("articles");
+var article1 = articles.create_document("article1", "Article");
+article1.set_property("title", new ValueElement("Introduction to Vala"));
+article1.set_property("content", new ValueElement("Vala is a programming language..."));
+
+var article2 = articles.create_document("article2", "Article");
+article2.set_property("title", new ValueElement("Advanced Vala Techniques"));
+article2.set_property("content", new ValueElement("This article covers advanced..."));
+
+// Create an index over the title property
+var search = articles.create_index("search", "Article", "title");
+
+// Search for documents
+var results = search.get_child("Vala");  // Returns IndexResult with article1, article2
+var intro_results = search.get_child("Introduction");  // Returns IndexResult with article1
+
+// Navigate results
+foreach (var doc in results.get_children()) {
+    print("Found: %s\n", doc.name);
+}
+```
+
+## Entity Type Comparison
+
+| Feature | Container | Document | Category | Index |
+|---------|----------|----------|-----------|-------|
+| Can contain children | Yes | No | Virtual | Virtual |
+| Has properties | No | Yes | No | No |
+| Has type_label | No | Yes | No | No |
+| Child names enumerable | Yes | - | Yes | No |
+| Expression-based | No | No | Yes | Yes |
+| Children are persistent | Yes | - | No | No |
+| Can create children | Yes | No | No | No |

+ 1000 - 0
Architecture/07-Storage-Layer.md

@@ -0,0 +1,1000 @@
+# Storage Layer
+
+This document describes the storage abstraction and binary serialization format.
+
+## Storage Architecture
+
+```mermaid
+graph TB
+    subgraph API Layer
+        Engine[Engine]
+    end
+    
+    subgraph Storage Layer
+        Storage[Storage Interface]
+        Serializer[EntitySerializer]
+        Deserializer[EntityDeserializer]
+    end
+    
+    subgraph Async Queue
+        AsyncDbmQueue[AsyncDbmQueue]
+        DbmOperation[DbmOperation]
+        WorkerThread[Worker Thread]
+    end
+    
+    subgraph Serialization
+        ElementWriter[ElementWriter]
+        ElementReader[ElementReader]
+    end
+    
+    subgraph Persistence
+        Dbm[Dbm Interface]
+        FilesystemDbm[FilesystemDbm]
+        GdbmDbm[GdbmDbm]
+        LmdbDbm[LmdbDbm]
+    end
+    
+    Engine --> Storage
+    Storage --> Serializer
+    Storage --> Deserializer
+    Serializer --> ElementWriter
+    Deserializer --> ElementReader
+    Storage --> AsyncDbmQueue
+    AsyncDbmQueue --> DbmOperation
+    AsyncDbmQueue --> WorkerThread
+    AsyncDbmQueue --> Dbm
+    Dbm --> FilesystemDbm
+    Dbm --> GdbmDbm
+    Dbm --> LmdbDbm
+```
+
+## Async I/O Architecture
+
+All database I/O operations are asynchronous, using a queue system to handle different DBM backend capabilities.
+
+### AsyncDbmQueue
+
+The `AsyncDbmQueue` manages asynchronous execution of database operations:
+
+```vala
+namespace Implexus.Storage {
+
+/**
+ * Queue system for async DBM operations.
+ *
+ * For DBMs without concurrent read support (GDBM, Filesystem):
+ * - All operations go through a single worker thread
+ * - Read operations are prioritized over writes
+ *
+ * For DBMs with concurrent read support (LMDB):
+ * - Write operations go through the worker thread
+ * - Read operations spawn their own threads
+ */
+public class AsyncDbmQueue : GLib.Object {
+    private weak Dbm _dbm;
+    private AsyncQueue<DbmOperation> _read_queue;
+    private AsyncQueue<DbmOperation> _write_queue;
+    private Thread<void>? _worker_thread = null;
+    private bool _running = false;
+    
+    /**
+     * Creates a new AsyncDbmQueue for the given DBM.
+     *
+     * @param dbm The DBM instance to wrap
+     */
+    public AsyncDbmQueue(Dbm dbm) {
+        _dbm = dbm;
+        _read_queue = new AsyncQueue<DbmOperation>();
+        _write_queue = new AsyncQueue<DbmOperation>();
+    }
+    
+    /**
+     * Starts the worker thread.
+     * Must be called before executing any operations.
+     */
+    public void start();
+    
+    /**
+     * Stops the worker thread and waits for it to finish.
+     *
+     * @param timeout_ms Maximum time to wait for shutdown (0 = wait forever)
+     * @return true if shutdown completed, false if timed out
+     */
+    public bool shutdown(int timeout_ms = 5000);
+    
+    /**
+     * Executes a read operation asynchronously.
+     *
+     * For concurrent-read DBMs (LMDB), spawns a new thread.
+     * For single-threaded DBMs (GDBM, Filesystem), queues and prioritizes.
+     */
+    public async void execute_read_async(owned DbmOperation op) throws Error;
+    
+    /**
+     * Executes a write operation asynchronously.
+     *
+     * Write operations always go through the queue to ensure
+     * serialization and data consistency.
+     */
+    public async void execute_write_async(owned DbmOperation op) throws Error;
+    
+    /**
+     * Queues a read operation and returns the operation object.
+     */
+    public DbmOperation queue_read();
+    
+    /**
+     * Queues a write operation and returns the operation object.
+     */
+    public DbmOperation queue_write();
+}
+
+} // namespace Implexus.Storage
+```
+
+### DbmOperation
+
+Represents a single operation in the queue:
+
+```vala
+namespace Implexus.Storage {
+
+/**
+ * Type of database operation.
+ */
+public enum DbmOperationType {
+    /** Read operation - prioritized over writes */
+    READ,
+    /** Write operation - processed in order */
+    WRITE
+}
+
+/**
+ * Represents a single operation in the AsyncDbmQueue.
+ *
+ * Stores the operation type, callback for async continuation,
+ * and provides storage for the result or error.
+ */
+public class DbmOperation : GLib.Object {
+    /** The type of this operation (READ or WRITE) */
+    public DbmOperationType op_type { get; construct; }
+    
+    /** The callback to resume the async method when complete */
+    public SourceFunc callback;
+    
+    /** The result of the operation (if successful) */
+    public void* result { get; set; }
+    
+    /** The error that occurred during execution (if any) */
+    public Error? error { get; set; }
+    
+    /** Whether the operation has been completed */
+    public bool completed { get; set; default = false; }
+    
+    /**
+     * Creates a new DbmOperation.
+     *
+     * @param type The type of operation (READ or WRITE)
+     * @param cb The callback to resume the async method
+     */
+    public DbmOperation(DbmOperationType type, owned SourceFunc cb);
+}
+
+} // namespace Implexus.Storage
+```
+
+### Worker Thread Behavior
+
+The worker thread processes operations with read prioritization:
+
+1. **Read Priority**: Check read queue first, then write queue
+2. **Blocking**: If no operations available, block on read queue with timeout
+3. **Result Delivery**: Use `Idle.add()` to return results to main loop
+4. **Shutdown**: Push dummy operation to wake worker, wait for completion
+
+```vala
+private void _worker() {
+    while (_running) {
+        // Try to get a read operation first (prioritize reads)
+        DbmOperation? op = _read_queue.try_pop();
+        
+        // If no read operation, try write
+        if (op == null) {
+            op = _write_queue.try_pop();
+        }
+        
+        // If still no operation, block on read queue with timeout
+        if (op == null) {
+            op = _read_queue.timed_pop(...);
+            if (op == null) {
+                continue;
+            }
+        }
+        
+        // ... execute the operation against the DBM (elided) ...
+        
+        // Signal completion via Idle to return to main context
+        Idle.add(() => {
+            op.callback();
+            return Source.REMOVE;
+        });
+    }
+}
+```
+
+## DBM Interface
+
+The DBM interface provides low-level key-value storage with concurrent read support indicator:
+
+```vala
+namespace Implexus.Storage {
+
+public interface Dbm : Object {
+    
+    /**
+     * Whether this DBM implementation supports concurrent read operations.
+     * 
+     * If true, read operations can spawn new threads while writes go through
+     * the async queue. This enables better read parallelism.
+     * 
+     * - LMDB: true (MVCC allows concurrent readers)
+     * - GDBM: false (single-threaded access required)
+     * - Filesystem: false (single-threaded access required)
+     */
+    public abstract bool supports_concurrent_reads { get; }
+    
+    /**
+     * Checks if a key exists in the database.
+     */
+    public abstract bool has_key(string key);
+    
+    /**
+     * Gets the value for a key.
+     */
+    public abstract Invercargill.BinaryData? @get(string key);
+    
+    /**
+     * Sets a key-value pair.
+     */
+    public abstract void @set(string key, Invercargill.BinaryData value) throws StorageError;
+    
+    /**
+     * Deletes a key from the database.
+     */
+    public abstract void delete(string key) throws StorageError;
+    
+    /**
+     * Gets all keys in the database.
+     */
+    public abstract Invercargill.Enumerable<string> keys { owned get; }
+    
+    /**
+     * Begins a new transaction.
+     */
+    public abstract void begin_transaction() throws StorageError;
+    
+    /**
+     * Commits the current transaction.
+     */
+    public abstract void commit_transaction() throws StorageError;
+    
+    /**
+     * Rolls back the current transaction.
+     */
+    public abstract void rollback_transaction();
+    
+    /**
+     * Indicates whether a transaction is currently active.
+     */
+    public abstract bool in_transaction { get; }
+}
+
+} // namespace Implexus.Storage
+```
+
+### Concurrent Read Support by Backend
+
+| Backend | supports_concurrent_reads | Notes |
+|---------|---------------------------|-------|
+| LMDB | `true` | MVCC allows multiple concurrent readers |
+| GDBM | `false` | Single-threaded access required |
+| Filesystem | `false` | In-memory dictionary, single-threaded |
+
+## Key Naming Convention
+
+Storage keys are organized by prefix:
+
+| Key Pattern | Purpose |
+|-------------|---------|
+| `entity:<path>` | Serialized entity data |
+| `children:<path>` | Set of child names for path |
+| `type:<type_label>` | Set of paths for type |
+| `meta:version` | Database version |
+| `meta:config` | Configuration data |
+
+## Binary Serialization Format
+
+### Overview
+
+The serialization format encodes Element types to binary without GLib.Object-specific logic. All values are written in big-endian byte order.
+
+### Format Version Header
+
+Every serialized entity starts with a header:
+
+```
+Offset  Size  Field
+0       4     Magic: 0x49 0x4D 0x50 0x58 ("IMPX")
+4       2     Version (currently 0x0001)
+6       1     Entity type (0=Container, 1=Document, 2=Category, 3=Index)
+7       2     Flags (reserved)
+```
+
+### Element Type Codes
+
+| Code | Element Type |
+|------|-------------|
+| 0x00 | NullElement |
+| 0x01 | ValueElement (type inferred from content) |
+| 0x02 | String |
+| 0x03 | Boolean |
+| 0x04 | Int8 |
+| 0x05 | UInt8 |
+| 0x06 | Int16 |
+| 0x07 | UInt16 |
+| 0x08 | Int32 |
+| 0x09 | UInt32 |
+| 0x0A | Int64 |
+| 0x0B | UInt64 |
+| 0x0C | Float |
+| 0x0D | Double |
+| 0x0E | Element Array |
+| 0x0F | Element Dictionary |
+
+### ElementWriter
+
+```vala
+namespace Implexus.Serialization {
+
+public class ElementWriter {
+    
+    private Invercargill.DataStructures.ByteBuffer _buffer;
+    
+    public ElementWriter() {
+        _buffer = new Invercargill.DataStructures.ByteBuffer();
+    }
+    
+    public void write_null() {
+        _buffer.append_byte(0x00);
+    }
+    
+    public void write_element(Invercargill.Element? element) {
+        if (element == null || element.is_null()) {
+            write_null();
+            return;
+        }
+        
+        var value = element as Invercargill.ValueElement;
+        if (value != null) {
+            write_value((!) value);
+            return;
+        }
+        
+        // Handle other element types
+        var type = element.type();
+        if (type == typeof(string)) {
+            write_string(element.to_value<string>());
+        } else if (type == typeof(bool)) {
+            write_bool(element.to_value<bool?>());
+        } else if (type == typeof(int8)) {
+            write_int8(element.to_value<int8>());
+        } else if (type == typeof(uint8)) {
+            write_uint8(element.to_value<uint8>());
+        } else if (type == typeof(int16)) {
+            write_int16(element.to_value<int16>());
+        } else if (type == typeof(uint16)) {
+            write_uint16(element.to_value<uint16>());
+        } else if (type == typeof(int32)) {
+            write_int32(element.to_value<int32>());
+        } else if (type == typeof(uint32)) {
+            write_uint32(element.to_value<uint32>());
+        } else if (type == typeof(int64)) {
+            write_int64(element.to_value<int64>());
+        } else if (type == typeof(uint64)) {
+            write_uint64(element.to_value<uint64>());
+        } else if (type == typeof(float)) {
+            write_float(element.to_value<float>());
+        } else if (type == typeof(double)) {
+            write_double(element.to_value<double>());
+        } else {
+            // Fallback: write as string
+            write_string(element.to_string());
+        }
+    }
+    
+    public void write_value(Invercargill.ValueElement value) {
+        // ValueElement wraps GLib.Value, determine type
+        var gvalue = value.to_gvalue();
+        write_gvalue(ref gvalue);
+    }
+    
+    private void write_gvalue(ref GLib.Value gvalue) {
+        if (gvalue.type() == typeof(string)) {
+            write_string((string) gvalue);
+        } else if (gvalue.type() == typeof(bool)) {
+            write_bool((bool) gvalue);
+        } else if (gvalue.type() == typeof(int)) {
+            write_int64((int) gvalue);
+        } else if (gvalue.type() == typeof(int64)) {
+            write_int64((int64) gvalue);
+        } else if (gvalue.type() == typeof(double)) {
+            write_double((double) gvalue);
+        } else if (gvalue.type() == typeof(float)) {
+            write_float((float) gvalue);
+        } else {
+            write_string(gvalue.strdup_contents());
+        }
+    }
+    
+    public void write_string(string value) {
+        _buffer.append_byte(0x02);
+        var bytes = value.data;
+        write_length(bytes.length);
+        _buffer.append_bytes(bytes);
+    }
+    
+    public void write_bool(bool value) {
+        _buffer.append_byte(0x03);
+        _buffer.append_byte(value ? 1 : 0);
+    }
+    
+    public void write_int8(int8 value) {
+        _buffer.append_byte(0x04);
+        _buffer.append_byte((uint8) value);
+    }
+    
+    public void write_uint8(uint8 value) {
+        _buffer.append_byte(0x05);
+        _buffer.append_byte(value);
+    }
+    
+    public void write_int16(int16 value) {
+        _buffer.append_byte(0x06);
+        _buffer.append_int16_be(value);
+    }
+    
+    public void write_uint16(uint16 value) {
+        _buffer.append_byte(0x07);
+        _buffer.append_uint16_be(value);
+    }
+    
+    public void write_int32(int32 value) {
+        _buffer.append_byte(0x08);
+        _buffer.append_int32_be(value);
+    }
+    
+    public void write_uint32(uint32 value) {
+        _buffer.append_byte(0x09);
+        _buffer.append_uint32_be(value);
+    }
+    
+    public void write_int64(int64 value) {
+        _buffer.append_byte(0x0A);
+        _buffer.append_int64_be(value);
+    }
+    
+    public void write_uint64(uint64 value) {
+        _buffer.append_byte(0x0B);
+        _buffer.append_uint64_be(value);
+    }
+    
+    public void write_float(float value) {
+        _buffer.append_byte(0x0C);
+        _buffer.append_float_be(value);
+    }
+    
+    public void write_double(double value) {
+        _buffer.append_byte(0x0D);
+        _buffer.append_double_be(value);
+    }
+    
+    public void write_length(int64 length) {
+        // Variable-length encoding for lengths
+        if (length < 0x80) {
+            _buffer.append_byte((uint8) length);
+        } else if (length < 0x4000) {
+            _buffer.append_byte((uint8) ((length >> 8) | 0x80));
+            _buffer.append_byte((uint8) (length & 0xFF));
+        } else {
+            _buffer.append_byte(0xFF);
+            _buffer.append_int64_be(length);
+        }
+    }
+    
+    public void write_array(Invercargill.ReadOnlyCollection<Invercargill.Element> array) {
+        _buffer.append_byte(0x0E);
+        write_length(array.count);
+        foreach (var element in array) {
+            write_element(element);
+        }
+    }
+    
+    public void write_dictionary(Invercargill.ReadOnlyAssociative<string, Invercargill.Element> dict) {
+        _buffer.append_byte(0x0F);
+        write_length(dict.count);
+        foreach (var entry in dict.entries) {
+            write_string(entry.key);
+            write_element(entry.value);
+        }
+    }
+    
+    public uint8[] to_bytes() {
+        return _buffer.to_bytes();
+    }
+    
+    public void reset() {
+        _buffer.clear();
+    }
+}
+
+} // namespace Implexus.Serialization
+```
+
+### ElementReader
+
+```vala
+namespace Implexus.Serialization {
+
+public class ElementReader {
+    
+    private uint8[] _data;
+    private int _position;
+    
+    public ElementReader(uint8[] data) {
+        _data = data;
+        _position = 0;
+    }
+    
+    public Invercargill.Element? read_element() throws SerializationError {
+        if (_position >= _data.length) {
+            throw new SerializationError.UNEXPECTED_END("Unexpected end of data");
+        }
+        
+        var type_code = _data[_position++];
+        
+        switch (type_code) {
+            case 0x00: return new Invercargill.NullElement();
+            case 0x02: return new Invercargill.ValueElement(read_string());
+            case 0x03: return new Invercargill.ValueElement(read_bool());
+            case 0x04: return new Invercargill.ValueElement(read_int8());
+            case 0x05: return new Invercargill.ValueElement(read_uint8());
+            case 0x06: return new Invercargill.ValueElement(read_int16());
+            case 0x07: return new Invercargill.ValueElement(read_uint16());
+            case 0x08: return new Invercargill.ValueElement(read_int32());
+            case 0x09: return new Invercargill.ValueElement(read_uint32());
+            case 0x0A: return new Invercargill.ValueElement(read_int64());
+            case 0x0B: return new Invercargill.ValueElement(read_uint64());
+            case 0x0C: return new Invercargill.ValueElement(read_float());
+            case 0x0D: return new Invercargill.ValueElement(read_double());
+            case 0x0E: return read_array();
+            case 0x0F: return read_dictionary();
+            default:
+                throw new SerializationError.UNKNOWN_TYPE("Unknown type code: 0x%02X", type_code);
+        }
+    }
+    
+    private string read_string() {
+        var length = read_length();
+        // Allocate one extra byte so the cast below yields a NUL-terminated string
+        var bytes = new uint8[length + 1];
+        Memory.copy(bytes, &_data[_position], length);
+        _position += length;
+        return (string) bytes;
+    }
+    
+    private bool read_bool() {
+        return _data[_position++] != 0;
+    }
+    
+    private int8 read_int8() {
+        return (int8) _data[_position++];
+    }
+    
+    private uint8 read_uint8() {
+        return _data[_position++];
+    }
+    
+    private int16 read_int16() {
+        var value = (int16) (_data[_position] << 8 | _data[_position + 1]);
+        _position += 2;
+        return value;
+    }
+    
+    private uint16 read_uint16() {
+        var value = (uint16) (_data[_position] << 8 | _data[_position + 1]);
+        _position += 2;
+        return value;
+    }
+    
+    private int32 read_int32() {
+        var value = (int32) (
+            _data[_position] << 24 |
+            _data[_position + 1] << 16 |
+            _data[_position + 2] << 8 |
+            _data[_position + 3]
+        );
+        _position += 4;
+        return value;
+    }
+    
+    private uint32 read_uint32() {
+        var value = (uint32) (
+            _data[_position] << 24 |
+            _data[_position + 1] << 16 |
+            _data[_position + 2] << 8 |
+            _data[_position + 3]
+        );
+        _position += 4;
+        return value;
+    }
+    
+    private int64 read_int64() {
+        var value = (int64) (
+            (int64) _data[_position] << 56 |
+            (int64) _data[_position + 1] << 48 |
+            (int64) _data[_position + 2] << 40 |
+            (int64) _data[_position + 3] << 32 |
+            (int64) _data[_position + 4] << 24 |
+            (int64) _data[_position + 5] << 16 |
+            (int64) _data[_position + 6] << 8 |
+            (int64) _data[_position + 7]
+        );
+        _position += 8;
+        return value;
+    }
+    
+    private uint64 read_uint64() {
+        var value = (uint64) (
+            (uint64) _data[_position] << 56 |
+            (uint64) _data[_position + 1] << 48 |
+            (uint64) _data[_position + 2] << 40 |
+            (uint64) _data[_position + 3] << 32 |
+            (uint64) _data[_position + 4] << 24 |
+            (uint64) _data[_position + 5] << 16 |
+            (uint64) _data[_position + 6] << 8 |
+            (uint64) _data[_position + 7]
+        );
+        _position += 8;
+        return value;
+    }
+    
+    private float read_float() {
+        // Stored big-endian: rebuild the host-order bit pattern, then reinterpret
+        var bits = read_uint32();
+        return *((float*) &bits);
+    }
+    
+    private double read_double() {
+        // Stored big-endian: rebuild the host-order bit pattern, then reinterpret
+        var bits = read_uint64();
+        return *((double*) &bits);
+    }
+    
+    private int64 read_length() {
+        var first = _data[_position++];
+        if (first < 0x80) {
+            return first;
+        } else if (first < 0xFF) {
+            var second = _data[_position++];
+            return ((first & 0x7F) << 8) | second;
+        } else {
+            return read_int64();
+        }
+    }
+    
+    private Invercargill.Element read_array() throws SerializationError {
+        var length = read_length();
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        for (int64 i = 0; i < length; i++) {
+            array.add(read_element());
+        }
+        return new Invercargill.NativeElement(array);
+    }
+    
+    private Invercargill.Element read_dictionary() throws SerializationError {
+        var length = read_length();
+        var dict = new Invercargill.DataStructures.Dictionary<string, Invercargill.Element>();
+        for (int64 i = 0; i < length; i++) {
+            // Keys were written via write_string, which emits a 0x02 tag byte first
+            var tag = _data[_position++];
+            if (tag != 0x02) {
+                throw new SerializationError.CORRUPT_DATA("Expected string tag for dictionary key, got 0x%02X", tag);
+            }
+            var key = read_string();
+            var value = read_element();
+            dict.set(key, value);
+        }
+        return new Invercargill.NativeElement(dict);
+    }
+}
+
+} // namespace Implexus.Serialization
+```
+
+## Entity Serialization
+
+### EntitySerializer
+
+```vala
+namespace Implexus.Serialization {
+
+public class EntitySerializer {
+    
+    public uint8[] serialize(Entity entity) throws SerializationError {
+        var writer = new ElementWriter();
+        
+        // Write header
+        write_header(writer, entity);
+        
+        // Write entity-specific data
+        switch (entity.entity_type) {
+            case EntityType.CONTAINER:
+                write_container(writer, (Container) entity);
+                break;
+            case EntityType.DOCUMENT:
+                write_document(writer, (Document) entity);
+                break;
+            case EntityType.CATEGORY:
+                write_category(writer, (Category) entity);
+                break;
+            case EntityType.INDEX:
+                write_index(writer, (Index) entity);
+                break;
+        }
+        
+        return writer.to_bytes();
+    }
+    
+    private void write_header(ElementWriter writer, Entity entity) {
+        // Magic
+        writer._buffer.append_byte(0x49); // 'I'
+        writer._buffer.append_byte(0x4D); // 'M'
+        writer._buffer.append_byte(0x50); // 'P'
+        writer._buffer.append_byte(0x58); // 'X'
+        
+        // Version
+        writer._buffer.append_uint16_be(1);
+        
+        // Entity type
+        writer._buffer.append_byte((uint8) entity.entity_type);
+        
+        // Flags (reserved)
+        writer._buffer.append_uint16_be(0);
+    }
+    
+    private void write_container(ElementWriter writer, Container container) {
+        // Containers only need path - children are tracked separately
+        writer.write_string(container.path.to_string());
+    }
+    
+    private void write_document(ElementWriter writer, Document document) {
+        // Write path
+        writer.write_string(document.path.to_string());
+        
+        // Write type label
+        writer.write_string(document.type_label);
+        
+        // Write properties
+        var props = document.properties;
+        var prop_dict = new Invercargill.DataStructures.Dictionary<string, Invercargill.Element>();
+        foreach (var key in props.keys) {
+            prop_dict.set(key, props.get(key));
+        }
+        writer.write_dictionary(prop_dict);
+    }
+    
+    private void write_category(ElementWriter writer, Category category) {
+        // Write path
+        writer.write_string(category.path.to_string());
+        
+        // Write type label
+        writer.write_string(category.configured_type_label);
+        
+        // Write expression
+        writer.write_string(category.configured_expression);
+    }
+    
+    private void write_index(ElementWriter writer, Index index) {
+        // Write path
+        writer.write_string(index.path.to_string());
+        
+        // Write type label
+        writer.write_string(index.configured_type_label);
+        
+        // Write expression
+        writer.write_string(index.configured_expression);
+    }
+}
+
+} // namespace Implexus.Serialization
+```
+
+### EntityDeserializer
+
+```vala
+namespace Implexus.Serialization {
+
+public class EntityDeserializer {
+    
+    public Entity deserialize(uint8[] data, Engine engine, Path path) throws SerializationError {
+        var reader = new ElementReader(data);
+        
+        // Read and validate header
+        var header = read_header(reader);
+        validate_header(header);
+        
+        // Read entity-specific data
+        switch (header.entity_type) {
+            case EntityType.CONTAINER:
+                return read_container(reader, engine, path);
+            case EntityType.DOCUMENT:
+                return read_document(reader, engine, path);
+            case EntityType.CATEGORY:
+                return read_category(reader, engine, path);
+            case EntityType.INDEX:
+                return read_index(reader, engine, path);
+            default:
+                throw new SerializationError.UNKNOWN_TYPE(
+                    "Unknown entity type: %d", (int) header.entity_type
+                );
+        }
+    }
+    
+    private Header read_header(ElementReader reader) throws SerializationError {
+        var header = new Header();
+        
+        // Magic
+        header.magic[0] = reader._data[reader._position++];
+        header.magic[1] = reader._data[reader._position++];
+        header.magic[2] = reader._data[reader._position++];
+        header.magic[3] = reader._data[reader._position++];
+        
+        // Version
+        header.version = reader.read_uint16();
+        
+        // Entity type
+        header.entity_type = (EntityType) reader._data[reader._position++];
+        
+        // Flags
+        header.flags = reader.read_uint16();
+        
+        return header;
+    }
+    
+    private void validate_header(Header header) throws SerializationError {
+        if (header.magic[0] != 'I' || header.magic[1] != 'M' || 
+            header.magic[2] != 'P' || header.magic[3] != 'X') {
+            throw new SerializationError.INVALID_FORMAT("Invalid magic number");
+        }
+        if (header.version != 1) {
+            throw new SerializationError.UNSUPPORTED_VERSION(
+                "Unsupported version: %d", header.version
+            );
+        }
+    }
+    
+    private Container read_container(ElementReader reader, Engine engine, Path path) {
+        // Path is stored but we already have it
+        var stored_path = reader.read_string();
+        return new Container(engine, path);
+    }
+    
+    private Document read_document(ElementReader reader, Engine engine, Path path) {
+        // Path
+        var stored_path = reader.read_string();
+        
+        // Type label
+        var type_label = reader.read_string();
+        
+        // Create document
+        var doc = new Document(engine, path, type_label);
+        
+        // Properties
+        var props_dict = reader.read_dictionary();
+        var dict = props_dict as Invercargill.DataStructures.Dictionary<string, Invercargill.Element>;
+        if (dict != null) {
+            foreach (var entry in dict.entries) {
+                doc.set_property(entry.key, entry.value);
+            }
+        }
+        
+        return doc;
+    }
+    
+    private Category read_category(ElementReader reader, Engine engine, Path path) {
+        // Path
+        var stored_path = reader.read_string();
+        
+        // Type label
+        var type_label = reader.read_string();
+        
+        // Expression
+        var expression = reader.read_string();
+        
+        return new Category(engine, path, type_label, expression);
+    }
+    
+    private Index read_index(ElementReader reader, Engine engine, Path path) {
+        // Path
+        var stored_path = reader.read_string();
+        
+        // Type label
+        var type_label = reader.read_string();
+        
+        // Expression
+        var expression = reader.read_string();
+        
+        return new Index(engine, path, type_label, expression);
+    }
+}
+
+private class Header {
+    public uint8[] magic = new uint8[4];
+    public uint16 version;
+    public EntityType entity_type;
+    public uint16 flags;
+}
+
+} // namespace Implexus.Serialization
+```
+
+## SerializationError
+
+```vala
+namespace Implexus.Serialization {
+
+public errordomain SerializationError {
+    INVALID_FORMAT,
+    UNSUPPORTED_VERSION,
+    UNKNOWN_TYPE,
+    UNEXPECTED_END,
+    CORRUPT_DATA;
+}
+
+} // namespace Implexus.Serialization
+```
+
+## Storage Configuration
+
+```vala
+namespace Implexus.Storage {
+
+public class StorageConfiguration : Object {
+    
+    private Storage _storage;
+    
+    public StorageConfiguration(Storage storage) {
+        _storage = storage;
+    }
+    
+    public Storage storage { get { return _storage; } }
+    
+    // Configuration options
+    public bool auto_sync { get; set; default = true; }
+    public int cache_size { get; set; default = 1000; }
+    public bool enable_compression { get; set; default = false; }
+}
+
+} // namespace Implexus.Storage
+```
+
+## Hooks
+
+Hooks remain synchronous and run in the DBM worker thread context. This allows hooks to perform additional database operations without thread-safety concerns.
+
+```vala
+// Hooks run synchronously in the DBM worker thread
+// They can perform additional DB operations directly
+public delegate void EntityHook(Entity entity);
+
+// Example hook registration
+engine.hook_manager.register_create_hook("Task", (entity) => {
+    // This runs in the DBM worker thread
+    // Can perform synchronous DB operations
+    update_indexes(entity);
+});
+```
+
+**Important**: Since hooks run in the worker thread, they should not call async methods or block indefinitely. Long-running operations should be offloaded to separate threads.

+ 418 - 0
Architecture/08-Set-Operations.md

@@ -0,0 +1,418 @@
+# Set Operations
+
+This document describes the API for set operations over entity child sets.
+
+## Overview
+
+Implexus provides set operations over entity children, following the `Invercargill.Set` interface pattern. Operations include union, intersection, difference, and symmetric difference.
+
+## EntitySet Class
+
+The `EntitySet` class wraps an entity and provides set operations over its children.
+
+```vala
+namespace Implexus.Entities {
+
+public class EntitySet : Object, Invercargill.ReadOnlySet<Entity> {
+    
+    private Entity _entity;
+    private Invercargill.DataStructures.HashSet<Entity>? _cached_children;
+    
+    public EntitySet(Entity entity) {
+        _entity = entity;
+        _cached_children = null;
+    }
+    
+    // Lazy-load and cache children
+    private Invercargill.DataStructures.HashSet<Entity> get_children() {
+        if (_cached_children == null) {
+            _cached_children = new Invercargill.DataStructures.HashSet<Entity>();
+            foreach (var child in _entity.get_children()) {
+                _cached_children.add(child);
+            }
+        }
+        return (!) _cached_children;
+    }
+    
+    // ReadOnlySet implementation
+    public int count { get { return (int) get_children().count; } }
+    
+    public bool is_empty { get { return count == 0; } }
+    
+    public bool has(Entity item) {
+        return get_children().has(item);
+    }
+    
+    public bool has_all(Invercargill.ReadOnlyCollection<Entity> items) {
+        foreach (var item in items) {
+            if (!has(item)) return false;
+        }
+        return true;
+    }
+    
+    public bool has_any(Invercargill.ReadOnlyCollection<Entity> items) {
+        foreach (var item in items) {
+            if (has(item)) return true;
+        }
+        return false;
+    }
+    
+    public Entity? find(Entity item) {
+        if (has(item)) return item;
+        return null;
+    }
+    
+    public bool try_find(Entity item, out Entity result) {
+        result = item;
+        return has(item);
+    }
+    
+    public bool equals(Invercargill.ReadOnlySet<Entity> other) {
+        if (count != other.count) return false;
+        foreach (var item in get_children()) {
+            if (!other.has(item)) return false;
+        }
+        return true;
+    }
+    
+    public bool is_subset_of(Invercargill.ReadOnlySet<Entity> other) {
+        foreach (var item in get_children()) {
+            if (!other.has(item)) return false;
+        }
+        return true;
+    }
+    
+    public bool is_superset_of(Invercargill.ReadOnlySet<Entity> other) {
+        return other.is_subset_of(this);
+    }
+    
+    public bool is_proper_subset_of(Invercargill.ReadOnlySet<Entity> other) {
+        return count < other.count && is_subset_of(other);
+    }
+    
+    public bool is_proper_superset_of(Invercargill.ReadOnlySet<Entity> other) {
+        return count > other.count && is_superset_of(other);
+    }
+    
+    public bool overlaps(Invercargill.ReadOnlySet<Entity> other) {
+        foreach (var item in get_children()) {
+            if (other.has(item)) return true;
+        }
+        return false;
+    }
+    
+    public Invercargill.Enumerable<Entity> as_enumerable() {
+        return get_children().as_enumerable();
+    }
+    
+    public Invercargill.Lot<Entity> as_lot() {
+        return get_children().as_lot();
+    }
+    
+    // Set operations - return new EntitySet
+    public EntitySet union(EntitySet other) {
+        var result = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var item in get_children()) {
+            result.add(item);
+        }
+        foreach (var item in other.get_children()) {
+            result.add(item);
+        }
+        return new EntitySet.from_set(result);
+    }
+    
+    public EntitySet intersect(EntitySet other) {
+        var result = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var item in get_children()) {
+            if (other.has(item)) {
+                result.add(item);
+            }
+        }
+        return new EntitySet.from_set(result);
+    }
+    
+    public EntitySet except(EntitySet other) {
+        var result = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var item in get_children()) {
+            if (!other.has(item)) {
+                result.add(item);
+            }
+        }
+        return new EntitySet.from_set(result);
+    }
+    
+    public EntitySet symmetric_except(EntitySet other) {
+        var result = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var item in get_children()) {
+            if (!other.has(item)) {
+                result.add(item);
+            }
+        }
+        foreach (var item in other.get_children()) {
+            if (!has(item)) {
+                result.add(item);
+            }
+        }
+        return new EntitySet.from_set(result);
+    }
+    
+    // Factory from pre-built set
+    public EntitySet.from_set(Invercargill.DataStructures.HashSet<Entity> set) {
+        _entity = null;
+        _cached_children = set;
+    }
+}
+
+} // namespace Implexus.Entities
+```
+
+## Set Operation Methods
+
+The Entity interface provides convenient access to set operations:
+
+```vala
+public interface Entity : Object {
+    // ...
+    
+    // Get this entity's children as a set
+    public abstract EntitySet as_set();
+    
+    // Direct set operations
+    public EntitySet union_with(Entity other) {
+        return as_set().union(other.as_set());
+    }
+    
+    public EntitySet intersect_with(Entity other) {
+        return as_set().intersect(other.as_set());
+    }
+    
+    public EntitySet except_with(Entity other) {
+        return as_set().except(other.as_set());
+    }
+    
+    public EntitySet symmetric_except_with(Entity other) {
+        return as_set().symmetric_except(other.as_set());
+    }
+}
+```
+
+## Set Operation Diagram
+
+```mermaid
+graph LR
+    subgraph Set A - Container A children
+        A1[Doc 1]
+        A2[Doc 2]
+        A3[Doc 3]
+    end
+    
+    subgraph Set B - Container B children
+        B2[Doc 2]
+        B3[Doc 3]
+        B4[Doc 4]
+    end
+    
+    subgraph Union
+        U1[Doc 1]
+        U2[Doc 2]
+        U3[Doc 3]
+        U4[Doc 4]
+    end
+    
+    subgraph Intersection
+        I2[Doc 2]
+        I3[Doc 3]
+    end
+    
+    subgraph A except B
+        E1[Doc 1]
+    end
+    
+    subgraph Symmetric Diff
+        S1[Doc 1]
+        S4[Doc 4]
+    end
+```
+
+## Usage Examples
+
+### Basic Set Operations
+
+```vala
+// Create categories with some overlapping children
+var container_a = engine.get_root().create_container("set_a");
+container_a.create_document("doc1", "Item");
+container_a.create_document("doc2", "Item");
+container_a.create_document("doc3", "Item");
+
+var container_b = engine.get_root().create_container("set_b");
+container_b.create_document("doc2", "Item");  // Overlap
+container_b.create_document("doc3", "Item");  // Overlap
+container_b.create_document("doc4", "Item");
+
+// Union - all documents from both categories
+var union = container_a.union_with(container_b);
+print("Union count: %d\n", union.count);  // 4
+
+// Intersection - documents in both categories
+var intersection = container_a.intersect_with(container_b);
+print("Intersection count: %d\n", intersection.count);  // 2
+
+// Difference - documents in A but not in B
+var difference = container_a.except_with(container_b);
+print("Difference count: %d\n", difference.count);  // 1 (doc1)
+
+// Symmetric difference - documents in A or B but not both
+var sym_diff = container_a.symmetric_except_with(container_b);
+print("Symmetric diff count: %d\n", sym_diff.count);  // 2 (doc1, doc4)
+```
+
+### Combining with Categories
+
+```vala
+// Create tasks with different statuses
+var tasks = engine.get_root().create_container("tasks");
+var task1 = tasks.create_document("task1", "Task");
+task1.set_property("status", new ValueElement("open"));
+task1.set_property("priority", new ValueElement("high"));
+
+var task2 = tasks.create_document("task2", "Task");
+task2.set_property("status", new ValueElement("open"));
+task2.set_property("priority", new ValueElement("low"));
+
+var task3 = tasks.create_document("task3", "Task");
+task3.set_property("status", new ValueElement("closed"));
+task3.set_property("priority", new ValueElement("high"));
+
+// Create categorys for different groupings
+var by_status = tasks.create_category("by_status", "Task", "status");
+var by_priority = tasks.create_category("by_priority", "Task", "priority");
+
+// Get high priority open tasks using intersection
+var open_tasks = by_status.get_child("open");
+var high_priority = by_priority.get_child("high");
+var high_priority_open = open_tasks.intersect_with(high_priority);
+
+print("High priority open tasks: %d\n", high_priority_open.count);  // 1 (task1)
+```
+
+### Set Membership Tests
+
+```vala
+var container = engine.get_root().create_container("test");
+var doc1 = container.create_document("doc1", "Item");
+var doc2 = container.create_document("doc2", "Item");
+
+var set = container.as_set();
+
+// Check membership
+assert(set.has(doc1));
+assert(set.has(doc2));
+
+// Check multiple
+var check = new Invercargill.DataStructures.Vector<Entity>();
+check.add(doc1);
+check.add(doc2);
+assert(set.has_all(check));
+
+// Subset tests
+var other_container = engine.get_root().create_container("other");
+other_container.create_document("doc1", "Item");
+
+assert(set.is_superset_of(other_container.as_set()));
+assert(other_container.as_set().is_subset_of(set));
+```
+
+### Chaining Operations
+
+```vala
+// Multiple set operations can be chained
+var result = container_a
+    .union_with(container_b)
+    .intersect_with(container_c)
+    .except_with(container_d);
+
+// Iterate results
+foreach (var entity in result.as_enumerable()) {
+    print("Result: %s\n", entity.name);
+}
+```
+
+## Set Operations with Categories
+
+Categories are particularly useful with set operations:
+
+```vala
+// Find tasks that are both high priority AND assigned to a specific user
+var by_priority = tasks.create_category("by_priority", "Task", "priority");
+var by_assignee = tasks.create_category("by_assignee", "Task", "assignee");
+
+var high_priority = by_priority.get_child("high");
+var assigned_john = by_assignee.get_child("john");
+
+var johns_high_priority = high_priority.intersect_with(assigned_john);
+
+// Find tasks that are high priority OR urgent
+var urgent = by_priority.get_child("urgent");
+var high_or_urgent = high_priority.union_with(urgent);
+
+// Find tasks that are high priority but NOT assigned to john
+var high_not_john = high_priority.except_with(assigned_john);
+```
+
+## Performance Considerations
+
+### Caching
+
+EntitySet caches children on first access:
+
+```vala
+public class EntitySet {
+    private Invercargill.DataStructures.HashSet<Entity>? _cached_children;
+    
+    // Children loaded once, then cached
+    private Invercargill.DataStructures.HashSet<Entity> get_children() {
+        if (_cached_children == null) {
+            _cached_children = new Invercargill.DataStructures.HashSet<Entity>();
+            foreach (var child in _entity.get_children()) {
+                _cached_children.add(child);
+            }
+        }
+        return (!) _cached_children;
+    }
+}
+```
+
+### Large Sets
+
+For large result sets, use `Enumerable` directly instead of materializing:
+
+```vala
+// Instead of:
+var set = container.as_set();  // Materializes all children
+
+// Use streaming:
+var filtered = container.get_children()
+    .where(entity => entity.type_label == "Task")
+    .select(entity => entity as Document);
+```
+
+## Set Operation Summary
+
+| Operation | Symbol | Result |
+|-----------|--------|--------|
+| Union | A ∪ B | All elements in A or B |
+| Intersection | A ∩ B | Elements in both A and B |
+| Difference | A \ B | Elements in A but not B |
+| Symmetric Difference | A △ B | Elements in A or B but not both |
+
+| Method | Returns |
+|--------|---------|
+| `union_with(other)` | EntitySet |
+| `intersect_with(other)` | EntitySet |
+| `except_with(other)` | EntitySet |
+| `symmetric_except_with(other)` | EntitySet |
+| `as_set()` | EntitySet |
+| `is_subset_of(other)` | bool |
+| `is_superset_of(other)` | bool |
+| `overlaps(other)` | bool |

+ 702 - 0
Architecture/09-Client-Server-Protocol.md

@@ -0,0 +1,702 @@
+# Client/Server Protocol
+
+This document describes the TCP protocol design for remote mode operation.
+
+## Protocol Overview
+
+The Implexus client/server protocol uses a simple binary message format over TCP. Each message consists of a header and payload.
+
+```mermaid
+sequenceDiagram
+    participant Client
+    participant Server
+    
+    Client->>Server: Connect
+    Server-->>Client: Welcome Message
+    
+    loop Operations
+        Client->>Server: Request
+        Server-->>Client: Response
+    end
+    
+    Client->>Server: Disconnect
+    Server-->>Client: Goodbye
+```
+
+## Message Format
+
+### Header
+
+All messages start with a common header:
+
+```
+Offset  Size  Field
+0       4     Magic: 0x49 0x4D 0x50 0x58 ("IMPX")
+4       1     Message type
+5       4     Payload length (big-endian)
+9       2     Request ID (for request/response matching)
+11      ...   Payload
+```
+
+### Message Types
+
+| Code | Type | Direction |
+|------|------|-----------|
+| 0x00 | WELCOME | Server → Client |
+| 0x01 | GOODBYE | Server → Client |
+| 0x10 | GET_ENTITY | Client → Server |
+| 0x11 | ENTITY_RESPONSE | Server → Client |
+| 0x12 | ENTITY_NOT_FOUND | Server → Client |
+| 0x13 | ENTITY_EXISTS | Client → Server |
+| 0x14 | BOOLEAN_RESPONSE | Server → Client |
+| 0x20 | CREATE_CONTAINER | Client → Server |
+| 0x21 | CREATE_DOCUMENT | Client → Server |
+| 0x22 | CREATE_CATEGORY | Client → Server |
+| 0x23 | CREATE_INDEX | Client → Server |
+| 0x30 | SET_PROPERTY | Client → Server |
+| 0x31 | GET_PROPERTY | Client → Server |
+| 0x32 | REMOVE_PROPERTY | Client → Server |
+| 0x40 | DELETE_ENTITY | Client → Server |
+| 0x41 | GET_CHILDREN | Client → Server |
+| 0x42 | GET_CHILD_NAMES | Client → Server |
+| 0x50 | QUERY_BY_TYPE | Client → Server |
+| 0x51 | QUERY_BY_EXPRESSION | Client → Server |
+| 0x60 | BEGIN_TRANSACTION | Client → Server |
+| 0x61 | COMMIT_TRANSACTION | Client → Server |
+| 0x62 | ROLLBACK_TRANSACTION | Client → Server |
+| 0x70 | ERROR | Server → Client |
+| 0x7F | SUCCESS | Server → Client |
+
+## Protocol Classes
+
+### Message Base
+
+```vala
+namespace Implexus.Protocol {
+
+public interface Message : Object {
+    public abstract uint8 message_type { get; }
+    public abstract uint8[] serialize();
+    public abstract void deserialize(uint8[] data) throws ProtocolError;
+}
+
+public class MessageHeader {
+    public uint8[] magic { get; set; }
+    public uint8 message_type { get; set; }
+    public uint32 payload_length { get; set; }
+    public uint16 request_id { get; set; }
+    
+    public static const int SIZE = 11;
+    
+    public MessageHeader() {
+        magic = new uint8[] { 0x49, 0x4D, 0x50, 0x58 };
+    }
+    
+    public uint8[] serialize() {
+        var data = new uint8[SIZE];
+        data[0] = magic[0];
+        data[1] = magic[1];
+        data[2] = magic[2];
+        data[3] = magic[3];
+        data[4] = message_type;
+        data[5] = (uint8) (payload_length >> 24);
+        data[6] = (uint8) (payload_length >> 16);
+        data[7] = (uint8) (payload_length >> 8);
+        data[8] = (uint8) payload_length;
+        data[9] = (uint8) (request_id >> 8);
+        data[10] = (uint8) request_id;
+        return data;
+    }
+    
+    public static MessageHeader deserialize(uint8[] data) throws ProtocolError {
+        if (data.length < SIZE) {
+            throw new ProtocolError.INVALID_MESSAGE("Header too short");
+        }
+        
+        var header = new MessageHeader();
+        header.magic = data[0:4];
+        
+        if (header.magic[0] != 'I' || header.magic[1] != 'M' || 
+            header.magic[2] != 'P' || header.magic[3] != 'X') {
+            throw new ProtocolError.INVALID_MESSAGE("Invalid magic");
+        }
+        
+        header.message_type = data[4];
+        header.payload_length = 
+            ((uint32) data[5] << 24) |
+            ((uint32) data[6] << 16) |
+            ((uint32) data[7] << 8) |
+            ((uint32) data[8]);
+        header.request_id = (uint16) ((data[9] << 8) | data[10]);
+        
+        return header;
+    }
+}
+
+} // namespace Implexus.Protocol
+```
+
+### Request Interface
+
+```vala
+namespace Implexus.Protocol {
+
+public interface Request : Object, Message {
+    public abstract uint16 request_id { get; set; }
+}
+
+} // namespace Implexus.Protocol
+```
+
+### Response Interface
+
+```vala
+namespace Implexus.Protocol {
+
+public interface Response : Object, Message {
+    public abstract uint16 request_id { get; set; }
+    public abstract bool is_success { get; }
+}
+
+} // namespace Implexus.Protocol
+```
+
+## Request/Response Classes
+
+### GetEntityRequest
+
+```vala
+namespace Implexus.Protocol {
+
+public class GetEntityRequest : Object, Request {
+    
+    private uint16 _request_id;
+    private Path _path;
+    
+    public uint8 message_type { get { return 0x10; } }
+    
+    public uint16 request_id {
+        get { return _request_id; }
+        set { _request_id = value; }
+    }
+    
+    public Path path {
+        get { return _path; }
+        set { _path = value; }
+    }
+    
+    public GetEntityRequest() {
+        _path = new Path.root();
+    }
+    
+    public GetEntityRequest.for_path(Path path) {
+        _path = path;
+    }
+    
+    public uint8[] serialize() {
+        var writer = new ElementWriter();
+        writer.write_string(_path.to_string());
+        return writer.to_bytes();
+    }
+    
+    public void deserialize(uint8[] data) throws ProtocolError {
+        try {
+            var reader = new ElementReader(data);
+            var path_str = reader.read_string();
+            _path = new Path(path_str);
+        } catch (SerializationError e) {
+            throw new ProtocolError.INVALID_MESSAGE("Failed to deserialize: %s", e.message);
+        }
+    }
+}
+
+} // namespace Implexus.Protocol
+```
+
+### EntityResponse
+
+```vala
+namespace Implexus.Protocol {
+
+public class EntityResponse : Object, Response {
+    
+    private uint16 _request_id;
+    private EntityData _entity_data;
+    
+    public uint8 message_type { get { return 0x11; } }
+    
+    public uint16 request_id {
+        get { return _request_id; }
+        set { _request_id = value; }
+    }
+    
+    public bool is_success { get { return true; } }
+    
+    public EntityData entity_data {
+        get { return _entity_data; }
+        set { _entity_data = value; }
+    }
+    
+    public uint8[] serialize() {
+        var writer = new ElementWriter();
+        
+        // Entity type
+        writer.write_uint8((uint8) _entity_data.entity_type);
+        
+        // Path
+        writer.write_string(_entity_data.path.to_string());
+        
+        // Type label (for documents)
+        writer.write_string(_entity_data.type_label ?? "");
+        
+        // Expression (for category/index)
+        writer.write_string(_entity_data.expression ?? "");
+        
+        // Properties (for documents)
+        if (_entity_data.properties != null) {
+            writer.write_dictionary(_entity_data.properties);
+        } else {
+            writer.write_null();
+        }
+        
+        return writer.to_bytes();
+    }
+    
+    public void deserialize(uint8[] data) throws ProtocolError {
+        try {
+            var reader = new ElementReader(data);
+            
+            var entity_type = (EntityType) reader.read_uint8();
+            var path = new Path(reader.read_string());
+            var type_label = reader.read_string();
+            var expression = reader.read_string();
+            
+            _entity_data = new EntityData();
+            _entity_data.entity_type = entity_type;
+            _entity_data.path = path;
+            _entity_data.type_label = type_label.length > 0 ? type_label : null;
+            _entity_data.expression = expression.length > 0 ? expression : null;
+            
+            // Properties
+            var props_element = reader.read_element();
+            if (props_element != null && !props_element.is_null()) {
+                // Convert to dictionary
+            }
+        } catch (SerializationError e) {
+            throw new ProtocolError.INVALID_MESSAGE("Failed to deserialize: %s", e.message);
+        }
+    }
+}
+
+public class EntityData {
+    public EntityType entity_type;
+    public Path path;
+    public string? type_label;
+    public string? expression;
+    public Invercargill.DataStructures.Dictionary<string, Invercargill.Element>? properties;
+}
+
+} // namespace Implexus.Protocol
+```
+
+### ErrorResponse
+
+```vala
+namespace Implexus.Protocol {
+
+public class ErrorResponse : Object, Response {
+    
+    private uint16 _request_id;
+    private EngineError _error;
+    
+    public uint8 message_type { get { return 0x70; } }
+    
+    public uint16 request_id {
+        get { return _request_id; }
+        set { _request_id = value; }
+    }
+    
+    public bool is_success { get { return false; } }
+    
+    public EngineError error {
+        get { return _error; }
+        set { _error = value; }
+    }
+    
+    public uint8[] serialize() {
+        var writer = new ElementWriter();
+        
+        // Error code
+        writer.write_uint8((uint8) _error.code);
+        
+        // Message
+        writer.write_string(_error.message);
+        
+        return writer.to_bytes();
+    }
+    
+    public void deserialize(uint8[] data) throws ProtocolError {
+        try {
+            var reader = new ElementReader(data);
+            
+            var code = (EngineError.Code) reader.read_uint8();
+            var message = reader.read_string();
+            
+            _error = new EngineError(code, message);
+        } catch (SerializationError e) {
+            throw new ProtocolError.INVALID_MESSAGE("Failed to deserialize: %s", e.message);
+        }
+    }
+}
+
+} // namespace Implexus.Protocol
+```
+
+## MessageReader
+
+```vala
+namespace Implexus.Protocol {
+
+public class MessageReader {
+    
+    private InputStream _stream;
+    private uint8[] _header_buffer;
+    
+    public MessageReader(InputStream stream) {
+        _stream = stream;
+        _header_buffer = new uint8[MessageHeader.SIZE];
+    }
+    
+    public Message? read_message() throws ProtocolError {
+        try {
+            // Read header
+            size_t bytes_read;
+            _stream.read_all(_header_buffer, out bytes_read);
+            
+            if (bytes_read == 0) {
+                return null; // Connection closed
+            }
+            
+            if (bytes_read < MessageHeader.SIZE) {
+                throw new ProtocolError.INVALID_MESSAGE("Incomplete header");
+            }
+            
+            var header = MessageHeader.deserialize(_header_buffer);
+            
+            // Read payload
+            var payload = new uint8[header.payload_length];
+            if (header.payload_length > 0) {
+                _stream.read_all(payload, out bytes_read);
+                if (bytes_read < header.payload_length) {
+                    throw new ProtocolError.INVALID_MESSAGE("Incomplete payload");
+                }
+            }
+            
+            // Create message based on type
+            return create_message(header, payload);
+            
+        } catch (IOError e) {
+            throw new ProtocolError.IO_ERROR("Read error: %s", e.message);
+        }
+    }
+    
+    private Message create_message(MessageHeader header, uint8[] payload) throws ProtocolError {
+        Message message;
+        
+        switch (header.message_type) {
+            case 0x00: message = new WelcomeMessage(); break;
+            case 0x11: message = new EntityResponse(); break;
+            case 0x12: message = new EntityNotFoundResponse(); break;
+            case 0x14: message = new BooleanResponse(); break;
+            case 0x70: message = new ErrorResponse(); break;
+            case 0x7F: message = new SuccessResponse(); break;
+            default:
+                throw new ProtocolError.UNKNOWN_MESSAGE_TYPE(
+                    "Unknown message type: 0x%02X", header.message_type
+                );
+        }
+        
+        message.request_id = header.request_id;
+        if (payload.length > 0) {
+            message.deserialize(payload);
+        }
+        
+        return message;
+    }
+    
+    public Response read_response() throws ProtocolError {
+        var message = read_message();
+        if (message == null) {
+            throw new ProtocolError.IO_ERROR("Connection closed");
+        }
+        if (!(message is Response)) {
+            throw new ProtocolError.INVALID_MESSAGE("Expected response");
+        }
+        return (Response) message;
+    }
+}
+
+} // namespace Implexus.Protocol
+```
+
+## MessageWriter
+
+```vala
+namespace Implexus.Protocol {
+
+public class MessageWriter {
+    
+    private OutputStream _stream;
+    private uint16 _next_request_id;
+    
+    public MessageWriter(OutputStream stream) {
+        _stream = stream;
+        _next_request_id = 1;
+    }
+    
+    public void write_message(Message message) throws ProtocolError {
+        try {
+            // Get payload
+            var payload = message.serialize();
+            
+            // Create header
+            var header = new MessageHeader();
+            header.message_type = message.message_type;
+            header.payload_length = (uint32) payload.length;
+            header.request_id = message.request_id;
+            
+            // Write header
+            var header_data = header.serialize();
+            _stream.write(header_data);
+            
+            // Write payload
+            if (payload.length > 0) {
+                _stream.write(payload);
+            }
+            
+            _stream.flush();
+            
+        } catch (IOError e) {
+            throw new ProtocolError.IO_ERROR("Write error: %s", e.message);
+        }
+    }
+    
+    public uint16 write_request(Request request) throws ProtocolError {
+        request.request_id = _next_request_id++;
+        write_message(request);
+        return request.request_id;
+    }
+}
+
+} // namespace Implexus.Protocol
+```
+
+## Server Implementation
+
+### Server Class
+
+```vala
+namespace Implexus.Server {
+
+public class Server : Object {
+    
+    private Engine _engine;
+    private SocketService _socket_service;
+    private uint16 _port;
+    private bool _running;
+    
+    public signal void client_connected();
+    public signal void client_disconnected();
+    
+    public Server(Engine engine, uint16 port = 9090) {
+        _engine = engine;
+        _port = port;
+        _running = false;
+    }
+    
+    public void start() throws ServerError {
+        try {
+            _socket_service = new SocketService();
+            _socket_service.add_inet_port(_port, null);
+            _socket_service.incoming.connect(on_incoming);
+            _running = true;
+            
+            print("Implexus server listening on port %d\n", _port);
+            
+        } catch (Error e) {
+            throw new ServerError.STARTUP_FAILED("Failed to start server: %s", e.message);
+        }
+    }
+    
+    public void stop() {
+        if (_socket_service != null) {
+            _socket_service.stop();
+            _socket_service = null;
+        }
+        _running = false;
+    }
+    
+    private bool on_incoming(SocketConnection connection) {
+        client_connected();
+        
+        // Handle in background
+        new Thread<void*>("client", () => {
+            handle_client(connection);
+            return null;
+        });
+        
+        return true;
+    }
+    
+    private void handle_client(SocketConnection connection) {
+        var input = connection.get_input_stream();
+        var output = connection.get_output_stream();
+        
+        var reader = new MessageReader(input);
+        var writer = new MessageWriter(output);
+        
+        try {
+            // Send welcome
+            var welcome = new WelcomeMessage();
+            welcome.server_version = 1;
+            writer.write_message(welcome);
+            
+            // Process requests
+            while (true) {
+                var message = reader.read_message();
+                if (message == null) break;
+                
+                var response = process_request(message);
+                writer.write_message(response);
+            }
+            
+        } catch (ProtocolError e) {
+            warning("Protocol error: %s", e.message);
+        }
+        
+        client_disconnected();
+    }
+    
+    private Response process_request(Message request) throws ProtocolError {
+        try {
+            switch (request.message_type) {
+                case 0x10: // GET_ENTITY
+                    return handle_get_entity((GetEntityRequest) request);
+                case 0x13: // ENTITY_EXISTS
+                    return handle_entity_exists((EntityExistsRequest) request);
+                case 0x20: // CREATE_CONTAINER
+                    return handle_create_container((CreateContainerRequest) request);
+                // ... other handlers
+                default:
+                    return new ErrorResponse(EngineError.Code.PROTOCOL_ERROR, 
+                        "Unknown request type");
+            }
+        } catch (EngineError e) {
+            return new ErrorResponse.from_error(e);
+        }
+    }
+    
+    private Response handle_get_entity(GetEntityRequest request) throws EngineError {
+        var entity = _engine.get_entity(request.path);
+        
+        var response = new EntityResponse();
+        response.request_id = request.request_id;
+        response.entity_data = entity_to_data(entity);
+        
+        return response;
+    }
+    
+    private Response handle_entity_exists(EntityExistsRequest request) {
+        var response = new BooleanResponse();
+        response.request_id = request.request_id;
+        response.value = _engine.entity_exists(request.path);
+        return response;
+    }
+    
+    private EntityData entity_to_data(Entity entity) {
+        var data = new EntityData();
+        data.entity_type = entity.entity_type;
+        data.path = entity.path;
+        data.type_label = entity.type_label;
+        data.expression = entity.configured_expression;
+        
+        if (entity.entity_type == EntityType.DOCUMENT) {
+            data.properties = new Invercargill.DataStructures.Dictionary<string, Invercargill.Element>();
+            foreach (var key in entity.properties.keys) {
+                data.properties.set(key, entity.get_property(key));
+            }
+        }
+        
+        return data;
+    }
+}
+
+} // namespace Implexus.Server
+```
+
+## Protocol Error
+
+```vala
+namespace Implexus.Protocol {
+
+public errordomain ProtocolError {
+    INVALID_MESSAGE,
+    UNKNOWN_MESSAGE_TYPE,
+    IO_ERROR,
+    TIMEOUT,
+    CONNECTION_CLOSED;
+}
+
+} // namespace Implexus.Protocol
+```
+
+## Connection Flow
+
+```mermaid
+sequenceDiagram
+    participant C as Client
+    participant S as Server
+    participant E as Engine
+    
+    C->>S: TCP Connect
+    S->>C: WELCOME version=1
+    
+    C->>S: GET_ENTITY path=/users
+    S->>E: get_entity with /users
+    E-->>S: Entity
+    S->>C: ENTITY_RESPONSE
+    
+    C->>S: CREATE_DOCUMENT path=/users/john type=User
+    S->>E: create_document
+    E-->>S: Document
+    S->>C: ENTITY_RESPONSE
+    
+    C->>S: SET_PROPERTY path=/users/john name=email value=john@ex.com
+    S->>E: set_property
+    S->>C: SUCCESS
+    
+    C->>S: GET_ENTITY path=/nonexistent
+    S->>E: get_entity
+    E-->>S: throws ENTITY_NOT_FOUND
+    S->>C: ENTITY_NOT_FOUND
+    
+    C->>S: TCP Close
+```
+
+## Configuration
+
+```vala
+namespace Implexus.Server {
+
+public class ServerConfiguration : Object {
+    
+    public uint16 port { get; set; default = 9090; }
+    public int max_connections { get; set; default = 100; }
+    public int timeout_seconds { get; set; default = 30; }
+    public bool enable_tls { get; set; default = false; }
+    public string? tls_cert_path { get; set; default = null; }
+    public string? tls_key_path { get; set; default = null; }
+}
+
+} // namespace Implexus.Server
+```

+ 453 - 0
Architecture/10-File-Organization.md

@@ -0,0 +1,453 @@
+# File Organization
+
+This document maps out the source files to create for the Implexus project.
+
+## Project Structure
+
+```
+implexus/
+├── meson.build                    # Main build configuration
+├── MANIFEST.usm                   # USM package manifest
+├── README.md                      # Project readme
+├── LICENSE                        # License file
+│
+├── src/                           # Source files
+│   ├── meson.build               # Source build configuration
+│   │
+│   ├── core/                     # Implexus.Core namespace
+│   │   ├── entity.vala           # Entity interface
+│   │   ├── entity-type.vala      # EntityType enum
+│   │   ├── engine.vala           # Engine interface
+│   │   ├── path.vala             # Path class
+│   │   ├── transaction.vala      # Transaction interface
+│   │   ├── errors.vala           # EngineError domain
+│   │   ├── embedded-engine.vala  # EmbeddedEngine implementation
+│   │   └── remote-engine.vala    # RemoteEngine implementation
+│   │
+│   ├── entities/                 # Implexus.Entities namespace
+│   │   ├── abstract-entity.vala  # AbstractEntity base class
+│   │   ├── container.vala         # Container entity
+│   │   ├── document.vala         # Document entity
+│   │   ├── category.vala        # Category entity
+│   │   ├── category-container.vala # Virtual container from category
+│   │   ├── index.vala            # Index entity
+│   │   ├── index-result.vala     # IndexResult entity
+│   │   └── entity-set.vala       # EntitySet for set operations
+│   │
+│   ├── storage/                  # Implexus.Storage namespace
+│   │   ├── storage.vala          # Storage interface
+│   │   ├── dbm.vala              # DBM interface
+│   │   ├── filesystem-dbm.vala   # FilesystemDbm implementation
+│   │   ├── default-storage.vala  # DefaultStorage implementation
+│   │   ├── storage-configuration.vala # Configuration class
+│   │   └── errors.vala           # StorageError domain
+│   │
+│   ├── serialization/            # Implexus.Serialization namespace
+│   │   ├── element-writer.vala   # ElementWriter class
+│   │   ├── element-reader.vala   # ElementReader class
+│   │   ├── entity-serializer.vala # EntitySerializer class
+│   │   ├── entity-deserializer.vala # EntityDeserializer class
+│   │   ├── errors.vala           # SerializationError domain
+│   │   └── format.vala           # Format constants
+│   │
+│   ├── protocol/                 # Implexus.Protocol namespace
+│   │   ├── message.vala          # Message interface and header
+│   │   ├── request.vala          # Request interface
+│   │   ├── response.vala         # Response interface
+│   │   ├── message-reader.vala   # MessageReader class
+│   │   ├── message-writer.vala   # MessageWriter class
+│   │   ├── requests.vala         # All request classes
+│   │   ├── responses.vala        # All response classes
+│   │   └── errors.vala           # ProtocolError domain
+│   │
+│   └── server/                   # Implexus.Server namespace
+│       ├── server.vala           # Server class
+│       ├── client-connection.vala # ClientConnection class
+│       ├── request-handler.vala  # RequestHandler class
+│       ├── server-configuration.vala # ServerConfiguration class
+│       └── errors.vala           # ServerError domain
+│
+├── vapi/                         # VAPI files
+│   ├── implexus.deps             # Dependencies file
+│   └── implexus.vapi             # Generated VAPI
+│
+├── examples/                     # Example programs
+│   ├── meson.build
+│   ├── basic-usage.vala          # Basic usage example
+│   ├── category-example.vala    # Category usage
+│   ├── index-example.vala        # Index usage
+│   ├── set-operations.vala       # Set operations example
+│   └── remote-client.vala        # Remote client example
+│
+├── tools/                        # Command-line tools
+│   ├── meson.build
+│   ├── implexusd.vala            # Server daemon
+│   └── implexus-cli.vala         # Command-line client
+│
+└── tests/                        # Test suite
+    ├── meson.build
+    ├── test-path.vala            # Path tests
+    ├── test-entity.vala          # Entity tests
+    ├── test-storage.vala         # Storage tests
+    ├── test-serialization.vala   # Serialization tests
+    ├── test-protocol.vala        # Protocol tests
+    └── test-integration.vala     # Integration tests
+```
+
+## File Details
+
+### Core Files
+
+| File | Namespace | Purpose |
+|------|-----------|---------|
+| `entity.vala` | Implexus.Core | Entity interface definition |
+| `entity-type.vala` | Implexus.Core | EntityType enum |
+| `engine.vala` | Implexus.Core | Engine interface |
+| `path.vala` | Implexus.Core | Path parsing and manipulation |
+| `transaction.vala` | Implexus.Core | Transaction interface |
+| `errors.vala` | Implexus.Core | EngineError errordomain |
+| `embedded-engine.vala` | Implexus.Core | EmbeddedEngine class |
+| `remote-engine.vala` | Implexus.Core | RemoteEngine class |
+
+### Entity Files
+
+| File | Namespace | Purpose |
+|------|-----------|---------|
+| `abstract-entity.vala` | Implexus.Entities | AbstractEntity base class |
+| `container.vala` | Implexus.Entities | Container entity |
+| `document.vala` | Implexus.Entities | Document entity |
+| `category.vala` | Implexus.Entities | Category entity |
+| `category-container.vala` | Implexus.Entities | Virtual container from category |
+| `index.vala` | Implexus.Entities | Index entity |
+| `index-result.vala` | Implexus.Entities | IndexResult entity |
+| `entity-set.vala` | Implexus.Entities | EntitySet for set operations |
+
+### Storage Files
+
+| File | Namespace | Purpose |
+|------|-----------|---------|
+| `storage.vala` | Implexus.Storage | Storage interface |
+| `dbm.vala` | Implexus.Storage | DBM interface |
+| `filesystem-dbm.vala` | Implexus.Storage | FilesystemDbm file-based implementation |
+| `default-storage.vala` | Implexus.Storage | DefaultStorage implementation |
+| `storage-configuration.vala` | Implexus.Storage | Configuration class |
+| `errors.vala` | Implexus.Storage | StorageError errordomain |
+
+### Serialization Files
+
+| File | Namespace | Purpose |
+|------|-----------|---------|
+| `element-writer.vala` | Implexus.Serialization | ElementWriter class |
+| `element-reader.vala` | Implexus.Serialization | ElementReader class |
+| `entity-serializer.vala` | Implexus.Serialization | EntitySerializer class |
+| `entity-deserializer.vala` | Implexus.Serialization | EntityDeserializer class |
+| `errors.vala` | Implexus.Serialization | SerializationError errordomain |
+| `format.vala` | Implexus.Serialization | Format constants |
+
+### Protocol Files
+
+| File | Namespace | Purpose |
+|------|-----------|---------|
+| `message.vala` | Implexus.Protocol | Message interface and header |
+| `request.vala` | Implexus.Protocol | Request interface |
+| `response.vala` | Implexus.Protocol | Response interface |
+| `message-reader.vala` | Implexus.Protocol | MessageReader class |
+| `message-writer.vala` | Implexus.Protocol | MessageWriter class |
+| `requests.vala` | Implexus.Protocol | All request classes |
+| `responses.vala` | Implexus.Protocol | All response classes |
+| `errors.vala` | Implexus.Protocol | ProtocolError errordomain |
+
+### Server Files
+
+| File | Namespace | Purpose |
+|------|-----------|---------|
+| `server.vala` | Implexus.Server | Server class |
+| `client-connection.vala` | Implexus.Server | ClientConnection class |
+| `request-handler.vala` | Implexus.Server | RequestHandler class |
+| `server-configuration.vala` | Implexus.Server | ServerConfiguration class |
+| `errors.vala` | Implexus.Server | ServerError errordomain |
+
+## Build Configuration
+
+### Root meson.build
+
+```meson
+project('implexus', 'vala', 'c',
+    version: '0.1.0',
+    meson_version: '>= 0.50.0'
+)
+
+gnome = import('gnome')
+
+glib_dep = dependency('glib-2.0')
+gobject_dep = dependency('gobject-2.0')
+invercargill_dep = dependency('invercargill-1')
+
+dependencies = [
+    glib_dep,
+    gobject_dep,
+    invercargill_dep
+]
+
+subdir('src')
+subdir('examples')
+subdir('tools')
+subdir('tests')
+```
+
+### src/meson.build
+
+```meson
+lib_sources = files(
+    'core/entity.vala',
+    'core/entity-type.vala',
+    'core/engine.vala',
+    'core/path.vala',
+    'core/transaction.vala',
+    'core/errors.vala',
+    'core/embedded-engine.vala',
+    'core/remote-engine.vala',
+    'entities/abstract-entity.vala',
+    'entities/container.vala',
+    'entities/document.vala',
+    'entities/category.vala',
+    'entities/category-container.vala',
+    'entities/index.vala',
+    'entities/index-result.vala',
+    'entities/entity-set.vala',
+    'storage/storage.vala',
+    'storage/dbm.vala',
+    'storage/filesystem-dbm.vala',
+    'storage/default-storage.vala',
+    'storage/storage-configuration.vala',
+    'storage/errors.vala',
+    'serialization/element-writer.vala',
+    'serialization/element-reader.vala',
+    'serialization/entity-serializer.vala',
+    'serialization/entity-deserializer.vala',
+    'serialization/errors.vala',
+    'serialization/format.vala',
+    'protocol/message.vala',
+    'protocol/request.vala',
+    'protocol/response.vala',
+    'protocol/message-reader.vala',
+    'protocol/message-writer.vala',
+    'protocol/requests.vala',
+    'protocol/responses.vala',
+    'protocol/errors.vala',
+    'server/server.vala',
+    'server/client-connection.vala',
+    'server/request-handler.vala',
+    'server/server-configuration.vala',
+    'server/errors.vala'
+)
+
+implexus_lib = library('implexus',
+    lib_sources,
+    dependencies: dependencies,
+    version: '0.1.0',
+    install: true
+)
+
+implexus_dep = declare_dependency(
+    link_with: implexus_lib,
+    dependencies: dependencies
+)
+
+install_data('vapi/implexus.deps', install_dir: get_option('datadir') / 'vala/vapi')
+
+gnome.generate_vapi('implexus',
+    sources: lib_sources,
+    packages: ['glib-2.0', 'gobject-2.0', 'invercargill-1'],
+    install: true
+)
+```
+
+## Dependencies File
+
+### vapi/implexus.deps
+
+```
+glib-2.0
+gobject-2.0
+invercargill-1
+```
+
+## Example Files
+
+### examples/basic-usage.vala
+
+```vala
+using Implexus.Core;
+using Implexus.Entities;
+using Implexus.Storage;
+
+public int main(string[] args) {
+    // Create storage
+    var dbm = new FilesystemDbm("/tmp/implexus-demo");
+    var storage = new DefaultStorage(dbm);
+    storage.open();
+    
+    // Create embedded engine
+    var engine = new EmbeddedEngine(storage);
+    
+    // Create a container hierarchy
+    var root = engine.get_root();
+    var users = root.create_container("users");
+    
+    // Create a document
+    var john = users.create_document("john", "User");
+    john.set_property("email", new Invercargill.ValueElement("john@example.com"));
+    john.set_property("age", new Invercargill.ValueElement(30));
+    
+    // Read it back
+    var loaded = engine.get_entity(new Path("/users/john")) as Document;
+    print("Email: %s\n", loaded.get_property("email").to_string());
+    print("Age: %s\n", loaded.get_property("age").to_string());
+    
+    // Clean up
+    storage.close();
+    return 0;
+}
+```
+
+## Tool Files
+
+### tools/implexusd.vala
+
+```vala
+using Implexus.Core;
+using Implexus.Server;
+using Implexus.Storage;
+
+public int main(string[] args) {
+    string data_dir = "/var/lib/implexus";
+    uint16 port = 9090;
+    
+    // Parse arguments
+    for (int i = 1; i < args.length; i++) {
+        if (args[i] == "--port" && i + 1 < args.length) {
+            port = (uint16) int.parse(args[++i]);
+        } else if (args[i] == "--data-dir" && i + 1 < args.length) {
+            data_dir = args[++i];
+        }
+    }
+    
+    // Initialize storage
+    var dbm = new FilesystemDbm(data_dir);
+    var storage = new DefaultStorage(dbm);
+    
+    try {
+        storage.open();
+    } catch (StorageError e) {
+        stderr.printf("Failed to open storage: %s\n", e.message);
+        return 1;
+    }
+    
+    // Create engine and server
+    var engine = new EmbeddedEngine(storage);
+    var config = new ServerConfiguration();
+    config.port = port;
+    
+    var server = new Server(engine, config);
+    
+    try {
+        server.start();
+        print("Implexus server started on port %u\n", port);
+        
+        // Wait for termination
+        var loop = new MainLoop();
+        loop.run();
+        
+    } catch (ServerError e) {
+        stderr.printf("Server error: %s\n", e.message);
+        return 1;
+    }
+    
+    storage.close();
+    return 0;
+}
+```
+
+## Test Files
+
+### tests/test-path.vala
+
+```vala
+using Implexus.Core;
+
+public class PathTests : Object {
+    
+    public void test_root_path() {
+        var root = new Path.root();
+        assert(root.is_root);
+        assert(root.depth == 0);
+        assert(root.to_string() == "/");
+    }
+    
+    public void test_simple_path() {
+        var path = new Path("/users/john");
+        assert(!path.is_root);
+        assert(path.depth == 2);
+        assert(path.name == "john");
+        assert(path.parent.name == "users");
+    }
+    
+    public void test_child_operations() {
+        var parent = new Path("/users");
+        var child = parent.child("john");
+        
+        assert(child.depth == 2);
+        assert(child.is_descendant_of(parent));
+        assert(parent.is_ancestor_of(child));
+    }
+    
+    public void test_equality() {
+        var a = new Path("/users/john");
+        var b = new Path("/users/john");
+        var c = new Path("/users/jane");
+        
+        assert(a.equals(b));
+        assert(!a.equals(c));
+    }
+    
+    public void test_resolution() {
+        // "base" is a reserved keyword in Vala, so use a different name
+        var base_path = new Path("/users");
+        var resolved = base_path.resolve(new Path("john/profile"));
+        
+        assert(resolved.to_string() == "/users/john/profile");
+    }
+    
+    public static int main(string[] args) {
+        var tests = new PathTests();
+        
+        tests.test_root_path();
+        tests.test_simple_path();
+        tests.test_child_operations();
+        tests.test_equality();
+        tests.test_resolution();
+        
+        print("All path tests passed!\n");
+        return 0;
+    }
+}
+```
+
+## File Creation Order
+
+When implementing, create files in this order:
+
+1. **Core Interfaces** - `entity.vala`, `entity-type.vala`, `engine.vala`, `errors.vala`
+2. **Path System** - `path.vala`
+3. **Storage Layer** - `storage.vala`, `dbm.vala`, `filesystem-dbm.vala`, `default-storage.vala`
+4. **Serialization** - `format.vala`, `element-writer.vala`, `element-reader.vala`, `entity-serializer.vala`, `entity-deserializer.vala`
+5. **Entity Base** - `abstract-entity.vala`
+6. **Entity Types** - `container.vala`, `document.vala`, `category.vala`, `index.vala`
+7. **Set Operations** - `entity-set.vala`
+8. **Engine Implementations** - `embedded-engine.vala`, `transaction.vala`
+9. **Protocol** - `message.vala`, `request.vala`, `response.vala`, message reader/writer, request/response classes
+10. **Remote Engine** - `remote-engine.vala`
+11. **Server** - `server.vala`, `client-connection.vala`, `request-handler.vala`
+12. **Tools** - `implexusd.vala`, `implexus-cli.vala`
+13. **Examples** - All example files
+14. **Tests** - All test files

+ 843 - 0
Architecture/11-Indexed-Entities.md

@@ -0,0 +1,843 @@
+# Indexed Entities Design
+
+This document describes the design for indexed entity types in Implexus: Category, Catalogue, and Index. These entities use pre-built indices for fast lookups instead of scanning all documents.
+
+## Overview
+
+### Problem Statement
+
+The current implementation of virtual entities has performance issues:
+
+| Entity | Current Behavior | Problem |
+|--------|-----------------|---------|
+| Category | Scans all documents, evaluates expression per document | O(n) per query |
+| Index | Scans all documents, does substring matching | O(n) per query |
+| Catalogue | Does not exist | Need key-based groupings |
+
+### Solution: Pre-built Indices
+
+All indexed entities will:
+
+1. **Build indices at creation time** - Scan existing documents and populate index structures
+2. **Update indices on changes** - Via hooks when documents are created, updated, or deleted
+3. **Use fast key lookups** - O(1) or O(log n) queries instead of O(n) scans
+
+### Entity Type Semantics
+
+```mermaid
+graph TB
+    subgraph Entity Types
+        Container[Container - Arbitrary children]
+        Document[Document - Properties]
+        Category[Category - Predicate filter]
+        Catalogue[Catalogue - Key groupings]
+        Index[Index - Text search]
+    end
+    
+    subgraph Category Behavior
+        Category -->|contains| DocCat[Documents matching predicate]
+    end
+    
+    subgraph Catalogue Behavior
+        Catalogue -->|has child| Group1[Key Group: john]
+        Catalogue -->|has child| Group2[Key Group: jane]
+        Group1 -->|contains| Doc1[Post by john]
+        Group2 -->|contains| Doc2[Post by jane]
+    end
+    
+    subgraph Index Behavior
+        Index -->|search| Result[Search Result Container]
+        Result -->|contains| Doc3[Matching documents]
+    end
+```
+
+## Entity Definitions
+
+### Container (Unchanged)
+
+- **Purpose**: Arbitrary child storage like a filesystem folder
+- **Children**: Any entity type, explicitly added
+- **Implementation**: Current implementation is correct, no changes needed
+
+### Category (Redesigned)
+
+- **Purpose**: Contains documents matching a boolean predicate expression
+- **Type Label**: Filters documents by application-defined type
+- **Expression**: Boolean expression (e.g., `!draft`, `status == "active"`, `age > 18`)
+- **Children**: Documents where predicate evaluates to `true`
+- **Example**: Category at `/posts/active` with expression `!draft` returns all posts where `draft == false`
+
+### Catalogue (New Entity Type)
+
+- **Purpose**: Groups documents by unique values of an expression
+- **Type Label**: Filters documents by application-defined type
+- **Expression**: Value expression (e.g., `author`, `category`, `status`)
+- **Children**: Virtual containers keyed by expression result
+- **Example**: Catalogue at `/posts/by-author` with expression `author`:
+  - `/posts/by-author/john` → all posts where `author == "john"`
+  - `/posts/by-author/jane` → all posts where `author == "jane"`
+- **Semantics**: Similar to `Dictionary<TKey, List<TItem>>` or `GROUP BY` in SQL
+
+### Index (Redesigned)
+
+- **Purpose**: Full-text search on a field with SQL LIKE-style patterns
+- **Type Label**: Filters documents by application-defined type
+- **Expression**: Field name to index (e.g., `content`, `title`)
+- **Children**: Search results as virtual containers
+- **Patterns Supported**:
+  - `*world*` - contains "world"
+  - `hello*` - starts with "hello"
+  - `*goodbye` - ends with "goodbye"
+- **Configuration**: Case sensitivity option (default: insensitive)
+
+## Storage Schema
+
+### Key Prefixes
+
+All index data uses scoped keys to prevent collisions between entities:
+
+```
+index:<entity_type>:<entity_path>:<data_type>:<key>
+```
+
+### Type Label Index
+
+To efficiently find all documents of a given type, maintain a global type index:
+
+```
+typeidx:<type_label> → [doc_path1, doc_path2, ...]
+```
+
+**Storage**: `typeidx:User` → `["/users/john", "/users/jane", ...]`
+
+### Category Storage Schema
+
+Category stores document paths that match the predicate:
+
+```
+cat:<entity_path>:members → [doc_path1, doc_path2, ...]
+cat:<entity_path>:config → {type_label: "...", expression: "..."}
+```
+
+**Example**:
+```
+cat:/posts/active:members → ["/posts/post1", "/posts/post3"]
+cat:/posts/active:config → {type_label: "Post", expression: "!draft"}
+```
+
+### Catalogue Storage Schema
+
+Catalogue stores groupings keyed by expression result:
+
+```
+catl:<entity_path>:group:<key_value> → [doc_path1, doc_path2, ...]
+catl:<entity_path>:keys → ["key1", "key2", ...]
+catl:<entity_path>:config → {type_label: "...", expression: "..."}
+```
+
+**Example**:
+```
+catl:/posts/by-author:group:john → ["/posts/post1", "/posts/post3"]
+catl:/posts/by-author:group:jane → ["/posts/post2"]
+catl:/posts/by-author:keys → ["john", "jane"]
+catl:/posts/by-author:config → {type_label: "Post", expression: "author"}
+```
+
+### Index Storage Schema (Hierarchical N-gram)
+
+The Index uses a hierarchical n-gram structure for arbitrary-length pattern matching:
+
+#### Primary Trigram Index
+
+```
+idx:<entity_path>:tri:<trigram> → [doc_path1, doc_path2, ...]
+```
+
+**Example**: Text "hello world" produces trigrams: `hel`, `ell`, `llo`, `lo `, `o w`, ` wo`, `wor`, `orl`, `rld`
+
+```
+idx:/posts/search:tri:hel → ["/posts/post1", "/posts/post2"]
+idx:/posts/search:tri:ell → ["/posts/post1"]
+...
+```
+
+#### Bigram Reverse Index
+
+Maps bigrams to trigrams containing them (for 2-character queries):
+
+```
+idx:<entity_path>:bi:<bigram> → [trigram1, trigram2, ...]
+```
+
+**Example**:
+```
+idx:/posts/search:bi:he → ["hel"]
+idx:/posts/search:bi:el → ["hel", "ell"]
+idx:/posts/search:bi:ll → ["ell", "llo"]
+```
+
+#### Unigram Reverse Index
+
+Maps characters to bigrams starting with them (for 1-character queries):
+
+```
+idx:<entity_path>:uni:<char> → [bigram1, bigram2, ...]
+```
+
+**Example**:
+```
+idx:/posts/search:uni:h → ["he"]
+idx:/posts/search:uni:e → ["el"]
+```
+
+#### Document Content Cache
+
+Stores the indexed field value for each document (used for verification and rebuilds):
+
+```
+idx:<entity_path>:doc:<doc_path> → "indexed field value"
+```
+
+#### Configuration
+
+```
+idx:<entity_path>:config → {type_label: "...", expression: "...", case_sensitive: false}
+```
+
+### Index Size Analysis
+
+| Index Type | Entries Per Doc | Stores | Relative Size |
+|------------|-----------------|--------|---------------|
+| Trigram→Docs | O(L) where L = text length | Doc paths | Primary cost |
+| Bigram→Trigrams | O(L) | Strings only | Small |
+| Unigram→Bigrams | O(unique chars) | Strings only | Tiny |
+| Doc Content | 1 | Field value | Moderate |
+
+**Example**: For 10,000 documents averaging 1KB text:
+- Trigram index: ~10M entries (document paths)
+- Bigram reverse: ~2,500 entries (just strings)
+- Unigram reverse: ~50 entries (just strings)
+
+The reverse indexes have minimal overhead since they store short strings, not document paths.
+
+## Query Algorithms
+
+### Category Query
+
+```
+INPUT: Category at path P
+OUTPUT: All documents matching predicate
+
+1. Load config to get type_label and expression
+2. Lookup cat:P:members → [doc_paths]
+3. For each doc_path, instantiate Document entity
+4. Return documents
+```
+
+**Complexity**: O(k) where k = number of matching documents
+
+### Catalogue Query
+
+```
+INPUT: Catalogue at path P, optional key K
+OUTPUT: If K provided: documents with that key value
+        Otherwise: list of available keys
+
+# Get specific group
+get_child(K):
+1. Lookup catl:P:group:K → [doc_paths]
+2. Return VirtualContainer with documents
+
+# List available keys
+child_names:
+1. Lookup catl:P:keys → [key1, key2, ...]
+2. Return keys
+```
+
+**Complexity**: O(1) for key lookup, O(k) for listing groups
+
+### Index Query
+
+```
+INPUT: Index at path P, search pattern S
+OUTPUT: Documents matching pattern
+
+parse_pattern(S):
+  if S starts with * and ends with *:
+    return CONTAINS(S[1:-1])
+  if S starts with *:
+    return ENDS_WITH(S[1:])
+  if S ends with *:
+    return STARTS_WITH(S[:-1])
+  return EXACT(S)
+
+search(pattern):
+  terms = parse_pattern(pattern)
+  
+  if terms.type == CONTAINS and len(terms.value) >= 3:
+    # Use trigram index
+    trigrams = extract_trigrams(terms.value)
+    doc_sets = [lookup idx:P:tri:t for t in trigrams]
+    candidates = intersect(doc_sets)
+    return [d for d in candidates if d contains terms.value]
+  
+  if terms.type == CONTAINS and len(terms.value) == 2:
+    # Use bigram→trigram→docs
+    bigram = terms.value
+    trigrams = lookup idx:P:bi:bigram
+    doc_sets = [lookup idx:P:tri:t for t in trigrams]
+    candidates = union(doc_sets)
+    return [d for d in candidates if d contains terms.value]
+  
+  if terms.type == CONTAINS and len(terms.value) == 1:
+    # Use unigram→bigram→trigram→docs
+    char = terms.value
+    bigrams = lookup idx:P:uni:char
+    trigrams = union([lookup idx:P:bi:b for b in bigrams])
+    doc_sets = [lookup idx:P:tri:t for t in trigrams]
+    candidates = union(doc_sets)
+    return [d for d in candidates if d contains terms.value]
+  
+  if terms.type == STARTS_WITH:
+    # Prefix search using first trigram
+    prefix = terms.value
+    first_tri = prefix[0:3]
+    candidates = lookup idx:P:tri:first_tri
+    return [d for d in candidates if d starts_with prefix]
+  
+  if terms.type == ENDS_WITH:
+    # Suffix search using last trigram
+    suffix = terms.value
+    last_tri = suffix[-3:]
+    candidates = lookup idx:P:tri:last_tri
+    return [d for d in candidates if d ends_with suffix]
+```
+
+**Complexity**:
+- 3+ char patterns: O(k) where k = candidates from trigram intersection
+- 1-2 char patterns: O(k) with more indirection but still indexed
+- Prefix/suffix: O(k) using boundary trigrams
+
+## Hook/Notification System
+
+### Overview
+
+The hook system notifies indexed entities when documents are created, modified, or deleted so they can update their indices.
+
+```mermaid
+sequenceDiagram
+    participant App as Application
+    participant Engine as Engine
+    participant Doc as Document
+    participant Hook as HookManager
+    participant Cat as Category
+    participant Catl as Catalogue
+    participant Idx as Index
+    
+    App->>Engine: create_document with type_label
+    Engine->>Doc: create
+    Engine->>Hook: entity_created(doc)
+    Hook->>Hook: find_interested_entities(type_label)
+    loop For each interested entity
+        Hook->>Cat: on_document_created(doc)
+        Hook->>Catl: on_document_created(doc)
+        Hook->>Idx: on_document_created(doc)
+    end
+```
+
+### Hook Registration
+
+Indexed entities register interest in specific type labels:
+
+```vala
+interface IndexHook {
+    // The type label this entity is interested in
+    public abstract string type_label { get; }
+    
+    // The entity path for index scoping
+    public abstract EntityPath entity_path { get; }
+    
+    // Called when a document is created
+    public abstract void on_document_created(Entity doc) throws Error;
+    
+    // Called when a document is modified
+    public abstract void on_document_modified(Entity doc, Set<string> changed_properties) throws Error;
+    
+    // Called when a document is deleted
+    public abstract void on_document_deleted(EntityPath doc_path) throws Error;
+}
+```
+
+### HookManager Implementation
+
+```vala
+class HookManager {
+    // type_label → list of registered hooks
+    private Dictionary<string, List<IndexHook>> _hooks_by_type;
+    
+    // Register a hook for a type label
+    public void register_hook(IndexHook hook) {
+        var hooks = _hooks_by_type.get(hook.type_label);
+        if (hooks == null) {
+            // First hook for this type label: create the list and store it
+            // back in the dictionary so the registration persists.
+            hooks = new List<IndexHook>();
+            _hooks_by_type.set(hook.type_label, hooks);
+        }
+        hooks.add(hook);
+    }
+    
+    // Unregister a hook
+    public void unregister_hook(IndexHook hook) {
+        var hooks = _hooks_by_type.get(hook.type_label);
+        if (hooks != null) {
+            hooks.remove(hook);
+        }
+    }
+    
+    // Notify hooks of document creation
+    public void notify_created(Entity doc) {
+        var hooks = _hooks_by_type.get(doc.type_label);
+        if (hooks != null) {
+            foreach (var hook in hooks) {
+                hook.on_document_created(doc);
+            }
+        }
+    }
+    
+    // Notify hooks of document modification
+    public void notify_modified(Entity doc, Set<string> changed_properties) {
+        var hooks = _hooks_by_type.get(doc.type_label);
+        if (hooks != null) {
+            foreach (var hook in hooks) {
+                hook.on_document_modified(doc, changed_properties);
+            }
+        }
+    }
+    
+    // Notify hooks of document deletion
+    public void notify_deleted(EntityPath doc_path, string type_label) {
+        var hooks = _hooks_by_type.get(type_label);
+        if (hooks != null) {
+            foreach (var hook in hooks) {
+                hook.on_document_deleted(doc_path);
+            }
+        }
+    }
+}
+```
+
+### Engine Integration
+
+The Engine emits signals that the HookManager listens to:
+
+```vala
+class EmbeddedEngine : Object, Engine {
+    private HookManager _hooks;
+    
+    // Connect signals to hook manager
+    construct {
+        _hooks = new HookManager();
+        this.entity_created.connect((entity) => {
+            if (entity.entity_type == EntityType.DOCUMENT) {
+                _hooks.notify_created(entity);
+            }
+        });
+        this.entity_modified.connect((entity) => {
+            if (entity.entity_type == EntityType.DOCUMENT) {
+                _hooks.notify_modified(entity, entity.modified_properties);
+            }
+        });
+        this.entity_deleted.connect((path, type_label) => {
+            _hooks.notify_deleted(path, type_label);
+        });
+    }
+}
+```
+
+## Index Update Logic
+
+### Category Index Update
+
+```vala
+class Category : AbstractEntity, IndexHook {
+    
+    public void on_document_created(Entity doc) {
+        update_index_for_document(doc, true);
+    }
+    
+    public void on_document_modified(Entity doc, Set<string> changed_properties) {
+        // Re-evaluate predicate and update index
+        update_index_for_document(doc, evaluate_predicate(doc));
+    }
+    
+    public void on_document_deleted(EntityPath doc_path) {
+        remove_from_index(doc_path);
+    }
+    
+    private void update_index_for_document(Entity doc, bool should_include) {
+        var members_key = "cat:%s:members".printf(_path.to_string());
+        var current_members = load_member_set(members_key);
+        
+        if (should_include) {
+            current_members.add(doc.path.to_string());
+        } else {
+            current_members.remove(doc.path.to_string());
+        }
+        
+        save_member_set(members_key, current_members);
+    }
+    
+    private bool evaluate_predicate(Entity doc) {
+        // Evaluate the boolean expression against the document
+        return ExpressionEvaluator.evaluate_boolean(_expression, doc.properties);
+    }
+}
+```
+
+### Catalogue Index Update
+
+```vala
+class Catalogue : AbstractEntity, IndexHook {
+    
+    public void on_document_created(Entity doc) {
+        update_index_for_document(doc);
+    }
+    
+    public void on_document_modified(Entity doc, Set<string> changed_properties) {
+        // Check if the grouped expression changed
+        if (changed_properties.contains(_expression)) {
+            // Get old value and remove from old group
+            var old_value = get_old_value(doc);
+            remove_from_group(old_value, doc.path);
+            
+            // Add to new group
+            update_index_for_document(doc);
+        }
+    }
+    
+    public void on_document_deleted(EntityPath doc_path) {
+        // Need to look up the value before removing
+        var value = get_indexed_value(doc_path);
+        remove_from_group(value, doc_path);
+    }
+    
+    private void update_index_for_document(Entity doc) {
+        var value = evaluate_expression(doc);
+        if (value != null) {
+            add_to_group(value, doc.path);
+        }
+    }
+    
+    private void add_to_group(string key, EntityPath doc_path) {
+        var group_key = "catl:%s:group:%s".printf(_path.to_string(), key);
+        var members = load_member_set(group_key);
+        members.add(doc_path.to_string());
+        save_member_set(group_key, members);
+        
+        // Also update keys list if this is a new key
+        var keys_key = "catl:%s:keys".printf(_path.to_string());
+        var keys = load_key_set(keys_key);
+        if (!keys.contains(key)) {
+            keys.add(key);
+            save_key_set(keys_key, keys);
+        }
+    }
+}
+```
+
+### Index Entity Update (N-gram)
+
+```vala
+class Index : AbstractEntity, IndexHook {
+    
+    private bool _case_sensitive;
+    
+    public void on_document_created(Entity doc) {
+        index_document(doc);
+    }
+    
+    public void on_document_modified(Entity doc, Set<string> changed_properties) {
+        if (changed_properties.contains(_expression)) {
+            // Re-index with new content
+            reindex_document(doc);
+        }
+    }
+    
+    public void on_document_deleted(EntityPath doc_path) {
+        unindex_document(doc_path);
+    }
+    
+    private void index_document(Entity doc) {
+        var content = get_field_value(doc);
+        if (content == null) return;
+        
+        var normalized = _case_sensitive ? content : content.down();
+        var doc_path_str = doc.path.to_string();
+        
+        // Store content cache
+        var cache_key = "idx:%s:doc:%s".printf(_path.to_string(), doc_path_str);
+        storage.set(cache_key, normalized);
+        
+        // Index trigrams
+        var trigrams = extract_trigrams(normalized);
+        foreach (var tri in trigrams) {
+            add_to_trigram_index(tri, doc_path_str);
+        }
+        
+        // Update bigram reverse index
+        var bigrams = extract_bigrams(normalized);
+        foreach (var bi in bigrams) {
+            add_to_bigram_reverse(bi, trigrams);
+        }
+        
+        // Update unigram reverse index
+        var unigrams = extract_unique_chars(normalized);
+        foreach (var uni in unigrams) {
+            add_to_unigram_reverse(uni, bigrams);
+        }
+    }
+    
+    private void unindex_document(EntityPath doc_path) {
+        var doc_path_str = doc_path.to_string();
+        var cache_key = "idx:%s:doc:%s".printf(_path.to_string(), doc_path_str);
+        
+        // Get content to unindex
+        var content = storage.get(cache_key);
+        if (content == null) return;
+        
+        var trigrams = extract_trigrams(content);
+        foreach (var tri in trigrams) {
+            remove_from_trigram_index(tri, doc_path_str);
+        }
+        
+        // Note: We don't remove from reverse indexes as they're just string mappings
+        // and don't reference specific documents
+        
+        storage.delete(cache_key);
+    }
+    
+    private void reindex_document(Entity doc) {
+        unindex_document(doc.path);
+        index_document(doc);
+    }
+}
+```
+
+## API Changes
+
+### EntityType Enum
+
+Add `CATALOGUE` to the entity type enumeration:
+
+```vala
+public enum EntityType {
+    CONTAINER,
+    DOCUMENT,
+    CATEGORY,
+    CATALOGUE,  // NEW
+    INDEX;
+    
+    public bool is_indexed() {
+        return this == CATEGORY || this == CATALOGUE || this == INDEX;
+    }
+}
+```
+
+### Entity Interface Changes
+
+```vala
+public interface Entity : Object {
+    // ... existing members ...
+    
+    // New method for indexed entities
+    public abstract void rebuild_index() throws EngineError;
+    
+    // Configuration for case sensitivity (Index only)
+    public abstract bool case_sensitive { get; }
+}
+```
+
+### Container Interface Changes
+
Add factory methods to the Container interface: one to create a Catalogue, and one to create an Index with configurable options:
+
+```vala
+public interface Entity : Object {
+    // ... existing methods ...
+    
+    // New factory method for Catalogue
+    public abstract Entity? create_catalogue(
+        string name,
+        string type_label,
+        string expression
+    ) throws EngineError;
+    
+    // Index creation with options
+    public abstract Entity? create_index_with_options(
+        string name,
+        string type_label,
+        string expression,
+        IndexOptions options
+    ) throws EngineError;
+}
+
+public class IndexOptions : Object {
+    public bool case_sensitive { get; set; default = false; }
+}
+```
+
+### Engine Interface Changes
+
+```vala
+public interface Engine : Object {
+    // ... existing members ...
+    
+    // Get all indexed entities for a type label
+    public abstract Invercargill.Enumerable<Entity> get_indexes_for_type(string type_label);
+    
+    // Rebuild all indices (maintenance operation)
+    public abstract void rebuild_all_indices() throws EngineError;
+    
+    // New signal with type_label for deleted entities
+    public signal void entity_deleted(EntityPath path, string type_label);
+}
+```
+
+### Storage Interface Changes
+
+```vala
+public interface Storage : Object {
+    // ... existing methods ...
+    
+    // Index operations
+    public abstract void add_to_index(string index_key, string doc_path) throws StorageError;
+    public abstract void remove_from_index(string index_key, string doc_path) throws StorageError;
+    public abstract Invercargill.Enumerable<string> get_index_members(string index_key) throws StorageError;
+    
+    // Type index operations
+    public abstract void register_document_type(EntityPath doc_path, string type_label) throws StorageError;
+    public abstract void unregister_document_type(EntityPath doc_path, string type_label) throws StorageError;
+    public abstract Invercargill.Enumerable<EntityPath> get_documents_by_type(string type_label) throws StorageError;
+    
+    // Catalogue operations
+    public abstract void catalogue_add_to_group(EntityPath catalogue_path, string key, EntityPath doc_path) throws StorageError;
+    public abstract void catalogue_remove_from_group(EntityPath catalogue_path, string key, EntityPath doc_path) throws StorageError;
+    public abstract Invercargill.Enumerable<string> catalogue_get_keys(EntityPath catalogue_path) throws StorageError;
+    public abstract Invercargill.Enumerable<EntityPath> catalogue_get_group(EntityPath catalogue_path, string key) throws StorageError;
+    
+    // N-gram index operations
+    public abstract void ngram_add_trigram(EntityPath index_path, string trigram, EntityPath doc_path) throws StorageError;
+    public abstract void ngram_remove_trigram(EntityPath index_path, string trigram, EntityPath doc_path) throws StorageError;
+    public abstract Invercargill.Enumerable<EntityPath> ngram_get_documents(EntityPath index_path, string trigram) throws StorageError;
+    
+    public abstract void ngram_add_bigram_reverse(EntityPath index_path, string bigram, string trigram) throws StorageError;
+    public abstract Invercargill.Enumerable<string> ngram_get_trigrams_for_bigram(EntityPath index_path, string bigram) throws StorageError;
+    
+    public abstract void ngram_add_unigram_reverse(EntityPath index_path, char unigram, string bigram) throws StorageError;
+    public abstract Invercargill.Enumerable<string> ngram_get_bigrams_for_unigram(EntityPath index_path, char unigram) throws StorageError;
+}
+```
+
+## Implementation Notes
+
+### Index Building on Creation
+
+When an indexed entity is created, it must build its index from existing documents:
+
+```vala
+public override Entity? create_category(string name, string type_label, string expression) throws EngineError {
+    // ... create entity metadata ...
+    
+    var category = new Category(_engine, child_path, type_label, expression);
+    
+    // Build initial index
+    category.build_initial_index();
+    
+    // Register with hook manager
+    _engine.hooks.register_hook(category);
+    
+    _engine.entity_created(category);
+    return category;
+}
+```
+
+### Expression Evaluation
+
+The expression system needs to support:
+
+1. **Boolean expressions** (Category): `!draft`, `status == "active"`, `age > 18`
+2. **Value expressions** (Catalogue): `author`, `category`, `status.year`
+3. **Field access** (Index): `content`, `title`
+
+Consider using or extending Invercargill's expression system if available.
+
+### Transaction Safety
+
+Index updates should be atomic with document changes:
+
+```vala
+public void set_entity_property(string name, Element value) throws EngineError {
+    var tx = _engine.begin_transaction();
+    try {
+        // Update property
+        _properties.set(name, value);
+        save_properties();
+        
+        // Notify hooks (which update indices)
+        _engine.notify_modified(this, new Set.from_values(name));
+        
+        tx.commit();
+    } catch (Error e) {
+        tx.rollback();
+        throw e;
+    }
+}
+```
+
+### Performance Considerations
+
+1. **Batch Updates**: When importing many documents, consider a bulk index build mode
+2. **Lazy Index Building**: For large datasets, build indices asynchronously
+3. **Index Compaction**: Periodically compact index storage to remove fragmentation
+4. **Caching**: Cache frequently accessed index data in memory
+
+### Error Handling
+
+```vala
+public enum IndexError {
+    // Expression parsing failed
+    EXPRESSION_ERROR,
+    // Index corruption detected
+    CORRUPT_INDEX,
+    // Index build failed
+    BUILD_FAILED,
+    // Hook registration failed
+    HOOK_ERROR;
+}
+```
+
+## Migration Path
+
+### From Current Implementation
+
+1. **Add CATALOGUE to EntityType enum**
+2. **Create Catalogue entity class**
+3. **Update Storage interface with index methods**
+4. **Implement HookManager in Engine**
+5. **Refactor Category to use indexed storage**
+6. **Refactor Index to use n-gram storage**
+7. **Update Container to support create_catalogue**
+8. **Add index rebuild utility**
+
+### Backward Compatibility
+
+- Existing databases will not have index data
+- On first access, indexed entities should detect missing indices and rebuild
+- Provide a migration tool to pre-build indices for large databases
+
+## Summary
+
+| Entity | Purpose | Index Type | Query Complexity |
+|--------|---------|------------|------------------|
+| Container | Arbitrary children | None (explicit) | O(k) |
+| Document | Properties | None | O(1) |
+| Category | Boolean filter | Member set | O(k) |
+| Catalogue | Key groupings | Key→members map | O(1) per key |
+| Index | Text search | Hierarchical n-gram | O(k) |
+
+Where k = number of results/candidates, not total documents.

+ 668 - 0
Architecture/12-Storage-Redesign.md

@@ -0,0 +1,668 @@
+# Storage Layer Redesign
+
+## Document Status
+- **Created**: 2026-03-13
+- **Status**: Draft
+- **Author**: Collaborative design session
+
+## Problems with Current Architecture
+
+### 1. Confusing Separation of Concerns
+The current design splits storage operations between `Storage` and `IndexManager` without clear boundaries:
+- `Storage` handles entity metadata, properties, children, AND configs
+- `IndexManager` handles type indices, category members, catalogue groups, AND n-grams
+- It's unclear which class should handle what
+
+### 2. Awkward Coupling in EmbeddedEngine
+```vala
+// EmbeddedEngine must cast to BasicStorage to get Dbm for IndexManager
+var basic_storage = (_storage as Storage.BasicStorage);
+if (basic_storage != null) {
+    _index_manager = new Storage.IndexManager(basic_storage.dbm);
+}
+```
+This is a code smell indicating the abstraction is broken.
+
+### 3. Unclear Value of Storage Interface
+The `Storage` interface has only one implementation (`BasicStorage`). It's unclear what alternative implementations would look like or why an interface is needed.
+
+### 4. Naming Confusion
+- `Storage.add_child()` / `get_children()` - Structural child names
+- `IndexManager.add_to_category()` / `get_category_members()` - Indexed member documents
+- Both deal with "children" but mean different things
+
+### 5. Key Prefix Sprawl
+Both classes define key prefixes independently, making it hard to see the overall storage schema.
+
+## Proposed Architecture
+
+### Design Principles
+
+1. **One class per prefix** - Each key prefix gets its own focused class
+2. **Entity facades compose prefix stores** - High-level APIs for each entity type
+3. **Engine holds facade references** - Clean dependency graph
+4. **All stores are concrete classes** - No unnecessary interfaces
+
+### Architecture Overview
+
+```mermaid
+graph TB
+    subgraph Low-Level Prefix Stores
+        EMS[EntityMetadataStorage<br/>entity: prefix]
+        PS[PropertiesStorage<br/>props: prefix]
+        CS[ChildrenStorage<br/>children: prefix]
+        CCS[CategoryConfigStorage<br/>config: prefix]
+        CLCS[CatalogueConfigStorage<br/>catcfg: prefix]
+        TIS[TypeIndexStorage<br/>typeidx: prefix]
+        CATIS[CategoryIndexStorage<br/>cat: prefix]
+        CLIS[CatalogueIndexStorage<br/>catl: prefix]
+        TXIS[TextIndexStorage<br/>idx: prefix]
+    end
+    
+    subgraph High-Level Entity Facades
+        ES[EntityStore<br/>metadata + type index]
+        DS[DocumentStore<br/>properties]
+        CNS[ContainerStore<br/>children]
+        CAS[CategoryStore<br/>config + index + children]
+        CLS[CatalogueStore<br/>config + index]
+        IDS[IndexStore<br/>config + text index]
+    end
+    
+    subgraph Engine
+        E[EmbeddedEngine]
+    end
+    
+    EMS --> ES
+    TIS --> ES
+    PS --> DS
+    CS --> CNS
+    CCS --> CAS
+    CATIS --> CAS
+    CS --> CAS
+    CLCS --> CLS
+    CLIS --> CLS
+    CCS --> IDS
+    TXIS --> IDS
+    
+    E --> ES
+    E --> DS
+    E --> CNS
+    E --> CAS
+    E --> CLS
+    E --> IDS
+```
+
+## Low-Level Prefix Stores
+
+Each prefix store handles exactly one key prefix and provides type-safe operations.
+
+**Naming Convention:** Prefix stores use the `Storage` suffix (e.g., `EntityMetadataStorage`).
+
+### EntityMetadataStorage
+**Prefix:** `entity:`
+
+**Key Format:** `entity:<path>`
+
+**Value:** Serialized `(EntityType type, string? type_label)`
+
+```vala
+public class EntityMetadataStorage : Object {
+    public EntityMetadataStorage(Dbm dbm);
+    
+    public void store_metadata(EntityPath path, EntityType type, string? type_label) throws StorageError;
+    public EntityType? get_type(EntityPath path) throws StorageError;
+    public string? get_type_label(EntityPath path) throws StorageError;
+    public bool exists(EntityPath path);
+    public void delete(EntityPath path) throws StorageError;
+}
+```
+
+### PropertiesStorage
+**Prefix:** `props:`
+
+**Key Format:** `props:<path>`
+
+**Value:** Serialized `Properties` dictionary
+
+```vala
+public class PropertiesStorage : Object {
+    public PropertiesStorage(Dbm dbm);
+    
+    public void store(EntityPath path, Properties properties) throws StorageError;
+    public Properties? load(EntityPath path) throws StorageError;
+    public void delete(EntityPath path) throws StorageError;
+}
+```
+
+### ChildrenStorage
+**Prefix:** `children:`
+
+**Key Format:** `children:<parent_path>`
+
+**Value:** Serialized array of child names
+
+```vala
+public class ChildrenStorage : Object {
+    public ChildrenStorage(Dbm dbm);
+    
+    public void add_child(EntityPath parent, string child_name) throws StorageError;
+    public void remove_child(EntityPath parent, string child_name) throws StorageError;
+    public bool has_child(EntityPath parent, string child_name) throws StorageError;
+    public Enumerable<string> get_children(EntityPath parent) throws StorageError;
+    public void delete(EntityPath parent) throws StorageError;
+}
+```
+
+### CategoryConfigStorage
+**Prefix:** `config:`
+
+**Key Format:** `config:<path>`
+
+**Value:** Serialized `(string type_label, string expression)`
+
+```vala
+public class CategoryConfigStorage : Object {
+    public CategoryConfigStorage(Dbm dbm);
+    
+    public void store(EntityPath path, string type_label, string expression) throws StorageError;
+    public CategoryConfig? load(EntityPath path) throws StorageError;
+    public void delete(EntityPath path) throws StorageError;
+}
+
+public class CategoryConfig : Object {
+    public string type_label { get; construct set; }
+    public string expression { get; construct set; }
+}
+```
+
+### CatalogueConfigStorage
+**Prefix:** `catcfg:`
+
+**Key Format:** `catcfg:<path>`
+
+**Value:** Serialized `(string type_label, string expression)`
+
+```vala
+public class CatalogueConfigStorage : Object {
+    public CatalogueConfigStorage(Dbm dbm);
+    
+    public void store(EntityPath path, string type_label, string expression) throws StorageError;
+    public CatalogueConfig? load(EntityPath path) throws StorageError;
+    public void delete(EntityPath path) throws StorageError;
+}
+
+public class CatalogueConfig : Object {
+    public string type_label { get; construct set; }
+    public string expression { get; construct set; }
+}
+```
+
+### TypeIndexStorage
+**Prefix:** `typeidx:`
+
+**Key Format:** `typeidx:<type_label>`
+
+**Value:** Serialized array of document paths
+
+```vala
+public class TypeIndexStorage : Object {
+    public TypeIndexStorage(Dbm dbm);
+    
+    public void add_document(string type_label, string doc_path) throws StorageError;
+    public void remove_document(string type_label, string doc_path) throws StorageError;
+    public Enumerable<string> get_documents(string type_label);
+}
+```
+
+### CategoryIndexStorage
+**Prefix:** `cat:`
+
+**Key Format:** `cat:<category_path>:members`
+
+**Value:** Serialized array of document paths
+
+```vala
+public class CategoryIndexStorage : Object {
+    public CategoryIndexStorage(Dbm dbm);
+    
+    // Single member operations
+    public void add_member(string category_path, string doc_path) throws StorageError;
+    public void remove_member(string category_path, string doc_path) throws StorageError;
+    
+    // Batch member operations
+    public void add_members(string category_path, Enumerable<string> doc_paths) throws StorageError;
+    public void remove_members(string category_path, Enumerable<string> doc_paths) throws StorageError;
+    public void set_members(string category_path, Enumerable<string> doc_paths) throws StorageError;
+    
+    // Query and lifecycle
+    public Enumerable<string> get_members(string category_path);
+    public void clear(string category_path) throws StorageError;
+}
+```
+
+### CatalogueIndexStorage
+**Prefix:** `catl:`
+
+**Key Formats:**
+- `catl:<catalogue_path>:keys` - List of group keys
+- `catl:<catalogue_path>:group:<key>` - Document paths in group
+
+```vala
+public class CatalogueIndexStorage : Object {
+    public CatalogueIndexStorage(Dbm dbm);
+    
+    // Group operations
+    public void add_to_group(string catalogue_path, string key, string doc_path) throws StorageError;
+    public void remove_from_group(string catalogue_path, string key, string doc_path) throws StorageError;
+    public Enumerable<string> get_group_members(string catalogue_path, string key);
+    
+    // Key operations
+    public void add_key(string catalogue_path, string key) throws StorageError;
+    public void remove_key(string catalogue_path, string key) throws StorageError;
+    public Enumerable<string> get_keys(string catalogue_path);
+    
+    // Clear all
+    public void clear(string catalogue_path) throws StorageError;
+}
+```
+
+### TextIndexStorage
+**Prefix:** `idx:`
+
+**Key Formats:**
+- `idx:<index_path>:tri:<trigram>` - Document paths containing trigram
+- `idx:<index_path>:bi:<bigram>` - Trigrams containing bigram
+- `idx:<index_path>:uni:<unigram>` - Bigrams containing the unigram
+- `idx:<index_path>:doc:<doc_path>` - Cached document content
+
+```vala
+public class TextIndexStorage : Object {
+    public TextIndexStorage(Dbm dbm);
+    
+    // Trigram index
+    public void add_trigram(string index_path, string trigram, string doc_path) throws StorageError;
+    public void remove_trigram(string index_path, string trigram, string doc_path) throws StorageError;
+    public Enumerable<string> get_documents_for_trigram(string index_path, string trigram);
+    
+    // Bigram reverse index
+    public void add_bigram_mapping(string index_path, string bigram, string trigram) throws StorageError;
+    public Enumerable<string> get_trigrams_for_bigram(string index_path, string bigram);
+    
+    // Unigram reverse index
+    public void add_unigram_mapping(string index_path, string unigram, string bigram) throws StorageError;
+    public Enumerable<string> get_bigrams_for_unigram(string index_path, string unigram);
+    
+    // Document content cache
+    public void store_document_content(string index_path, string doc_path, string content) throws StorageError;
+    public string? get_document_content(string index_path, string doc_path);
+    public void remove_document_content(string index_path, string doc_path) throws StorageError;
+    
+    // Clear all
+    public void clear(string index_path) throws StorageError;
+}
+```
+
+## High-Level Entity Facades
+
+Facades compose prefix stores to provide entity-specific APIs.
+
+**Naming Convention:** Entity facades use the `Store` suffix (e.g., `EntityStore`).
+
+### EntityStore
+**Composition:** `EntityMetadataStorage` + `TypeIndexStorage`
+
+```vala
+public class EntityStore : Object {
+    public EntityStore(Dbm dbm);
+    
+    // Metadata operations
+    public void store_metadata(EntityPath path, EntityType type, string? type_label) throws StorageError;
+    public EntityType? get_type(EntityPath path) throws StorageError;
+    public string? get_type_label(EntityPath path) throws StorageError;
+    public bool exists(EntityPath path);
+    public void delete(EntityPath path) throws StorageError;
+    
+    // Type index operations
+    public void register_document_type(string type_label, string doc_path) throws StorageError;
+    public void unregister_document_type(string type_label, string doc_path) throws StorageError;
+    public Enumerable<string> get_documents_by_type(string type_label);
+}
+```
+
+### DocumentStore
+**Composition:** `PropertiesStorage`
+
+```vala
+public class DocumentStore : Object {
+    public DocumentStore(Dbm dbm);
+    
+    public void store_properties(EntityPath path, Properties properties) throws StorageError;
+    public Properties? load_properties(EntityPath path) throws StorageError;
+    public void delete(EntityPath path) throws StorageError;
+}
+```
+
+### ContainerStore
+**Composition:** `ChildrenStorage`
+
+```vala
+public class ContainerStore : Object {
+    public ContainerStore(Dbm dbm);
+    
+    public void add_child(EntityPath parent, string child_name) throws StorageError;
+    public void remove_child(EntityPath parent, string child_name) throws StorageError;
+    public bool has_child(EntityPath parent, string child_name) throws StorageError;
+    public Enumerable<string> get_children(EntityPath parent) throws StorageError;
+}
+```
+
+### CategoryStore
+**Composition:** `CategoryConfigStorage` + `CategoryIndexStorage` + `ChildrenStorage`
+
+```vala
+public class CategoryStore : Object {
+    public CategoryStore(Dbm dbm);
+    
+    // Configuration
+    public void store_config(EntityPath path, string type_label, string expression) throws StorageError;
+    public CategoryConfig? load_config(EntityPath path) throws StorageError;
+    
+    // Single member operations
+    public void add_member(EntityPath category_path, string doc_path) throws StorageError;
+    public void remove_member(EntityPath category_path, string doc_path) throws StorageError;
+    
+    // Batch member operations
+    public void add_members(EntityPath category_path, Enumerable<string> doc_paths) throws StorageError;
+    public void remove_members(EntityPath category_path, Enumerable<string> doc_paths) throws StorageError;
+    public void set_members(EntityPath category_path, Enumerable<string> doc_paths) throws StorageError;
+    public Enumerable<string> get_members(EntityPath category_path);
+    
+    // Structural children (for when entities are created inside category)
+    public void add_child(EntityPath parent, string child_name) throws StorageError;
+    public void remove_child(EntityPath parent, string child_name) throws StorageError;
+    public Enumerable<string> get_children(EntityPath parent) throws StorageError;
+    
+    // Lifecycle
+    public void delete(EntityPath path) throws StorageError;
+}
+```
+
+### CatalogueStore
+**Composition:** `CatalogueConfigStorage` + `CatalogueIndexStorage`
+
+```vala
+public class CatalogueStore : Object {
+    public CatalogueStore(Dbm dbm);
+    
+    // Configuration
+    public void store_config(EntityPath path, string type_label, string expression) throws StorageError;
+    public CatalogueConfig? load_config(EntityPath path) throws StorageError;
+    
+    // Group operations
+    public void add_to_group(EntityPath catalogue_path, string key, string doc_path) throws StorageError;
+    public void remove_from_group(EntityPath catalogue_path, string key, string doc_path) throws StorageError;
+    public Enumerable<string> get_group_members(EntityPath catalogue_path, string key);
+    public Enumerable<string> get_group_keys(EntityPath catalogue_path);
+    
+    // Lifecycle
+    public void delete(EntityPath path) throws StorageError;
+}
+```
+
+### IndexStore
+**Composition:** `CategoryConfigStorage` + `TextIndexStorage`
+
+```vala
+public class IndexStore : Object {
+    public IndexStore(Dbm dbm);
+    
+    // Configuration (reuses CategoryConfigStorage with config: prefix)
+    public void store_config(EntityPath path, string type_label, string expression) throws StorageError;
+    public CategoryConfig? load_config(EntityPath path) throws StorageError;
+    
+    // Trigram index
+    public void add_trigram(EntityPath index_path, string trigram, string doc_path) throws StorageError;
+    public void remove_trigram(EntityPath index_path, string trigram, string doc_path) throws StorageError;
+    public Enumerable<string> get_documents_for_trigram(EntityPath index_path, string trigram);
+    
+    // Reverse indices
+    public void add_bigram_mapping(EntityPath index_path, string bigram, string trigram) throws StorageError;
+    public Enumerable<string> get_trigrams_for_bigram(EntityPath index_path, string bigram);
+    public void add_unigram_mapping(EntityPath index_path, string unigram, string bigram) throws StorageError;
+    public Enumerable<string> get_bigrams_for_unigram(EntityPath index_path, string unigram);
+    
+    // Content cache
+    public void store_document_content(EntityPath index_path, string doc_path, string content) throws StorageError;
+    public string? get_document_content(EntityPath index_path, string doc_path);
+    public void remove_document_content(EntityPath index_path, string doc_path) throws StorageError;
+    
+    // Lifecycle
+    public void delete(EntityPath path) throws StorageError;
+}
+```
+
+## Engine Integration
+
+The `EmbeddedEngine` holds references to all entity facades:
+
+```vala
+public class EmbeddedEngine : Object, Core.Engine {
+    private EntityStore _entity_store;
+    private DocumentStore _document_store;
+    private ContainerStore _container_store;
+    private CategoryStore _category_store;
+    private CatalogueStore _catalogue_store;
+    private IndexStore _index_store;
+    
+    public EmbeddedEngine.with_path(string storage_path) {
+        var dbm = new FilesystemDbm(storage_path);
+        
+        _entity_store = new EntityStore(dbm);
+        _document_store = new DocumentStore(dbm);
+        _container_store = new ContainerStore(dbm);
+        _category_store = new CategoryStore(dbm);
+        _catalogue_store = new CatalogueStore(dbm);
+        _index_store = new IndexStore(dbm);
+        
+        // ... rest of initialization
+    }
+    
+    // Public access for entity classes
+    public EntityStore entity_store { get { return _entity_store; } }
+    public DocumentStore document_store { get { return _document_store; } }
+    public ContainerStore container_store { get { return _container_store; } }
+    public CategoryStore category_store { get { return _category_store; } }
+    public CatalogueStore catalogue_store { get { return _catalogue_store; } }
+    public IndexStore index_store { get { return _index_store; } }
+}
+```
+
+## Key Schema Summary
+
+| Prefix | Storage Class | Description |
+|--------|---------------|-------------|
+| `entity:` | EntityMetadataStorage | Entity type and type_label |
+| `props:` | PropertiesStorage | Document properties |
+| `children:` | ChildrenStorage | Structural child names |
+| `config:` | CategoryConfigStorage | Category/Index configuration |
+| `catcfg:` | CatalogueConfigStorage | Catalogue configuration |
+| `typeidx:` | TypeIndexStorage | Global type → documents index |
+| `cat:` | CategoryIndexStorage | Category members index |
+| `catl:` | CatalogueIndexStorage | Catalogue groups and keys |
+| `idx:` | TextIndexStorage | N-gram indices and content cache |
+
+## Implementation Strategy
+
+Since this is a greenfield project, we can implement directly without backward compatibility concerns.
+
+### Phase 1: Create Prefix Storage Classes
+1. Create `EntityMetadataStorage` in `src/Storage/EntityMetadataStorage.vala`
+2. Create `PropertiesStorage` in `src/Storage/PropertiesStorage.vala`
+3. Create `ChildrenStorage` in `src/Storage/ChildrenStorage.vala`
+4. Create `CategoryConfigStorage` in `src/Storage/CategoryConfigStorage.vala`
+5. Create `CatalogueConfigStorage` in `src/Storage/CatalogueConfigStorage.vala`
+6. Create `TypeIndexStorage` in `src/Storage/TypeIndexStorage.vala`
+7. Create `CategoryIndexStorage` in `src/Storage/CategoryIndexStorage.vala`
+8. Create `CatalogueIndexStorage` in `src/Storage/CatalogueIndexStorage.vala`
+9. Create `TextIndexStorage` in `src/Storage/TextIndexStorage.vala`
+
+### Phase 2: Create Entity Facade Classes
+1. Create `EntityStore` in `src/Storage/EntityStore.vala`
+2. Create `DocumentStore` in `src/Storage/DocumentStore.vala`
+3. Create `ContainerStore` in `src/Storage/ContainerStore.vala`
+4. Create `CategoryStore` in `src/Storage/CategoryStore.vala`
+5. Create `CatalogueStore` in `src/Storage/CatalogueStore.vala`
+6. Create `IndexStore` in `src/Storage/IndexStore.vala`
+
+### Phase 3: Update EmbeddedEngine
+1. Add `with_write_transaction()` helper method
+2. Add facade references for all stores
+3. Remove old `Storage` and `IndexManager` references
+
+### Phase 4: Update Entity Classes
+1. Update `Container.vala` to use `ContainerStore` and `EntityStore`
+2. Update `Document.vala` to use `DocumentStore` and `EntityStore`
+3. Update `Category.vala` to use `CategoryStore` and `EntityStore`
+4. Update `Catalogue.vala` to use `CatalogueStore` and `EntityStore`
+5. Update `Index.vala` to use `IndexStore` and `EntityStore`
+
+### Phase 5: Remove Old Classes
+1. Delete `src/Storage/Storage.vala`
+2. Delete `src/Storage/IndexManager.vala`
+
+### Phase 6: Update Tests
+1. Update `tests/Storage/StorageTest.vala` to test new storage classes
+2. Add tests for each prefix storage class
+3. Add tests for each entity facade class
+
+## Benefits of New Design
+
+1. **Clear Separation**: Each prefix store has one responsibility
+2. **Composable**: Facades compose stores as needed
+3. **Testable**: Small, focused classes are easier to test
+4. **Discoverable**: `engine.category_store.add_member()` is self-documenting
+5. **Flexible**: Can use low-level stores directly if needed
+6. **No Broken Abstractions**: No casting to concrete types
+7. **Clear Key Schema**: All prefixes documented in one place
+
+## Transaction Model
+
+The new architecture uses a **transaction-per-write-request** model:
+
+### Behavior
+
+| Operation Type | Transaction Behavior |
+|----------------|---------------------|
+| **Write operations** | Automatically wrapped in a transaction |
+| **Read operations** | No transaction (no overhead) |
+| **Hooks** | Run within the same transaction as the triggering write |
+
+### Benefits
+
+1. **Atomicity where needed**: Create document + add to container = one transaction
+2. **Hooks are atomic**: Update document + category membership updates = one transaction
+3. **No explicit transaction management**: Caller doesn't need to think about transactions
+4. **Consistent behavior**: Same model works for both EmbeddedEngine and RemoteEngine
+5. **No read overhead**: Reads don't pay transaction cost
+
+### Implementation
+
+#### EmbeddedEngine
+
+Write operations on entities wrap their work in a transaction:
+
+```vala
+// In Container.create_child()
+public Document create_child(string name) throws EntityError {
+    Document? doc = null;
+    _engine.with_write_transaction(() => {
+        // Create entity metadata
+        _engine.entity_store.store_metadata(path.child(name), EntityType.DOCUMENT, type_label);
+        
+        // Add to container's children
+        _engine.container_store.add_child(path, name);
+        
+        // Create document entity
+        doc = new Document(_engine, path.child(name));
+        
+        // Hooks run within same transaction
+        _engine.hooks.run_after_create(doc);
+    });
+    return doc;
+}
+```
+
+The Engine provides a helper method:
+
+```vala
+public class EmbeddedEngine : Object, Core.Engine {
+    private Dbm _dbm;
+    
+    public void with_write_transaction(WriteTransactionDelegate delegate) throws Error {
+        _dbm.with_transaction(() => delegate());
+    }
+}
+```
+
+#### RemoteEngine (Server Side)
+
+The server wraps each write request in a transaction:
+
+```vala
+// In ClientHandler
+void handle_create_document(Message request) {
+    _engine.with_write_transaction(() => {
+        // Process the entire request in one transaction
+        var path = request.get_path();
+        var type_label = request.get_type_label();
+        
+        _engine.entity_store.store_metadata(path, EntityType.DOCUMENT, type_label);
+        _engine.container_store.add_child(path.parent, path.name);
+        
+        // Hooks run within same transaction
+        var doc = new Document(_engine, path);
+        _engine.hooks.run_after_create(doc);
+    });
+}
+```
+
+### Examples
+
+#### Creating a Document
+
+```vala
+// Single transaction covers:
+// 1. Create entity metadata
+// 2. Add to container's children list
+// 3. Store initial properties
+// 4. Run after_create hooks (e.g., add to categories)
+var doc = container.create_child("my-doc");
+```
+
+#### Updating a Document
+
+```vala
+// Single transaction covers:
+// 1. Update properties
+// 2. Run after_update hooks (e.g., update category memberships, update indices)
+doc.set_property("status", "active");
+```
+
+#### Deleting a Document
+
+```vala
+// Single transaction covers:
+// 1. Remove from container's children list
+// 2. Delete entity metadata
+// 3. Delete properties
+// 4. Run after_delete hooks (e.g., remove from categories, remove from indices)
+doc.delete();
+```
+
+## Decisions Made
+
+1. **Caching**: Deferred - no caching layer needed in the initial implementation. Can be added later if performance profiling indicates it's needed.
+
+2. **Batch Operations**: Added `add_members()` and `remove_members()` batch methods to `CategoryIndexStorage` and `CategoryStore` for efficient bulk updates during reindexing operations.
+
+3. **Transaction Model**: Transaction-per-write-request model selected. Write operations are automatically wrapped in transactions, reads have no transaction overhead, and hooks run within the same transaction as the triggering write.

+ 1264 - 0
Architecture/13-Transaction-Batching.md

@@ -0,0 +1,1264 @@
+# Transaction Batching Design
+
+This document describes the design for batching hook execution at transaction commit time to improve performance by avoiding individual hook invocations for each insert operation.
+
+## Problem Statement
+
+### Current Hook Execution Flow
+
+The current implementation invokes hooks immediately for each entity change:
+
+```mermaid
+sequenceDiagram
+    participant App as Application
+    participant Engine as EmbeddedEngine
+    participant Hook as HookManager
+    participant Cat as Category
+    participant Storage as IndexManager
+    
+    App->>Engine: create_document with type_label
+    Engine->>Engine: store document
+    Engine->>Hook: notify_entity_change CREATED
+    Hook->>Cat: on_entity_change CREATED
+    Cat->>Storage: add_to_category
+    Storage->>Storage: write to DBM
+    
+    App->>Engine: set_property on document
+    Engine->>Engine: store property
+    Engine->>Hook: notify_document_property_change
+    Hook->>Cat: on_document_property_change
+    Cat->>Storage: update_category_membership
+    Storage->>Storage: write to DBM
+    
+    Note over App,Storage: Each operation triggers immediate storage I/O
+```
+
+### Performance Issues
+
+From performance benchmarks in PERF.md:
+
+| Operation | Avg Time | Ops/sec |
+|-----------|----------|---------|
+| create_document_small | ~1.5ms | ~640 |
+| create_category | ~17ms | ~58 |
+| create_catalogue | ~58ms | ~17 |
+| create_index | ~4000ms | ~0.25 |
+
+The issues are:
+
+1. **Immediate Hook Invocation**: Each document operation triggers all registered hooks immediately
+2. **Multiple Storage Writes**: Each hook invocation performs separate storage I/O
+3. **No Batching Opportunity**: Hooks cannot optimize for bulk operations
+4. **Transaction Overhead**: Each hook may start its own transaction or write outside transaction scope
+
+### Example: Bulk Document Insert
+
+When inserting 1000 documents in a transaction:
+
+```vala
+var tx = engine.begin_transaction();
+try {
+    for (int i = 0; i < 1000; i++) {
+        var doc = container.create_document("doc%d".printf(i), "Post");
+        doc.set_entity_property("title", new ValueElement("Title %d".printf(i)));
+        doc.set_entity_property("author", new ValueElement("user123"));
+        // Each create and set_property triggers hooks immediately!
+    }
+    tx.commit();
+} catch (Error e) {
+    tx.rollback();
+}
+```
+
+Current behavior:
+- 1000 document creations × N hooks = 1000N hook invocations
+- 2000 property sets × M hooks = 2000M hook invocations
+- Each invocation does storage I/O
+
+## Proposed Solution
+
+### Batched Hook Execution
+
+Hooks should be accumulated during a transaction and executed in batch at commit time:
+
+```mermaid
+sequenceDiagram
+    participant App as Application
+    participant Engine as EmbeddedEngine
+    participant Batch as HookBatch
+    participant Hook as HookManager
+    participant Cat as Category
+    participant Storage as IndexManager
+    
+    App->>Engine: begin_transaction
+    Engine->>Batch: create new HookBatch
+    
+    loop For each document
+        App->>Engine: create_document
+        Engine->>Engine: store document
+        Engine->>Batch: record CREATED event
+        Note over Batch: Event queued, no hook call
+        
+        App->>Engine: set_property
+        Engine->>Engine: store property
+        Engine->>Batch: record MODIFIED event
+        Note over Batch: Event queued, no hook call
+    end
+    
+    App->>Engine: commit_transaction
+    Engine->>Batch: flush
+    Batch->>Hook: execute_batch events
+    Hook->>Cat: on_batch_change events
+    Cat->>Storage: batch_update_category
+    Storage->>Storage: single batched write
+    
+    Note over App,Storage: All hooks executed once with all changes
+```
+
+### Key Components
+
+#### 1. HookEvent Record
+
+Represents a single entity change event:
+
+```vala
+namespace Implexus.Engine {
+
+/**
+ * Represents a queued hook event for batched processing.
+ */
+public class HookEvent : Object {
+    /**
+     * The type of change that occurred.
+     */
+    public EntityChangeType change_type { get; construct set; }
+    
+    /**
+     * The entity path affected.
+     */
+    public Core.EntityPath entity_path { get; construct set; }
+    
+    /**
+     * The entity type.
+     */
+    public Core.EntityType entity_type { get; construct set; }
+    
+    /**
+     * The type label for documents.
+     */
+    public string? type_label { get; construct set; }
+    
+    /**
+     * The property name for property changes.
+     */
+    public string? property_name { get; construct set; }
+    
+    /**
+     * The old property value.
+     */
+    public Invercargill.Element? old_value { get; construct set; }
+    
+    /**
+     * The new property value.
+     */
+    public Invercargill.Element? new_value { get; construct set; }
+    
+    /**
+     * Creates a new hook event.
+     */
+    public HookEvent(
+        EntityChangeType change_type,
+        Core.EntityPath entity_path,
+        Core.EntityType entity_type,
+        string? type_label = null,
+        string? property_name = null,
+        Invercargill.Element? old_value = null,
+        Invercargill.Element? new_value = null
+    ) {
+        Object(
+            change_type: change_type,
+            entity_path: entity_path,
+            entity_type: entity_type,
+            type_label: type_label,
+            property_name: property_name,
+            old_value: old_value,
+            new_value: new_value
+        );
+    }
+}
+
+} // namespace Implexus.Engine
+```
+
+#### 2. HookBatch Class
+
+Accumulates events during a transaction:
+
+```vala
+namespace Implexus.Engine {
+
+/**
+ * Accumulates hook events during a transaction for batched execution.
+ *
+ * The HookBatch collects entity change events during a transaction
+ * and provides methods to consolidate and execute them efficiently.
+ */
+public class HookBatch : Object {
+    
+    // === Private Fields ===
+    
+    /**
+     * Accumulated events in order of occurrence.
+     */
+    private Invercargill.DataStructures.Vector<HookEvent> _events;
+    
+    /**
+     * Map of entity path to final state for consolidation.
+     */
+    private Invercargill.DataStructures.Dictionary<string, EntityFinalState> _entity_states;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new empty HookBatch.
+     */
+    public HookBatch() {
+        _events = new Invercargill.DataStructures.Vector<HookEvent>();
+        _entity_states = new Invercargill.DataStructures.Dictionary<string, EntityFinalState>();
+    }
+    
+    // === Event Recording ===
+    
+    /**
+     * Records an entity creation event.
+     *
+     * @param entity The entity that was created
+     */
+    public void record_created(Core.Entity entity) {
+        var evt = new HookEvent(
+            EntityChangeType.CREATED,
+            entity.path,
+            entity.entity_type,
+            entity.type_label
+        );
+        _events.add(evt);
+        update_entity_state(entity.path.to_string(), StateChangeType.CREATED, entity);
+    }
+    
+    /**
+     * Records an entity modification event.
+     *
+     * @param entity The entity that was modified
+     */
+    public void record_modified(Core.Entity entity) {
+        var evt = new HookEvent(
+            EntityChangeType.MODIFIED,
+            entity.path,
+            entity.entity_type,
+            entity.type_label
+        );
+        _events.add(evt);
+        update_entity_state(entity.path.to_string(), StateChangeType.MODIFIED, entity);
+    }
+    
+    /**
+     * Records an entity deletion event.
+     *
+     * @param path The path of the deleted entity
+     * @param entity_type The type of the deleted entity
+     * @param type_label The type label if it was a document
+     */
+    public void record_deleted(
+        Core.EntityPath path,
+        Core.EntityType entity_type,
+        string? type_label
+    ) {
+        var evt = new HookEvent(
+            EntityChangeType.DELETED,
+            path,
+            entity_type,
+            type_label
+        );
+        _events.add(evt);
+        update_entity_state(path.to_string(), StateChangeType.DELETED, null);
+    }
+    
+    /**
+     * Records a property change event.
+     *
+     * @param document The document whose property changed
+     * @param property_name The name of the property
+     * @param old_value The previous value
+     * @param new_value The new value
+     */
+    public void record_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        var evt = new HookEvent(
+            EntityChangeType.MODIFIED,
+            document.path,
+            document.entity_type,
+            document.type_label,
+            property_name,
+            old_value,
+            new_value
+        );
+        _events.add(evt);
+        
+        // Track property changes in entity state
+        var path = document.path.to_string();
+        if (!_entity_states.has_key(path)) {
+            _entity_states.set(path, new EntityFinalState(document));
+        }
+        var state = _entity_states.get(path);
+        state.record_property_change(property_name, old_value, new_value);
+    }
+    
+    // === Event Consolidation ===
+    
+    /**
+     * Gets consolidated events for efficient batch processing.
+     *
+     * This method consolidates multiple events for the same entity:
+     * - CREATED followed by MODIFIED → just CREATED with final state
+     * - Multiple MODIFIED → single MODIFIED with final state
+     * - CREATED followed by DELETED → no events (cancelled out)
+     *
+     * @return Consolidated vector of events
+     */
+    public Invercargill.DataStructures.Vector<HookEvent> get_consolidated_events() {
+        var consolidated = new Invercargill.DataStructures.Vector<HookEvent>();
+        
+        // Group by entity path
+        var events_by_path = new Invercargill.DataStructures.Dictionary<
+            string, 
+            Invercargill.DataStructures.Vector<HookEvent>
+        >();
+        
+        foreach (var evt in _events) {
+            var path = evt.entity_path.to_string();
+            if (!events_by_path.has_key(path)) {
+                events_by_path.set(path, new Invercargill.DataStructures.Vector<HookEvent>());
+            }
+            events_by_path.get(path).add(evt);
+        }
+        
+        // Consolidate each entity's events
+        foreach (var entry in events_by_path.entries) {
+            var path = entry.key;
+            var events = entry.value;
+            var final_event = consolidate_entity_events(events);
+            
+            if (final_event != null) {
+                consolidated.add((!) final_event);
+            }
+        }
+        
+        return consolidated;
+    }
+    
+    /**
+     * Consolidates events for a single entity.
+     */
+    private HookEvent? consolidate_entity_events(
+        Invercargill.DataStructures.Vector<HookEvent> events
+    ) {
+        if (events.peek_count() == 0) {
+            return null;
+        }
+        
+        bool was_created = false;
+        bool was_deleted = false;
+        HookEvent? last_event = null;
+        
+        foreach (var evt in events) {
+            switch (evt.change_type) {
+                case EntityChangeType.CREATED:
+                    was_created = true;
+                    break;
+                case EntityChangeType.DELETED:
+                    was_deleted = true;
+                    break;
+                case EntityChangeType.MODIFIED:
+                    break;
+            }
+            last_event = evt;
+        }
+        
+        // If created and deleted in same transaction, cancel out
+        if (was_created && was_deleted) {
+            return null;
+        }
+        
+        // Return appropriate event
+        if (was_created) {
+            return new HookEvent(
+                EntityChangeType.CREATED,
+                last_event.entity_path,
+                last_event.entity_type,
+                last_event.type_label
+            );
+        }
+        
+        if (was_deleted) {
+            return new HookEvent(
+                EntityChangeType.DELETED,
+                last_event.entity_path,
+                last_event.entity_type,
+                last_event.type_label
+            );
+        }
+        
+        // Just modifications
+        return new HookEvent(
+            EntityChangeType.MODIFIED,
+            last_event.entity_path,
+            last_event.entity_type,
+            last_event.type_label
+        );
+    }
+    
+    // === Batch Execution ===
+    
+    /**
+     * Executes all batched events through the hook manager.
+     *
+     * @param hook_manager The hook manager to notify
+     */
+    public void execute(HookManager hook_manager) {
+        var consolidated = get_consolidated_events();
+        
+        foreach (var evt in consolidated) {
+            switch (evt.change_type) {
+                case EntityChangeType.CREATED:
+                case EntityChangeType.MODIFIED:
+                case EntityChangeType.DELETED:
+                    // Get entity if not deleted
+                    Core.Entity? entity = null;
+                    if (evt.change_type != EntityChangeType.DELETED) {
+                        entity = hook_manager.engine.get_entity_or_null(evt.entity_path);
+                    }
+                    
+                    if (entity != null || evt.change_type == EntityChangeType.DELETED) {
+                        hook_manager.notify_entity_change_from_event(evt, entity);
+                    }
+                    break;
+            }
+        }
+        
+        // Execute property change events
+        execute_property_changes(hook_manager);
+    }
+    
+    /**
+     * Executes property change events.
+     */
+    private void execute_property_changes(HookManager hook_manager) {
+        foreach (var entry in _entity_states.entries) {
+            var state = entry.value;
+            foreach (var prop_change in state.property_changes.entries) {
+                var property_name = prop_change.key;
+                var change = prop_change.value;
+                
+                if (state.entity != null) {
+                    hook_manager.notify_document_property_change(
+                        state.entity,
+                        property_name,
+                        change.old_value,
+                        change.new_value
+                    );
+                }
+            }
+        }
+    }
+    
+    // === Utility Methods ===
+    
+    /**
+     * Updates the entity state tracking.
+     */
+    private void update_entity_state(
+        string path,
+        StateChangeType change_type,
+        Core.Entity? entity
+    ) {
+        if (!_entity_states.has_key(path)) {
+            if (entity != null) {
+                _entity_states.set(path, new EntityFinalState((!) entity));
+            }
+        }
+        
+        var state = _entity_states.get(path);
+        if (state != null) {
+            state.record_change(change_type);
+        }
+    }
+    
+    /**
+     * Clears all accumulated events.
+     */
+    public void clear() {
+        _events.clear();
+        _entity_states.clear();
+    }
+    
+    /**
+     * Gets the number of accumulated events.
+     */
+    public int event_count {
+        get { return (int) _events.peek_count(); }
+    }
+    
+    /**
+     * Checks if there are any events to process.
+     */
+    public bool has_events {
+        get { return _events.peek_count() > 0; }
+    }
+}
+
+/**
+ * Tracks the final state of an entity during a transaction.
+ */
+internal class EntityFinalState : Object {
+    public Core.Entity entity;
+    public bool was_created = false;
+    public bool was_deleted = false;
+    public Invercargill.DataStructures.Dictionary<string, PropertyChange> property_changes;
+    
+    public EntityFinalState(Core.Entity entity) {
+        this.entity = entity;
+        this.property_changes = new Invercargill.DataStructures.Dictionary<string, PropertyChange>();
+    }
+    
+    public void record_change(StateChangeType change_type) {
+        switch (change_type) {
+            case StateChangeType.CREATED:
+                was_created = true;
+                break;
+            case StateChangeType.DELETED:
+                was_deleted = true;
+                break;
+            case StateChangeType.MODIFIED:
+                break;
+        }
+    }
+    
+    public void record_property_change(
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        if (!property_changes.has_key(property_name)) {
+            property_changes.set(property_name, new PropertyChange(old_value, new_value));
+        } else {
+            // Update the new value, keep the original old value
+            var existing = property_changes.get(property_name);
+            existing.new_value = new_value;
+        }
+    }
+}
+
+/**
+ * Represents a property change with old and new values.
+ */
+internal class PropertyChange : Object {
+    public Invercargill.Element? old_value;
+    public Invercargill.Element? new_value;
+    
+    public PropertyChange(Invercargill.Element? old_value, Invercargill.Element? new_value) {
+        this.old_value = old_value;
+        this.new_value = new_value;
+    }
+}
+
+internal enum StateChangeType {
+    CREATED,
+    MODIFIED,
+    DELETED
+}
+
+} // namespace Implexus.Engine
+```
+
+#### 3. BatchedHookHandler Interface
+
+New interface for hooks that support batch processing:
+
+```vala
+namespace Implexus.Engine {
+
+/**
+ * Interface for hooks that can process batched events efficiently.
+ *
+ * Implementing this interface allows hooks to optimize their index
+ * updates when processing multiple changes at once.
+ */
+public interface BatchedHookHandler : Object, EntityChangeHandler {
+    
+    /**
+     * Called with a batch of entity changes.
+     *
+     * This method receives all changes for entities matching the
+     * hook's type filter. The hook can optimize storage writes
+     * by processing all changes together.
+     *
+     * @param events The consolidated events for matching entities
+     */
+    public abstract void on_batch_change(Invercargill.DataStructures.Vector<HookEvent> events);
+    
+    /**
+     * Called with batched property changes.
+     *
+     * @param document The document that changed
+     * @param changes Map of property name to old/new values
+     */
+    public abstract void on_batch_property_change(
+        Core.Entity document,
+        Invercargill.DataStructures.Dictionary<string, PropertyChange> changes
+    );
+    
+    /**
+     * Indicates whether this handler prefers batch processing.
+     *
+     * If true, on_batch_change will be called instead of individual
+     * on_entity_change calls.
+     */
+    public abstract bool supports_batch { get; }
+}
+
+} // namespace Implexus.Engine
+```
+
+#### 4. Modified HookManager
+
+Updated HookManager with batch support:
+
+```vala
+namespace Implexus.Engine {
+
+/**
+ * Manages hooks for entity change notifications with batch support.
+ */
+public class HookManager : Object {
+    
+    // === Private Fields ===
+    
+    private GLib.List<EntityChangeHandler> _handlers;
+    private GLib.List<DocumentPropertyChangeHandler> _property_handlers;
+    private GLib.List<BatchedHookHandler> _batched_handlers;
+    
+    /**
+     * The engine this hook manager is associated with.
+     */
+    public weak Core.Engine engine { get; set; }
+    
+    /**
+     * The current batch for transaction mode, or null if not in transaction.
+     */
+    private HookBatch? _current_batch = null;
+    
+    /**
+     * Whether batch mode is active (i.e., within a transaction).
+     */
+    private bool _batch_mode = false;
+    
+    // === Constructors ===
+    
+    public HookManager() {
+        _handlers = new GLib.List<EntityChangeHandler>();
+        _property_handlers = new GLib.List<DocumentPropertyChangeHandler>();
+        _batched_handlers = new GLib.List<BatchedHookHandler>();
+    }
+    
+    // === Batch Mode Control ===
+    
+    /**
+     * Begins batch mode for transaction processing.
+     *
+     * In batch mode, all events are accumulated instead of being
+     * immediately dispatched to handlers.
+     */
+    public void begin_batch() {
+        _batch_mode = true;
+        _current_batch = new HookBatch();
+    }
+    
+    /**
+     * Commits the current batch, executing all accumulated events.
+     */
+    public void commit_batch() {
+        if (_current_batch == null) {
+            return;
+        }
+        
+        // Execute batch for batched handlers
+        execute_batch_for_handlers((!) _current_batch);
+        
+        // Also execute individual events for non-batched handlers
+        ((!) _current_batch).execute(this);
+        
+        // Clear batch
+        _current_batch = null;
+        _batch_mode = false;
+    }
+    
+    /**
+     * Rolls back the current batch, discarding all accumulated events.
+     */
+    public void rollback_batch() {
+        if (_current_batch != null) {
+            ((!) _current_batch).clear();
+        }
+        _current_batch = null;
+        _batch_mode = false;
+    }
+    
+    // === Event Notification ===
+    
+    /**
+     * Notifies handlers of an entity change.
+     *
+     * In batch mode, events are queued. Otherwise, handlers are
+     * invoked immediately.
+     */
+    public void notify_entity_change(Core.Entity entity, EntityChangeType change_type) {
+        if (_batch_mode && _current_batch != null) {
+            // Queue the event
+            switch (change_type) {
+                case EntityChangeType.CREATED:
+                    ((!) _current_batch).record_created(entity);
+                    break;
+                case EntityChangeType.MODIFIED:
+                    ((!) _current_batch).record_modified(entity);
+                    break;
+                case EntityChangeType.DELETED:
+                    ((!) _current_batch).record_deleted(
+                        entity.path,
+                        entity.entity_type,
+                        entity.type_label
+                    );
+                    break;
+            }
+        } else {
+            // Immediate dispatch
+            notify_entity_change_immediate(entity, change_type);
+        }
+    }
+    
+    /**
+     * Notifies handlers of a document property change.
+     */
+    public void notify_document_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        if (_batch_mode && _current_batch != null) {
+            ((!) _current_batch).record_property_change(
+                document,
+                property_name,
+                old_value,
+                new_value
+            );
+        } else {
+            notify_property_change_immediate(document, property_name, old_value, new_value);
+        }
+    }
+    
+    // === Internal Methods ===
+    
+    /**
+     * Notifies handlers from a stored event.
+     */
+    internal void notify_entity_change_from_event(HookEvent evt, Core.Entity? entity) {
+        if (entity != null) {
+            notify_entity_change_immediate((!) entity, evt.change_type);
+        }
+    }
+    
+    /**
+     * Immediately notifies all handlers.
+     */
+    private void notify_entity_change_immediate(Core.Entity entity, EntityChangeType change_type) {
+        foreach (var handler in _handlers) {
+            try {
+                handler.on_entity_change(entity, change_type);
+            } catch (Error e) {
+                warning("Hook handler threw error for %s: %s",
+                    entity.path.to_string(), e.message);
+            }
+        }
+    }
+    
+    /**
+     * Immediately notifies all property handlers.
+     */
+    private void notify_property_change_immediate(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        foreach (var handler in _property_handlers) {
+            try {
+                handler.on_document_property_change(document, property_name, old_value, new_value);
+            } catch (Error e) {
+                warning("Property hook handler threw error for %s.%s: %s",
+                    document.path.to_string(), property_name, e.message);
+            }
+        }
+    }
+    
+    /**
+     * Executes batch for handlers that support batch processing.
+     */
+    private void execute_batch_for_handlers(HookBatch batch) {
+        var consolidated = batch.get_consolidated_events();
+        
+        foreach (var handler in _batched_handlers) {
+            if (handler.supports_batch) {
+                try {
+                    // Filter events by type_label if handler is type-specific
+                    var filtered = filter_events_for_handler(consolidated, handler);
+                    if (filtered.peek_count() > 0) {
+                        handler.on_batch_change(filtered);
+                    }
+                } catch (Error e) {
+                    warning("Batched hook handler threw error: %s", e.message);
+                }
+            }
+        }
+    }
+    
+    /**
+     * Filters events to only those relevant to a handler.
+     */
+    private Invercargill.DataStructures.Vector<HookEvent> filter_events_for_handler(
+        Invercargill.DataStructures.Vector<HookEvent> events,
+        BatchedHookHandler handler
+    ) {
+        // If handler implements TypeFilteredHook, filter by type_label
+        var filtered = new Invercargill.DataStructures.Vector<HookEvent>();
+        
+        // For now, return all events - handlers can filter internally
+        foreach (var evt in events) {
+            filtered.add(evt);
+        }
+        
+        return filtered;
+    }
+    
+    // === Handler Registration ===
+    
+    /**
+     * Registers a handler for entity changes.
+     */
+    public void register_handler(EntityChangeHandler handler) {
+        _handlers.append(handler);
+        
+        // Also track as batched handler if applicable
+        if (handler is BatchedHookHandler) {
+            _batched_handlers.append((BatchedHookHandler) handler);
+        }
+    }
+    
+    /**
+     * Unregisters a handler.
+     */
+    public void unregister_handler(EntityChangeHandler handler) {
+        _handlers.remove(handler);
+        
+        if (handler is BatchedHookHandler) {
+            _batched_handlers.remove((BatchedHookHandler) handler);
+        }
+    }
+    
+    /**
+     * Registers a handler for property changes.
+     */
+    public void register_property_handler(DocumentPropertyChangeHandler handler) {
+        _property_handlers.append(handler);
+    }
+    
+    /**
+     * Unregisters a property handler.
+     */
+    public void unregister_property_handler(DocumentPropertyChangeHandler handler) {
+        _property_handlers.remove(handler);
+    }
+    
+    // === Utility Methods ===
+    
+    public void clear_all() {
+        _handlers = new GLib.List<EntityChangeHandler>();
+        _property_handlers = new GLib.List<DocumentPropertyChangeHandler>();
+        _batched_handlers = new GLib.List<BatchedHookHandler>();
+    }
+    
+    public uint handler_count {
+        get { return _handlers.length(); }
+    }
+    
+    public uint property_handler_count {
+        get { return _property_handlers.length(); }
+    }
+}
+
+} // namespace Implexus.Engine
+```
+
+### Modified EmbeddedTransaction
+
+The EmbeddedTransaction needs to integrate with the batch system:
+
+```vala
+namespace Implexus.Engine {
+
+public class EmbeddedTransaction : Object, Core.Transaction {
+    
+    private weak EmbeddedEngine _engine;
+    private bool _active = true;
+    private Invercargill.DataStructures.Vector<PendingOperation> _operations;
+    private Invercargill.DataStructures.Dictionary<string, Invercargill.Element?> _snapshots;
+    
+    public EmbeddedTransaction(EmbeddedEngine engine) throws Core.EngineError {
+        _engine = engine;
+        _operations = new Invercargill.DataStructures.Vector<PendingOperation>();
+        _snapshots = new Invercargill.DataStructures.Dictionary<string, Invercargill.Element?>();
+        
+        // Begin hook batching
+        engine.hook_manager.begin_batch();
+        
+        // Notify engine that transaction started
+        engine.begin_transaction_internal();
+    }
+    
+    public bool active {
+        get { return _active; }
+    }
+    
+    public void commit() throws Core.EngineError {
+        if (!_active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("Transaction is not active");
+        }
+        
+        try {
+            // Apply all pending operations
+            foreach (var op in _operations) {
+                apply_operation(op);
+            }
+            
+            // Commit hook batch - executes all accumulated hooks
+            _engine.hook_manager.commit_batch();
+            
+            // Clear and deactivate
+            _operations.clear();
+            _snapshots.clear();
+            _active = false;
+            
+            // Notify engine that transaction ended
+            _engine.end_transaction_internal();
+        } catch (Core.EngineError e) {
+            rollback();
+            throw e;
+        }
+    }
+    
+    public void rollback() {
+        if (!_active) {
+            return;
+        }
+        
+        // Rollback hook batch - discards all accumulated hooks
+        _engine.hook_manager.rollback_batch();
+        
+        // Restore snapshots
+        foreach (var key in _snapshots.keys) {
+            var value = _snapshots.get(key);
+            if (value != null && !((!) value).is_null()) {
+                // Would restore the snapshot
+            }
+        }
+        
+        // Clear and deactivate
+        _operations.clear();
+        _snapshots.clear();
+        _active = false;
+        
+        // Notify engine that transaction ended
+        _engine.end_transaction_internal();
+    }
+    
+    // ... rest of the class remains the same ...
+}
+
+} // namespace Implexus.Engine
+```
+
+### Updated Indexed Entity Implementations
+
+Indexed entities should implement BatchedHookHandler for optimal performance:
+
+```vala
+namespace Implexus.Entities {
+
+public class Category : AbstractEntity, Engine.BatchedHookHandler {
+    
+    // ... existing code ...
+    
+    /**
+     * Indicates this handler supports batch processing.
+     */
+    public bool supports_batch {
+        get { return true; }
+    }
+    
+    /**
+     * Handles a batch of entity changes efficiently.
+     *
+     * This method processes all changes in one pass, then performs
+     * a single batch update to the category index.
+     */
+    public void on_batch_change(Invercargill.DataStructures.Vector<Engine.HookEvent> events) {
+        ensure_config_loaded();
+        
+        var to_add = new Invercargill.DataStructures.Vector<string>();
+        var to_remove = new Invercargill.DataStructures.Vector<string>();
+        
+        foreach (var evt in events) {
+            // Skip non-documents and wrong type
+            if (evt.entity_type != Core.EntityType.DOCUMENT) {
+                continue;
+            }
+            if (evt.type_label != _type_label) {
+                continue;
+            }
+            
+            var doc_path = evt.entity_path.to_string();
+            
+            switch (evt.change_type) {
+                case Engine.EntityChangeType.CREATED:
+                    // Evaluate predicate and add if matches
+                    var entity = _engine.get_entity_or_null(evt.entity_path);
+                    if (entity != null && evaluate_predicate((!) entity)) {
+                        to_add.add(doc_path);
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.MODIFIED:
+                    // Re-evaluate and update membership
+                    var entity = _engine.get_entity_or_null(evt.entity_path);
+                    if (entity != null) {
+                        bool should_include = evaluate_predicate((!) entity);
+                        bool is_included = contains_document(doc_path);
+                        
+                        if (should_include && !is_included) {
+                            to_add.add(doc_path);
+                        } else if (!should_include && is_included) {
+                            to_remove.add(doc_path);
+                        }
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.DELETED:
+                    to_remove.add(doc_path);
+                    break;
+            }
+        }
+        
+        // Batch update the index
+        try {
+            batch_update_members(to_add, to_remove);
+        } catch (Storage.StorageError e) {
+            warning("Failed to batch update category: %s", e.message);
+        }
+    }
+    
+    /**
+     * Batch updates the category membership.
+     */
+    private void batch_update_members(
+        Invercargill.DataStructures.Vector<string> to_add,
+        Invercargill.DataStructures.Vector<string> to_remove
+    ) throws Storage.StorageError {
+        var index_manager = get_index_manager();
+        if (index_manager == null) {
+            return;
+        }
+        
+        // Add all new members
+        foreach (var path in to_add) {
+            ((!) index_manager).add_to_category(_path.to_string(), path);
+        }
+        
+        // Remove all deleted members
+        foreach (var path in to_remove) {
+            ((!) index_manager).remove_from_category(_path.to_string(), path);
+        }
+    }
+    
+    /**
+     * Handles batched property changes.
+     */
+    public void on_batch_property_change(
+        Core.Entity document,
+        Invercargill.DataStructures.Dictionary<string, Engine.PropertyChange> changes
+    ) {
+        // For category, we just need to re-evaluate the predicate
+        // This is handled by on_batch_change
+    }
+    
+    // ... rest of the class ...
+}
+
+} // namespace Implexus.Entities
+```
+
+## API Changes
+
+### New Interfaces
+
+| Interface | Purpose |
+|-----------|---------|
+| `BatchedHookHandler` | Hooks that support batch processing |
+| `HookEvent` | Represents a single entity change event |
+| `HookBatch` | Accumulates events during transaction |
+
+### Modified Classes
+
+| Class | Changes |
+|-------|---------|
+| `HookManager` | Added batch mode support |
+| `EmbeddedTransaction` | Integrates with batch system |
+| `Category` | Implements `BatchedHookHandler` |
+| `Catalogue` | Implements `BatchedHookHandler` |
+| `Index` | Implements `BatchedHookHandler` |
+
+### No Public API Changes
+
+The batching is entirely internal - applications continue to use:
+
+```vala
+var tx = engine.begin_transaction();
+try {
+    // ... operations ...
+    tx.commit();
+} catch (Error e) {
+    tx.rollback();
+}
+```
+
+## Thread-Safety Considerations
+
+### Single-Threaded Design
+
+The current design assumes single-threaded access:
+
+1. **Transaction Isolation**: Only one transaction per engine at a time
+2. **Batch Ownership**: Batch belongs to the current transaction
+3. **Handler Registration**: Handlers should be registered before transactions
+
+### Thread-Safety Recommendations
+
+If multi-threading is needed in the future:
+
+```vala
+public class HookManager : Object {
+    private GLib.Mutex _batch_mutex;
+    
+    public void begin_batch() {
+        _batch_mutex.lock();
+        // ... existing code ...
+        // Note: Vala has no try/finally, so unlock must be called on
+        // every exit path (including before any early return or throw).
+        _batch_mutex.unlock();
+    }
+    
+    // ... other methods with mutex protection ...
+}
+```
+
+### Event Accumulation Safety
+
+The HookBatch uses Invercargill data structures which are not thread-safe:
+- Events should only be added from the transaction thread
+- Batch execution happens on the commit thread
+- No concurrent access to batches
+
+## Performance Implications
+
+### Expected Improvements
+
+| Scenario | Current | With Batching | Improvement |
+|----------|---------|---------------|-------------|
+| 1000 doc inserts | 1000 × N hook calls (N = registered handlers) | 1 batch call per handler | ~1000× |
+| 1000 property sets | 1000 × M hook calls (M = property handlers) | 1 batch call per handler | ~1000× |
+| Mixed operations | O(n) hook calls | O(1) batch calls | ~n× |
+
+### Memory Overhead
+
+During a transaction:
+- Events stored in memory until commit
+- Entity states tracked for consolidation
+- Property changes accumulated
+
+For 10,000 operations:
+- ~10,000 HookEvent objects (~160 bytes each) = ~1.6MB
+- Entity state dictionary ~500KB
+- Total overhead: ~2MB (acceptable)
+
+### Trade-offs
+
+| Aspect | Benefit | Cost |
+|--------|---------|------|
+| Latency | Lower per-operation | Higher at commit |
+| Throughput | Much higher overall | Slightly higher memory |
+| Complexity | Transparent to users | Internal complexity |
+| Error Handling | Atomic batch | All-or-nothing |
+
+## Migration Path
+
+### Phase 1: Add Batch Infrastructure
+
+1. Add `HookEvent`, `HookBatch` classes
+2. Add `BatchedHookHandler` interface
+3. Update `HookManager` with batch mode
+4. No behavior changes yet
+
+### Phase 2: Integrate with Transactions
+
+1. Update `EmbeddedTransaction` to use batch mode
+2. Enable batch mode in `begin_transaction()`
+3. Execute batch in `commit()`
+4. Discard batch in `rollback()`
+
+### Phase 3: Update Indexed Entities
+
+1. Update `Category` to implement `BatchedHookHandler`
+2. Update `Catalogue` to implement `BatchedHookHandler`
+3. Update `Index` to implement `BatchedHookHandler`
+4. Optimize batch update methods in `IndexManager`
+
+### Phase 4: Testing and Optimization
+
+1. Add unit tests for batch processing
+2. Add performance benchmarks
+3. Optimize hot paths
+4. Profile and tune memory usage
+
+## Summary
+
+This design introduces batched hook execution for transactions:
+
+1. **Accumulate Events**: Hook events are queued during transactions
+2. **Consolidate Events**: Multiple events for same entity are merged
+3. **Batch Execute**: All events processed at commit time
+4. **Optimized Updates**: Indexed entities can batch their storage writes
+
+The result is significantly improved performance for bulk operations while maintaining the same public API and transaction semantics.

+ 1434 - 0
Architecture/14-Migration-System.md

@@ -0,0 +1,1434 @@
+# Migration System Architecture
+
+## Document Status
+- **Created**: 2026-03-13
+- **Status**: Draft
+- **Author**: Architecture design session
+
+## Overview
+
+This document describes the architecture for a comprehensive migration system that supports application-defined migrations for the Implexus database. The system provides ordered execution, transaction safety, and migration history tracking.
+
+## Requirements
+
+The migration system must:
+1. **Handle initial setup routine** - Support an initial setup/bootstrap migration
+2. **Single transaction execution** - Run migration logic within a single transaction
+3. **Track migration history** - Keep track of which migrations have been run
+4. **Migration interface** - Provide `up()` and `down()` functions for each migration
+5. **Ordered execution** - Run pending migrations in the correct order
+
+## Architecture Overview
+
+```mermaid
+graph TB
+    subgraph Application Layer
+        AM[Application Migrations<br/>User-defined classes]
+    end
+    
+    subgraph Migration System
+        MI[Migration Interface<br/>up/down methods]
+        MR[MigrationRunner<br/>Discovery and execution]
+        MS[MigrationStorage<br/>History tracking]
+        ME[MigrationError<br/>Error domain]
+    end
+    
+    subgraph Engine Layer
+        EE[EmbeddedEngine]
+        TX[Transaction Support]
+    end
+    
+    subgraph Storage Layer
+        DBM[Dbm Interface]
+    end
+    
+    AM --> MI
+    MI --> MR
+    MR --> MS
+    MR --> TX
+    MS --> DBM
+    TX --> DBM
+    EE --> MR
+    EE --> MS
+```
+
+## Components
+
+### 1. Migration Interface
+
+The `Migration` interface defines the contract that all migrations must implement.
+
+**File:** `src/Migrations/Migration.vala`
+
+```vala
+namespace Implexus.Migrations {
+
+/**
+ * Interface for database migrations.
+ *
+ * Migrations are application-defined classes that modify the database
+ * schema or data in a controlled, versioned manner.
+ *
+ * Each migration must implement:
+ * - A unique identifier (version)
+ * - An up() method to apply the migration
+ * - A down() method to reverse the migration (implementations that cannot
+ *   be reversed should throw MigrationError.IRREVERSIBLE)
+ *
+ * Example:
+ * {{{
+ * public class CreateUsersTable : Object, Migration {
+ *     public string version { get { return "2026031301"; } }
+ *     public string description { get { return "Create users container"; } }
+ *     
+ *     public void up(Core.Engine engine) throws MigrationError {
+ *         var root = engine.get_root();
+ *         try {
+ *             root.create_container("users");
+ *         } catch (Core.EngineError e) {
+ *             throw new MigrationError.EXECUTION_FAILED(
+ *                 "Failed to create users container: %s".printf(e.message)
+ *             );
+ *         }
+ *     }
+ *     
+ *     public void down(Core.Engine engine) throws MigrationError {
+ *         var root = engine.get_root();
+ *         try {
+ *             var users = root.get_child("users");
+ *             if (users != null) {
+ *                 users.delete();
+ *             }
+ *         } catch (Core.EngineError e) {
+ *             throw new MigrationError.EXECUTION_FAILED(
+ *                 "Failed to delete users container: %s".printf(e.message)
+ *             );
+ *         }
+ *     }
+ * }
+ * }}}
+ */
+public interface Migration : Object {
+    
+    /**
+     * Unique identifier for this migration.
+     *
+     * Best practice: Use a timestamp format like YYYYMMDDNN where NN is a
+     * sequence number within the day. This ensures proper ordering.
+     *
+     * Examples: "2026031301", "2026031302", "2026031501"
+     */
+    public abstract string version { owned get; }
+    
+    /**
+     * Human-readable description of what this migration does.
+     */
+    public abstract string description { owned get; }
+    
+    /**
+     * Applies the migration.
+     *
+     * This method is called when migrating forward. All operations
+     * are executed within a single transaction.
+     *
+     * @param engine The database engine to operate on
+     * @throws MigrationError if the migration fails
+     */
+    public abstract void up(Core.Engine engine) throws MigrationError;
+    
+    /**
+     * Reverses the migration.
+     *
+     * This method is called when rolling back. All operations
+     * are executed within a single transaction.
+     *
+     * Implementations may throw MigrationError.IRREVERSIBLE if the
+     * migration cannot be safely reversed.
+     *
+     * @param engine The database engine to operate on
+     * @throws MigrationError if the rollback fails
+     */
+    public abstract void down(Core.Engine engine) throws MigrationError;
+}
+
+} // namespace Implexus.Migrations
+```
+
+### 2. MigrationRunner
+
+The `MigrationRunner` discovers and executes migrations in the correct order.
+
+**File:** `src/Migrations/MigrationRunner.vala`
+
+```vala
+namespace Implexus.Migrations {
+
+/**
+ * Delegate for migration progress notifications.
+ */
+public delegate void MigrationProgressDelegate(Migration migration, bool is_up);
+
+/**
+ * Discovers and executes migrations in version order.
+ *
+ * The MigrationRunner is responsible for:
+ * - Registering available migrations
+ * - Determining which migrations need to run
+ * - Executing migrations in the correct order within transactions
+ * - Recording migration history
+ *
+ * Example usage:
+ * {{{
+ * var runner = new MigrationRunner(engine);
+ * 
+ * // Register migrations
+ * runner.register_migration(new CreateUsersTable());
+ * runner.register_migration(new AddEmailIndex());
+ * runner.register_migration(new SeedInitialData());
+ * 
+ * // Run all pending migrations
+ * try {
+ *     int count = runner.run_pending();
+ *     print("Ran %d migrations\n", count);
+ * } catch (MigrationError e) {
+ *     stderr.printf("Migration failed: %s\n", e.message);
+ * }
+ * }}}
+ */
+public class MigrationRunner : Object {
+    
+    // === Private Fields ===
+    
+    private weak Core.Engine _engine;
+    private MigrationStorage _storage;
+    private Invercargill.DataStructures.Dictionary<string, Migration> _migrations;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new MigrationRunner for the given engine.
+     *
+     * @param engine The database engine to migrate
+     */
+    public MigrationRunner(Core.Engine engine) {
+        _engine = engine;
+        _storage = new MigrationStorage(engine);
+        _migrations = new Invercargill.DataStructures.Dictionary<string, Migration>();
+    }
+    
+    // === Migration Registration ===
+    
+    /**
+     * Registers a migration to be available for execution.
+     *
+     * Migrations must be registered before calling run_pending() or rollback_to().
+     *
+     * @param migration The migration to register
+     * @throws MigrationError.VERSION_CONFLICT if a migration with the same version exists
+     */
+    public void register_migration(Migration migration) throws MigrationError {
+        var version = migration.version;
+        if (_migrations.has(version)) {
+            throw new MigrationError.VERSION_CONFLICT(
+                "Migration version %s is already registered".printf(version)
+            );
+        }
+        _migrations.set(version, migration);
+    }
+    
+    /**
+     * Registers multiple migrations at once.
+     *
+     * @param migrations Array of migrations to register
+     * @throws MigrationError if any registration fails
+     */
+    public void register_migrations(Migration[] migrations) throws MigrationError {
+        foreach (var migration in migrations) {
+            register_migration(migration);
+        }
+    }
+    
+    // === Status Queries ===
+    
+    /**
+     * Gets all migration versions that have been applied.
+     *
+     * @return Sorted list of applied migration versions
+     */
+    public Invercargill.Enumerable<string> get_applied_versions() {
+        return _storage.get_applied_versions();
+    }
+    
+    /**
+     * Gets all migration versions that are registered but not yet applied.
+     *
+     * @return Sorted list of pending migration versions
+     */
+    public Invercargill.Enumerable<string> get_pending_versions() {
+        var pending = new Invercargill.DataStructures.Vector<string>();
+        var applied = _storage.get_applied_set();
+        
+        // Get all registered versions and sort them
+        var all_versions = new Invercargill.DataStructures.Vector<string>();
+        foreach (var version in _migrations.keys) {
+            all_versions.add(version);
+        }
+        all_versions.sort((a, b) => a.compare(b));
+        
+        // Filter to only pending
+        foreach (var version in all_versions) {
+            if (!applied.contains(version)) {
+                pending.add(version);
+            }
+        }
+        
+        return pending.as_enumerable();
+    }
+    
+    /**
+     * Checks if a specific migration version has been applied.
+     *
+     * @param version The migration version to check
+     * @return true if the migration has been applied
+     */
+    public bool is_applied(string version) {
+        return _storage.is_applied(version);
+    }
+    
+    /**
+     * Gets the count of pending migrations.
+     *
+     * @return Number of migrations waiting to be applied
+     */
+    public int get_pending_count() {
+        int count = 0;
+        var applied = _storage.get_applied_set();
+        foreach (var version in _migrations.keys) {
+            if (!applied.contains(version)) {
+                count++;
+            }
+        }
+        return count;
+    }
+    
+    // === Execution ===
+    
+    /**
+     * Runs all pending migrations in version order.
+     *
+     * Each migration runs in its own transaction. If a migration fails,
+     * the process stops and the failing migration is rolled back.
+     *
+     * @param progress Optional callback for progress notifications
+     * @return Number of migrations that were run
+     * @throws MigrationError if any migration fails
+     */
+    public int run_pending(MigrationProgressDelegate? progress = null) throws MigrationError {
+        var pending = get_pending_versions();
+        int count = 0;
+        
+        foreach (var version in pending) {
+            var migration = _migrations.get(version);
+            if (migration == null) {
+                throw new MigrationError.NOT_FOUND(
+                    "Migration %s not found".printf(version)
+                );
+            }
+            
+            run_single((!) migration, true, progress);
+            count++;
+        }
+        
+        return count;
+    }
+    
+    /**
+     * Runs a specific migration.
+     *
+     * @param version The version of the migration to run
+     * @param progress Optional callback for progress notification
+     * @throws MigrationError if the migration fails or is already applied
+     */
+    public void run_one(string version, MigrationProgressDelegate? progress = null) throws MigrationError {
+        if (_storage.is_applied(version)) {
+            throw new MigrationError.ALREADY_APPLIED(
+                "Migration %s is already applied".printf(version)
+            );
+        }
+        
+        var migration = _migrations.get(version);
+        if (migration == null) {
+            throw new MigrationError.NOT_FOUND(
+                "Migration %s not found".printf(version)
+            );
+        }
+        
+        run_single((!) migration, true, progress);
+    }
+    
+    /**
+     * Rolls back to a specific version.
+     *
+     * Runs the down() method of all migrations after the target version,
+     * in reverse order.
+     *
+     * @param target_version The version to roll back to (exclusive - this version remains applied)
+     * @param progress Optional callback for progress notifications
+     * @return Number of migrations that were rolled back
+     * @throws MigrationError if any rollback fails
+     */
+    public int rollback_to(string target_version, MigrationProgressDelegate? progress = null) throws MigrationError {
+        var applied = _storage.get_applied_versions();
+        int count = 0;
+        
+        // Roll back in reverse order
+        var to_rollback = new Invercargill.DataStructures.Vector<string>();
+        foreach (var version in applied) {
+            if (version.compare(target_version) > 0) {
+                to_rollback.add(version);
+            }
+        }
+        
+        // Sort in reverse order
+        to_rollback.sort((a, b) => b.compare(a));
+        
+        foreach (var version in to_rollback) {
+            var migration = _migrations.get(version);
+            if (migration == null) {
+                throw new MigrationError.NOT_FOUND(
+                    "Migration %s not found for rollback".printf(version)
+                );
+            }
+            
+            run_single((!) migration, false, progress);
+            count++;
+        }
+        
+        return count;
+    }
+    
+    /**
+     * Rolls back the most recently applied migration.
+     *
+     * @param progress Optional callback for progress notification
+     * @throws MigrationError if no migrations are applied or rollback fails
+     */
+    public void rollback_last(MigrationProgressDelegate? progress = null) throws MigrationError {
+        var applied = _storage.get_applied_versions();
+        
+        string? last_version = null;
+        foreach (var version in applied) {
+            last_version = version; // Last one due to sorted order
+        }
+        
+        if (last_version == null) {
+            throw new MigrationError.NO_MIGRATIONS(
+                "No migrations have been applied"
+            );
+        }
+        
+        var migration = _migrations.get((!) last_version);
+        if (migration == null) {
+            throw new MigrationError.NOT_FOUND(
+                "Migration %s not found for rollback".printf((!) last_version)
+            );
+        }
+        
+        run_single((!) migration, false, progress);
+    }
+    
+    // === Internal Methods ===
+    
+    /**
+     * Runs a single migration within a transaction.
+     */
+    private void run_single(Migration migration, bool is_up, MigrationProgressDelegate? progress) 
+            throws MigrationError {
+        
+        var engine = _engine;
+        if (engine == null) {
+            throw new MigrationError.ENGINE_ERROR("Engine reference is invalid");
+        }
+        
+        // Execute within transaction
+        try {
+            var tx = ((!) engine).begin_transaction();
+            try {
+                if (is_up) {
+                    migration.up((!) engine);
+                    _storage.record_migration(migration.version, migration.description);
+                } else {
+                    migration.down((!) engine);
+                    _storage.remove_migration(migration.version);
+                }
+                
+                tx.commit();
+                
+                if (progress != null) {
+                    progress(migration, is_up);
+                }
+            } catch (MigrationError e) {
+                tx.rollback();
+                throw e;
+            } catch (Core.EngineError e) {
+                tx.rollback();
+                throw new MigrationError.EXECUTION_FAILED(
+                    "Migration %s failed: %s".printf(migration.version, e.message)
+                );
+            }
+        } catch (Core.EngineError e) {
+            throw new MigrationError.TRANSACTION_ERROR(
+                "Failed to begin transaction: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+} // namespace Implexus.Migrations
+```
+
+### 3. MigrationStorage
+
+The `MigrationStorage` class handles persistence of migration history using the existing Dbm infrastructure.
+
+**File:** `src/Migrations/MigrationStorage.vala`
+
+```vala
+namespace Implexus.Migrations {
+
+/**
+ * Stores and retrieves migration history.
+ *
+ * Migration history is persisted in the database using a dedicated
+ * key prefix. This ensures migration state survives application restarts.
+ *
+ * Key format: migration:<version>
+ * Value: Serialized (timestamp, description)
+ */
+public class MigrationStorage : Object {
+    
+    // === Constants ===
+    
+    private const string PREFIX = "migration:";
+    private const string INDEX_KEY = "migration:index";
+    
+    // === Private Fields ===
+    
+    private weak Core.Engine _engine;
+    private Storage.Dbm? _dbm;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new MigrationStorage for the given engine.
+     *
+     * @param engine The database engine
+     */
+    public MigrationStorage(Core.Engine engine) {
+        _engine = engine;
+        
+        // Get Dbm from engine configuration if available
+        var config = engine.configuration;
+        var storage = config.storage;
+        
+        // Try to get Dbm from BasicStorage
+        var basic_storage = (storage as Storage.BasicStorage);
+        if (basic_storage != null) {
+            _dbm = ((!) basic_storage).dbm;
+        }
+    }
+    
+    // === Recording Migrations ===
+    
+    /**
+     * Records that a migration has been applied.
+     *
+     * @param version The migration version
+     * @param description The migration description
+     * @throws MigrationError if the record cannot be saved
+     */
+    public void record_migration(string version, string description) throws MigrationError {
+        if (_dbm == null) {
+            throw new MigrationError.STORAGE_ERROR("Database storage not available");
+        }
+        
+        var dbm = (!) _dbm;
+        string key = PREFIX + version;
+        
+        // Store migration record
+        var writer = new Storage.ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<int64?>(new DateTime.now_utc().to_unix()));
+        writer.write_element(new Invercargill.NativeElement<string>(description));
+        
+        try {
+            dbm.set(key, writer.to_binary_data());
+        } catch (Storage.StorageError e) {
+            throw new MigrationError.STORAGE_ERROR(
+                "Failed to record migration: %s".printf(e.message)
+            );
+        }
+        
+        // Update index
+        update_index(dbm, version, true);
+    }
+    
+    /**
+     * Removes a migration record (for rollbacks).
+     *
+     * @param version The migration version to remove
+     * @throws MigrationError if the record cannot be removed
+     */
+    public void remove_migration(string version) throws MigrationError {
+        if (_dbm == null) {
+            throw new MigrationError.STORAGE_ERROR("Database storage not available");
+        }
+        
+        var dbm = (!) _dbm;
+        string key = PREFIX + version;
+        
+        try {
+            dbm.delete(key);
+        } catch (Storage.StorageError e) {
+            // Key may not exist, that's fine
+        }
+        
+        // Update index
+        update_index(dbm, version, false);
+    }
+    
+    // === Querying Migrations ===
+    
+    /**
+     * Gets all applied migration versions in sorted order.
+     *
+     * @return Sorted enumerable of version strings
+     */
+    public Invercargill.Enumerable<string> get_applied_versions() {
+        var versions = new Invercargill.DataStructures.Vector<string>();
+        
+        if (_dbm == null) {
+            return versions.as_enumerable();
+        }
+        
+        // Load from index
+        var index_data = ((!) _dbm).get(INDEX_KEY);
+        if (index_data == null) {
+            // Fall back to scanning keys
+            foreach (var key in ((!) _dbm).keys) {
+                if (key.has_prefix(PREFIX)) {
+                    var version = key.substring(PREFIX.length);
+                    versions.add(version);
+                }
+            }
+            versions.sort((a, b) => a.compare(b));
+            return versions.as_enumerable();
+        }
+        
+        // Parse index
+        var reader = new Storage.ElementReader((!) index_data);
+        try {
+            var element = reader.read_element();
+            if (!element.is_null()) {
+                var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+                foreach (var item in array) {
+                    if (!item.is_null()) {
+                        versions.add(item.as<string>());
+                    }
+                }
+            }
+        } catch (Invercargill.ElementError e) {
+            // Fall back to empty list
+        }
+        
+        return versions.as_enumerable();
+    }
+    
+    /**
+     * Gets a set of applied migration versions for efficient lookup.
+     *
+     * @return Set of applied version strings
+     */
+    public Invercargill.DataStructures.Set<string> get_applied_set() {
+        var set = new Invercargill.DataStructures.HashSet<string>();
+        
+        foreach (var version in get_applied_versions()) {
+            set.add(version);
+        }
+        
+        return set;
+    }
+    
+    /**
+     * Checks if a specific migration has been applied.
+     *
+     * @param version The migration version to check
+     * @return true if the migration has been applied
+     */
+    public bool is_applied(string version) {
+        if (_dbm == null) {
+            return false;
+        }
+        
+        string key = PREFIX + version;
+        return ((!) _dbm).has_key(key);
+    }
+    
+    /**
+     * Gets detailed information about an applied migration.
+     *
+     * @param version The migration version
+     * @return Migration record, or null if not applied
+     */
+    public MigrationRecord? get_migration_record(string version) {
+        if (_dbm == null) {
+            return null;
+        }
+        
+        string key = PREFIX + version;
+        var data = ((!) _dbm).get(key);
+        
+        if (data == null) {
+            return null;
+        }
+        
+        var reader = new Storage.ElementReader((!) data);
+        try {
+            var timestamp_element = reader.read_element();
+            var desc_element = reader.read_element();
+            
+            int64? timestamp = timestamp_element.as<int64?>();
+            string description = desc_element.as<string>();
+            
+            return new MigrationRecord(
+                version,
+                description,
+                timestamp != null ? new DateTime.from_unix_utc((!) timestamp) : null
+            );
+        } catch (Invercargill.ElementError e) {
+            return null;
+        }
+    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Updates the migration index.
+     */
+    private void update_index(Storage.Dbm dbm, string version, bool add) {
+        var versions = new Invercargill.DataStructures.Vector<string>();
+        
+        // Load existing index
+        var index_data = dbm.get(INDEX_KEY);
+        if (index_data != null) {
+            var reader = new Storage.ElementReader((!) index_data);
+            try {
+                var element = reader.read_element();
+                if (!element.is_null()) {
+                    var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+                    foreach (var item in array) {
+                        if (!item.is_null()) {
+                            string existing = item.as<string>();
+                            if (!add || existing != version) {
+                                versions.add(existing);
+                            }
+                        }
+                    }
+                }
+            } catch (Invercargill.ElementError e) {
+                // Start fresh
+            }
+        }
+        
+        if (add) {
+            versions.add(version);
+        }
+        
+        // Sort
+        versions.sort((a, b) => a.compare(b));
+        
+        // Save index
+        if (versions.length == 0) {
+            try {
+                dbm.delete(INDEX_KEY);
+            } catch (Storage.StorageError e) {
+                // Ignore
+            }
+            return;
+        }
+        
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        foreach (var v in versions) {
+            array.add(new Invercargill.NativeElement<string>(v));
+        }
+        
+        var writer = new Storage.ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+        
+        try {
+            dbm.set(INDEX_KEY, writer.to_binary_data());
+        } catch (Storage.StorageError e) {
+            // Ignore - not critical
+        }
+    }
+}
+
+/**
+ * Represents a record of an applied migration.
+ */
+public class MigrationRecord : Object {
+    /**
+     * The migration version.
+     */
+    public string version { get; construct set; }
+    
+    /**
+     * The migration description.
+     */
+    public string description { get; construct set; }
+    
+    /**
+     * When the migration was applied, or null if unknown.
+     */
+    public DateTime? applied_at { get; construct set; }
+    
+    /**
+     * Creates a new MigrationRecord.
+     */
+    public MigrationRecord(string version, string description, DateTime? applied_at) {
+        Object(version: version, description: description, applied_at: applied_at);
+    }
+}
+
+} // namespace Implexus.Migrations
+```
+
+### 4. MigrationError
+
+Error domain for migration-related errors.
+
+**File:** `src/Migrations/MigrationError.vala`
+
+```vala
+namespace Implexus.Migrations {
+
+/**
+ * Error domain for migration operations.
+ */
+public errordomain MigrationError {
+    /**
+     * The migration was not found.
+     */
+    NOT_FOUND,
+    
+    /**
+     * A migration with this version is already registered or applied.
+     */
+    VERSION_CONFLICT,
+    
+    /**
+     * The migration is already applied.
+     */
+    ALREADY_APPLIED,
+    
+    /**
+     * The migration cannot be reversed.
+     */
+    IRREVERSIBLE,
+    
+    /**
+     * The migration execution failed.
+     */
+    EXECUTION_FAILED,
+    
+    /**
+     * Transaction error during migration.
+     */
+    TRANSACTION_ERROR,
+    
+    /**
+     * Storage error during migration.
+     */
+    STORAGE_ERROR,
+    
+    /**
+     * Engine reference error.
+     */
+    ENGINE_ERROR,
+    
+    /**
+     * No migrations are available or applied.
+     */
+    NO_MIGRATIONS
+}
+
+} // namespace Implexus.Migrations
+```
+
+### 5. Bootstrap Migration
+
+A special migration for initial database setup.
+
+**File:** `src/Migrations/BootstrapMigration.vala`
+
+```vala
+namespace Implexus.Migrations {
+
+/**
+ * A bootstrap migration that runs before any other migrations.
+ *
+ * This migration is automatically applied when the database is first
+ * created. It can be extended by applications to perform initial setup
+ * such as creating required containers or seeding default data.
+ *
+ * The bootstrap migration always uses version "0000000000" to ensure
+ * it runs first.
+ *
+ * Example:
+ * {{{
+ * public class MyAppBootstrap : BootstrapMigration {
+ *     public override void up(Core.Engine engine) throws MigrationError {
+ *         base.up(engine);
+ *         
+ *         var root = engine.get_root();
+ *         try {
+ *             // Create application-specific containers
+ *             root.create_container("config");
+ *             root.create_container("sessions");
+ *             
+ *             // Seed initial data
+ *             var config = root.get_child("config");
+ *             var settings = config.create_document("settings", "AppConfig");
+ *             settings.set_entity_property("version", new Invercargill.NativeElement<string>("1.0.0"));
+ *         } catch (Core.EngineError e) {
+ *             throw new MigrationError.EXECUTION_FAILED(
+ *                 "Bootstrap failed: %s".printf(e.message)
+ *             );
+ *         }
+ *     }
+ * }
+ * }}}
+ */
+public class BootstrapMigration : Object, Migration {
+    
+    /**
+     * Bootstrap version - always runs first.
+     */
+    public string version { owned get { return "0000000000"; } }
+    
+    /**
+     * Description of the bootstrap migration.
+     */
+    public virtual string description { owned get { return "Initial database setup"; } }
+    
+    /**
+     * Performs initial database setup.
+     *
+     * The base implementation ensures the root container exists.
+     * Override to add application-specific setup.
+     */
+    public virtual void up(Core.Engine engine) throws MigrationError {
+        // Ensure root exists
+        try {
+            engine.get_root();
+        } catch (Core.EngineError e) {
+            throw new MigrationError.EXECUTION_FAILED(
+                "Failed to initialize root: %s".printf(e.message)
+            );
+        }
+    }
+    
+    /**
+     * Bootstrap migration cannot be reversed.
+     */
+    public void down(Core.Engine engine) throws MigrationError {
+        throw new MigrationError.IRREVERSIBLE(
+            "Bootstrap migration cannot be reversed"
+        );
+    }
+}
+
+} // namespace Implexus.Migrations
+```
+
+## File Organization
+
+New files should be placed in the following structure:
+
+```
+src/
+├── Migrations/
+│   ├── meson.build
+│   ├── Migration.vala              # Migration interface
+│   ├── MigrationRunner.vala        # Migration execution engine
+│   ├── MigrationStorage.vala       # History persistence
+│   ├── MigrationError.vala         # Error domain
+│   └── BootstrapMigration.vala     # Initial setup migration
+```
+
+### meson.build
+
+```meson
+migration_sources = files(
+    'Migration.vala',
+    'MigrationRunner.vala',
+    'MigrationStorage.vala',
+    'MigrationError.vala',
+    'BootstrapMigration.vala'
+)
+```
+
+The migration sources should be added to the main library sources in `src/meson.build`.
+
+## Integration with Engine
+
+### Engine Interface Extension
+
+Add migration support to the Engine interface:
+
+```vala
+// In src/Core/Engine.vala
+
+public interface Engine : Object {
+    // ... existing methods ...
+    
+    /**
+     * Creates a new MigrationRunner for this engine.
+     *
+     * @return A new MigrationRunner instance
+     */
+    public abstract Migrations.MigrationRunner create_migration_runner();
+}
+```
+
+### EmbeddedEngine Implementation
+
+```vala
+// In src/Engine/EmbeddedEngine.vala
+
+public class EmbeddedEngine : Object, Core.Engine {
+    // ... existing code ...
+    
+    /**
+     * {@inheritDoc}
+     */
+    public Migrations.MigrationRunner create_migration_runner() {
+        return new Migrations.MigrationRunner(this);
+    }
+}
+```
+
+## API Examples
+
+### Defining Migrations
+
+```vala
+// Migration 1: Create users container
+public class CreateUsersContainer : Object, Implexus.Migrations.Migration {
+    public string version { owned get { return "2026031301"; } }
+    public string description { owned get { return "Create users container"; } }
+    
+    public void up(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        try {
+            var root = engine.get_root();
+            root.create_container("users");
+        } catch (Implexus.Core.EngineError e) {
+            throw new Implexus.Migrations.MigrationError.EXECUTION_FAILED(
+                "Failed to create users: %s".printf(e.message)
+            );
+        }
+    }
+    
+    public void down(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        try {
+            var root = engine.get_root();
+            var users = root.get_child("users");
+            if (users != null) {
+                users.delete();
+            }
+        } catch (Implexus.Core.EngineError e) {
+            throw new Implexus.Migrations.MigrationError.EXECUTION_FAILED(
+                "Failed to remove users: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+// Migration 2: Add email index
+public class AddEmailIndex : Object, Implexus.Migrations.Migration {
+    public string version { owned get { return "2026031302"; } }
+    public string description { owned get { return "Add email index for users"; } }
+    
+    public void up(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        try {
+            var root = engine.get_root();
+            var users = (Implexus.Entities.Container?) root.get_child("users");
+            if (users != null) {
+                ((!) users).create_index("by_email", "User", "email");
+            }
+        } catch (Implexus.Core.EngineError e) {
+            throw new Implexus.Migrations.MigrationError.EXECUTION_FAILED(
+                "Failed to create email index: %s".printf(e.message)
+            );
+        }
+    }
+    
+    public void down(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        try {
+            var root = engine.get_root();
+            var users = root.get_child("users");
+            if (users != null) {
+                var index = ((!) users).get_child("by_email");
+                if (index != null) {
+                    index.delete();
+                }
+            }
+        } catch (Implexus.Core.EngineError e) {
+            throw new Implexus.Migrations.MigrationError.EXECUTION_FAILED(
+                "Failed to remove email index: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+// Migration 3: Seed initial data (irreversible)
+public class SeedInitialData : Object, Implexus.Migrations.Migration {
+    public string version { owned get { return "2026031303"; } }
+    public string description { owned get { return "Seed initial configuration data"; } }
+    
+    public void up(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        try {
+            var root = engine.get_root();
+            var config = root.create_container("config");
+            var settings = config.create_document("settings", "AppSettings");
+            settings.set_entity_property("theme", new Invercargill.NativeElement<string>("dark"));
+            settings.set_entity_property("max_users", new Invercargill.NativeElement<int>(100));
+        } catch (Implexus.Core.EngineError e) {
+            throw new Implexus.Migrations.MigrationError.EXECUTION_FAILED(
+                "Failed to seed data: %s".printf(e.message)
+            );
+        }
+    }
+    
+    public void down(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        // This migration is irreversible - data loss is not acceptable
+        throw new Implexus.Migrations.MigrationError.IRREVERSIBLE(
+            "Seed data migration cannot be reversed"
+        );
+    }
+}
+```
+
+### Running Migrations
+
+```vala
+// Basic migration run
+public void run_migrations(Implexus.Core.Engine engine) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        // Register all migrations
+        runner.register_migration(new CreateUsersContainer());
+        runner.register_migration(new AddEmailIndex());
+        runner.register_migration(new SeedInitialData());
+        
+        // Run pending migrations
+        int count = runner.run_pending();
+        print("Successfully ran %d migrations\n", count);
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        stderr.printf("Migration failed: %s\n", e.message);
+    }
+}
+
+// With progress reporting
+public void run_migrations_with_progress(Implexus.Core.Engine engine) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        runner.register_migrations(new Implexus.Migrations.Migration[] {
+            new CreateUsersContainer(),
+            new AddEmailIndex(),
+            new SeedInitialData()
+        });
+        
+        // Run with progress callback
+        int count = runner.run_pending((migration, is_up) => {
+            string direction = is_up ? "Up" : "Down";
+            print("[%s] %s: %s\n", direction, migration.version, migration.description);
+        });
+        
+        print("Completed %d migrations\n", count);
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        stderr.printf("Migration failed: %s\n", e.message);
+    }
+}
+
+// Check status before running
+public void check_and_run_migrations(Implexus.Core.Engine engine) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        runner.register_migrations(new Implexus.Migrations.Migration[] {
+            new CreateUsersContainer(),
+            new AddEmailIndex(),
+            new SeedInitialData()
+        });
+        
+        // Check status
+        int pending = runner.get_pending_count();
+        if (pending == 0) {
+            print("Database is up to date\n");
+            return;
+        }
+        
+        print("There are %d pending migrations:\n", pending);
+        foreach (var version in runner.get_pending_versions()) {
+            print("  - %s\n", version);
+        }
+        
+        // Run them
+        runner.run_pending();
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        stderr.printf("Migration failed: %s\n", e.message);
+    }
+}
+```
+
+### Rolling Back Migrations
+
+```vala
+// Roll back last migration
+public void rollback_last_migration(Implexus.Core.Engine engine) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        runner.register_migrations(new Implexus.Migrations.Migration[] {
+            new CreateUsersContainer(),
+            new AddEmailIndex(),
+            new SeedInitialData()
+        });
+        
+        runner.rollback_last((migration, is_up) => {
+            print("Rolled back: %s\n", migration.version);
+        });
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        if (e is Implexus.Migrations.MigrationError.IRREVERSIBLE) {
+            stderr.printf("Cannot roll back irreversible migration: %s\n", e.message);
+        } else {
+            stderr.printf("Rollback failed: %s\n", e.message);
+        }
+    }
+}
+
+// Roll back to specific version
+public void rollback_to_version(Implexus.Core.Engine engine, string target_version) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        runner.register_migrations(new Implexus.Migrations.Migration[] {
+            new CreateUsersContainer(),
+            new AddEmailIndex(),
+            new SeedInitialData()
+        });
+        
+        int count = runner.rollback_to(target_version);
+        print("Rolled back %d migrations\n", count);
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        stderr.printf("Rollback failed: %s\n", e.message);
+    }
+}
+```
+
+### Using Bootstrap Migration
+
+```vala
+// Custom bootstrap for application
+public class MyAppBootstrap : Implexus.Migrations.BootstrapMigration {
+    public override string description { owned get { return "MyApp initial setup"; } }
+    
+    public override void up(Implexus.Core.Engine engine) throws Implexus.Migrations.MigrationError {
+        // Call base to ensure root exists
+        base.up(engine);
+        
+        try {
+            var root = engine.get_root();
+            
+            // Create application structure
+            root.create_container("users");
+            root.create_container("projects");
+            root.create_container("settings");
+            
+            // Create default admin user
+            var users = (Implexus.Entities.Container) root.get_child("users");
+            var admin = users.create_document("admin", "User");
+            admin.set_entity_property("role", new Invercargill.NativeElement<string>("admin"));
+            admin.set_entity_property("email", new Invercargill.NativeElement<string>("admin@example.com"));
+            
+        } catch (Implexus.Core.EngineError e) {
+            throw new Implexus.Migrations.MigrationError.EXECUTION_FAILED(
+                "Bootstrap failed: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+// Using bootstrap with regular migrations
+public void initialize_database(Implexus.Core.Engine engine) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        // Register bootstrap first
+        runner.register_migration(new MyAppBootstrap());
+        
+        // Then register regular migrations
+        runner.register_migration(new CreateUsersContainer());
+        runner.register_migration(new AddEmailIndex());
+        
+        // Run all pending (bootstrap will run first due to version ordering)
+        runner.run_pending();
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        stderr.printf("Database initialization failed: %s\n", e.message);
+    }
+}
+```
+
+## Error Handling
+
+### Error Handling Strategy
+
+1. **Transaction Safety**: Each migration runs in its own transaction. If a migration fails, only that migration is rolled back.
+
+2. **Atomic Operations**: Within a migration, all operations are atomic. Either all changes apply, or none do.
+
+3. **Irreversible Migrations**: Migrations that cannot be safely reversed should throw `MigrationError.IRREVERSIBLE` in their `down()` method.
+
+4. **Progress Preservation**: Successfully applied migrations are recorded even if later migrations fail. The system can be resumed after fixing the failing migration.
+
+### Error Handling Flow
+
+```mermaid
+sequenceDiagram
+    participant App as Application
+    participant Runner as MigrationRunner
+    participant TX as Transaction
+    participant Storage as MigrationStorage
+    participant DB as Database
+    
+    App->>Runner: run_pending()
+    loop For each pending migration
+        Runner->>TX: begin_transaction()
+        Runner->>App: migration.up()
+        alt Success
+            App->>DB: Apply changes
+            Runner->>Storage: record_migration()
+            Storage->>DB: Save record
+            Runner->>TX: commit()
+            Runner->>App: progress callback
+        else Failure
+            App-->>Runner: throws EngineError
+            Runner->>TX: rollback()
+            Runner-->>App: throws MigrationError
+        end
+    end
+```
+
+### Error Recovery
+
+```vala
+public void run_with_recovery(Implexus.Core.Engine engine) {
+    var runner = engine.create_migration_runner();
+    
+    try {
+        runner.register_migrations(get_all_migrations());
+        runner.run_pending();
+        
+    } catch (Implexus.Migrations.MigrationError e) {
+        // Check what was applied before failure
+        var applied = runner.get_applied_versions();
+        var pending = runner.get_pending_versions();
+        
+        stderr.printf("Migration failed: %s\n", e.message);
+        stderr.printf("Applied: %d, Pending: %d\n", 
+            applied.length, pending.length);
+        
+        // Log for manual recovery
+        log_migration_failure(e, applied, pending);
+        
+        // Depending on error type, may be able to retry
+        if (e is Implexus.Migrations.MigrationError.TRANSACTION_ERROR) {
+            stderr.printf("Transaction error - may be retried\n");
+        } else if (e is Implexus.Migrations.MigrationError.EXECUTION_FAILED) {
+            stderr.printf("Execution failed - check migration code\n");
+        }
+    }
+}
+```
+
+## Storage Schema
+
+### Key Schema
+
+| Key Pattern | Description |
+|-------------|-------------|
+| `migration:<version>` | Record of an applied migration |
+| `migration:index` | Sorted list of all applied migration versions |
+
+### Data Format
+
+**`migration:<version>`**
+```
+Element[] {
+    int64 timestamp,      // Unix timestamp when applied
+    string description    // Migration description
+}
+```
+
+**`migration:index`**
+```
+Element[] {
+    string[] versions    // Sorted array of applied version strings
+}
+```
+
+## Design Decisions
+
+1. **Version Format**: Using string-based versions (e.g., "2026031301") instead of integers allows for:
+   - Natural ordering by timestamp
+   - Human-readable identification
+   - No collision issues in distributed development
+
+2. **One Transaction Per Migration**: Each migration runs in its own transaction rather than batching all migrations in one transaction. This:
+   - Allows partial progress recovery
+   - Prevents long-running transactions
+   - Matches common migration tool patterns
+
+3. **Separate MigrationStorage**: Storage logic is separated from the runner to:
+   - Allow alternative storage implementations
+   - Enable testing with mock storage
+   - Follow single responsibility principle
+
+4. **Bootstrap as Regular Migration**: The bootstrap migration is implemented as a regular migration with a special version number rather than a separate mechanism. This:
+   - Ensures it's tracked in history
+   - Allows rollback (if reversible)
+   - Uses the same infrastructure
+
+5. **No Auto-Discovery**: Migrations must be explicitly registered rather than auto-discovered. This:
+   - Gives applications full control
+   - Avoids reflection/metadata complexity
+   - Works well with Vala's compilation model
+
+## Future Considerations
+
+1. **Migration Validation**: Could add a `validate()` method to migrations for pre-flight checks.
+
+2. **Dry Run Mode**: Could add a `--dry-run` option that shows what would happen without executing.
+
+3. **Migration Dependencies**: Could add support for migrations that depend on other migrations.
+
+4. **Conditional Migrations**: Could add support for migrations that only run under certain conditions.
+
+5. **Migration Groups**: Could add support for grouping migrations into named sets that can be applied together.

+ 66 - 0
Architecture/README.md

@@ -0,0 +1,66 @@
+# Implexus Architecture Documentation
+
+A path-based document database library and engine for Vala.
+
+## Document Index
+
+| Document | Description |
+|----------|-------------|
+| [01-Overview](01-Overview.md) | High-level description, goals, and design principles |
+| [02-Namespaces](02-Namespaces.md) | Namespace organization and module structure |
+| [03-Core-Interfaces](03-Core-Interfaces.md) | Entity, EntityType, Storage, and Engine interfaces |
+| [04-Class-Hierarchy](04-Class-Hierarchy.md) | Class diagrams and inheritance relationships |
+| [05-Path-System](05-Path-System.md) | Path parsing, resolution, and management |
+| [06-Entity-Types](06-Entity-Types.md) | Container, Document, Category, and Index implementations |
+| [07-Storage-Layer](07-Storage-Layer.md) | DBM abstraction and binary serialization format |
+| [08-Set-Operations](08-Set-Operations.md) | Set operations API for entity children |
+| [09-Client-Server-Protocol](09-Client-Server-Protocol.md) | TCP protocol design for remote mode |
+| [10-File-Organization](10-File-Organization.md) | Source file map and project structure |
+
+## Quick Start
+
+Implexus is a document database that organizes data using path-based identification, similar to a filesystem. It supports:
+
+- **Embedded mode**: Library runs in-process
+- **Client/Server mode**: Daemon over TCP with identical API
+
+## Dependencies
+
+- GLib 2.0
+- GObject 2.0
+- Invercargill-1 (collections, expressions, Element types)
+
+## Architecture Overview
+
+```
+┌─────────────────────────────────────────────────────────────┐
+│                      Application                             │
+└─────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────┐
+│                     Engine Interface                         │
+│  ┌─────────────────────┐  ┌─────────────────────────────┐   │
+│  │   EmbeddedEngine    │  │      RemoteEngine           │   │
+│  └─────────────────────┘  └─────────────────────────────┘   │
+└─────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────┐
+│                    Entity System                             │
+│  ┌──────────┐ ┌──────────┐ ┌───────────┐ ┌──────────────┐  │
+│  │ Container │ │ Document │ │ Category │ │    Index     │  │
+│  └──────────┘ └──────────┘ └───────────┘ └──────────────┘  │
+└─────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────┐
+│                    Storage Layer                             │
+│  ┌─────────────────────┐  ┌─────────────────────────────┐   │
+│  │   Storage Interface │  │    Binary Serialization     │   │
+│  └─────────────────────┘  └─────────────────────────────┘   │
+│  ┌─────────────────────┐                                    │
+│  │   DBM Interface     │                                    │
+│  └─────────────────────┘                                    │
+└─────────────────────────────────────────────────────────────┘
+```

+ 397 - 0
KEY-SCHEMA.md

@@ -0,0 +1,397 @@
+# Key Schema
+
+This document describes the key schema used by Implexus for storing data in the underlying key-value store.
+
+## Overview
+
+Implexus uses a prefix-based key schema to organize different types of data within the key-value store. All keys are strings, and values are serialized using the [`ElementWriter`](src/Storage/ElementSerializer.vala:53) / [`ElementReader`](src/Storage/ElementSerializer.vala:271) serialization system.
+
+The storage layer consists of two main components:
+- [`BasicStorage`](src/Storage/Storage.vala:186) - Entity metadata and properties
+- [`IndexManager`](src/Storage/IndexManager.vala:49) - Index data for fast lookups
+
+## Key Prefixes
+
+| Prefix | Description | Value Type | Managed By |
+|--------|-------------|------------|------------|
+| `entity:` | Entity metadata | Serialized type and label | [`BasicStorage`](src/Storage/Storage.vala:186) |
+| `props:` | Document properties | Serialized Properties | [`BasicStorage`](src/Storage/Storage.vala:186) |
+| `children:` | Container children | Serialized string array | [`BasicStorage`](src/Storage/Storage.vala:186) |
+| `config:` | Category configuration | Serialized type_label + expression | [`BasicStorage`](src/Storage/Storage.vala:186) |
+| `catcfg:` | Catalogue configuration | Serialized type_label + expression | [`BasicStorage`](src/Storage/Storage.vala:186) |
+| `typeidx:` | Type index | Serialized string array | [`IndexManager`](src/Storage/IndexManager.vala:49) |
+| `cat:` | Category members | Serialized string array | [`IndexManager`](src/Storage/IndexManager.vala:49) |
+| `catl:` | Catalogue groups/keys | Serialized string array | [`IndexManager`](src/Storage/IndexManager.vala:49) |
+| `idx:` | Text search indices | Serialized string array | [`IndexManager`](src/Storage/IndexManager.vala:49) |
+
+## Key Patterns
+
+### Entity Storage
+
+Entity metadata is stored with the `entity:` prefix:
+
+```
+entity:<entity_path> → [type_code: int64, type_label: string]
+```
+
+**Examples:**
+```
+entity:/users/john         → [1, "User"]
+entity:/products/widget    → [2, "Product"]
+entity:/categories/active  → [4, null]
+```
+
+The type code corresponds to [`EntityType`](src/Core/EntityType.vala) enum values:
+- 0 = UNKNOWN
+- 1 = DOCUMENT
+- 2 = CATEGORY
+- 3 = CATALOGUE
+- 4 = CONTAINER
+- 5 = INDEX
+
+### Document Properties
+
+Document properties are stored with the `props:` prefix:
+
+```
+props:<document_path> → Serialized Properties
+```
+
+**Example:**
+```
+props:/users/john → {"name": "John Doe", "email": "john@example.com", "age": 30}
+```
+
+Properties are serialized as a dictionary using [`ElementWriter.write_dictionary()`](src/Storage/ElementSerializer.vala:237).
+
+### Container Children
+
+Container child names are stored with the `children:` prefix:
+
+```
+children:<container_path> → [child_name1, child_name2, ...]
+```
+
+**Example:**
+```
+children:/users → ["john", "jane", "admin"]
+children:/products → ["widget", "gadget", "tool"]
+```
+
+### Category Configuration
+
+Category configuration is stored with the `config:` prefix:
+
+```
+config:<category_path> → [type_label: string, expression: string]
+```
+
+**Example:**
+```
+config:/categories/active-users → ["User", "status == 'active'"]
+config:/categories/expensive-products → ["Product", "price > 100"]
+```
+
+### Catalogue Configuration
+
+Catalogue configuration is stored with the `catcfg:` prefix:
+
+```
+catcfg:<catalogue_path> → [type_label: string, expression: string]
+```
+
+**Example:**
+```
+catcfg:/catalogues/users-by-role → ["User", "role"]
+catcfg:/catalogues/products-by-category → ["Product", "category"]
+```
+
+### Type Index
+
+The global type index maps type labels to document paths:
+
+```
+typeidx:<type_label> → [doc_path1, doc_path2, ...]
+```
+
+**Examples:**
+```
+typeidx:User → ["/users/john", "/users/jane", "/users/admin"]
+typeidx:Product → ["/products/widget", "/products/gadget"]
+typeidx:Order → ["/orders/001", "/orders/002"]
+```
+
+This enables fast lookup of all documents of a specific type.
+
+### Category Members
+
+Category member sets are stored with the `cat:` prefix and `:members` suffix:
+
+```
+cat:<category_path>:members → [doc_path1, doc_path2, ...]
+```
+
+**Examples:**
+```
+cat:/categories/active-users:members → ["/users/john", "/users/jane"]
+cat:/categories/expensive-products:members → ["/products/widget", "/products/gadget"]
+```
+
+### Catalogue Groups
+
+Catalogue groups use the `catl:` prefix with two key patterns:
+
+#### Group Members
+
+```
+catl:<catalogue_path>:group:<key_value> → [doc_path1, doc_path2, ...]
+```
+
+**Examples:**
+```
+catl:/catalogues/users-by-role:group:admin → ["/users/admin"]
+catl:/catalogues/users-by-role:group:user → ["/users/john", "/users/jane"]
+catl:/catalogues/products-by-category:group:electronics → ["/products/widget", "/products/gadget"]
+```
+
+#### Group Keys List
+
+```
+catl:<catalogue_path>:keys → [key1, key2, ...]
+```
+
+**Examples:**
+```
+catl:/catalogues/users-by-role:keys → ["admin", "user", "guest"]
+catl:/catalogues/products-by-category:keys → ["electronics", "clothing", "food"]
+```
+
+### N-gram Index
+
+Text search indices use the `idx:` prefix with n-gram type specifiers:
+
+#### Trigram Index
+
+```
+idx:<index_path>:tri:<trigram> → [doc_path1, doc_path2, ...]
+```
+
+**Examples:**
+```
+idx:/indices/document-search:tri:the → ["/docs/doc1", "/docs/doc2", "/docs/doc3"]
+idx:/indices/document-search:tri:ing → ["/docs/doc1", "/docs/doc4"]
+```
+
+#### Bigram Reverse Index
+
+Used for finding trigrams that contain a specific bigram:
+
+```
+idx:<index_path>:bi:<bigram> → [trigram1, trigram2, ...]
+```
+
+**Example:**
+```
+idx:/indices/document-search:bi:th → ["the", "tha", "thi"]
+```
+
+#### Unigram Reverse Index
+
+Used for finding bigrams that start with a specific character:
+
+```
+idx:<index_path>:uni:<unigram> → [bigram1, bigram2, ...]
+```
+
+**Example:**
+```
+idx:/indices/document-search:uni:t → ["th", "tr", "to"]
+```
+
+#### Document Content Cache
+
+Stores the indexed content for a document (used for reindexing):
+
+```
+idx:<index_path>:doc:<doc_path> → <indexed_content>
+```
+
+**Example:**
+```
+idx:/indices/document-search:doc:/docs/doc1 → "The quick brown fox jumps over the lazy dog"
+```
+
+## Value Serialization
+
+All values are serialized using the [`ElementSerializer`](src/Storage/ElementSerializer.vala) system with type tags for proper deserialization.
+
+### Type Codes
+
+| Code | Type | Description |
+|------|------|-------------|
+| 0x00 | NULL | Null value |
+| 0x01 | BOOL | Boolean (1 byte) |
+| 0x02 | INT64 | 64-bit signed integer (big-endian) |
+| 0x03 | UINT64 | 64-bit unsigned integer (big-endian) |
+| 0x04 | DOUBLE | 64-bit IEEE 754 floating point |
+| 0x05 | STRING | Length-prefixed UTF-8 string |
+| 0x06 | BINARY | Length-prefixed binary data |
+| 0x07 | ARRAY | Count-prefixed array of elements |
+| 0x08 | DICTIONARY | Count-prefixed key-value pairs |
+
+### String Encoding
+
+Strings are serialized as:
+```
+[length: int64][utf-8 bytes]
+```
+
+### Set/Array Encoding
+
+Sets (like member lists) are serialized as arrays:
+```
+[ARRAY_CODE: 0x07][count: int64][element1][element2]...[elementN]
+```
+
+Each element is a string element with its own type code and length prefix.
+
+### Properties Encoding
+
+Properties dictionaries are serialized as:
+```
+[DICTIONARY_CODE: 0x08][count: int64][key1][value1][key2][value2]...
+```
+
+## Index Management
+
+Indices are kept in sync with document changes through the [`HookManager`](src/Engine/HookManager.vala) event system.
+
+### Event Flow
+
+1. Document is created, updated, or deleted via [`EmbeddedEngine`](src/Engine/EmbeddedEngine.vala)
+2. [`HookManager`](src/Engine/HookManager.vala) fires the appropriate event
+3. Indexed entities ([`Category`](src/Entities/Category.vala), [`Catalogue`](src/Entities/Catalogue.vala), [`Index`](src/Entities/Index.vala)) listen for events
+4. [`IndexManager`](src/Storage/IndexManager.vala) updates the relevant index entries
+5. All operations within a transaction are committed atomically (when supported)
+
+### Transaction Safety
+
+When using a backend with native transaction support (like LMDB):
+
+```vala
+dbm.begin_transaction();
+try {
+    // Update document properties
+    storage.store_properties(path, properties);
+    
+    // Update type index
+    index_manager.add_to_type_index(type_label, path);
+    
+    // Update category indices
+    index_manager.add_to_category(category_path, path);
+    
+    // All changes commit atomically
+    dbm.commit_transaction();
+} catch (StorageError e) {
+    dbm.rollback_transaction();
+}
+```
+
+### Index Rebuilding
+
+If indices become corrupted or need rebuilding:
+
+1. Iterate through all keys with the relevant prefix
+2. For each document, re-evaluate index expressions
+3. Update index entries accordingly
+
+```vala
+// Example: Rebuild type index for "User"
+var user_paths = new Vector<string>();
+foreach (var key in dbm.keys) {
+    if (key.has_prefix("props:")) {
+        var path = key.substring(6);
+        var type_label = storage.get_entity_type_label(path);
+        if (type_label == "User") {
+            user_paths.add(path);
+        }
+    }
+}
+
+// Clear and rebuild
+dbm.delete("typeidx:User");
+foreach (var path in user_paths) {
+    index_manager.add_to_type_index("User", path);
+}
+```
+
+## Key Enumeration Patterns
+
+### Finding All Entities
+
+```vala
+foreach (var key in dbm.keys) {
+    if (key.has_prefix("entity:")) {
+        var path = key.substring(7);
+        // Process entity
+    }
+}
+```
+
+### Finding All Documents of a Type
+
+```vala
+var user_paths = index_manager.get_paths_for_type("User");
+foreach (var path in user_paths) {
+    var props = storage.load_properties(new EntityPath(path));
+    // Process document
+}
+```
+
+### Finding Category Members
+
+```vala
+var members = index_manager.get_category_members("/categories/active-users");
+foreach (var member_path in members) {
+    // Process member
+}
+```
+
+### Finding Catalogue Groups
+
+```vala
+var keys = index_manager.get_catalogue_keys("/catalogues/users-by-role");
+foreach (var key in keys) {
+    var members = index_manager.get_catalogue_group_members(
+        "/catalogues/users-by-role", key
+    );
+    // Process group
+}
+```
+
+## Storage Efficiency Considerations
+
+### Key Length
+
+Keys are stored verbatim, so shorter paths reduce storage overhead:
+- Prefer `/users/john` over `/application/data/users/john`
+- Type labels should be concise but descriptive
+
+### Set Storage
+
+Member sets are stored as arrays of strings. For very large sets:
+- Consider splitting into multiple categories with filter expressions
+- Use catalogues for grouping to enable efficient subset queries
+
+### N-gram Index Size
+
+Text indices can grow large for documents with much text:
+- Trigram indices: O(unique_trigrams × matching_documents)
+- Consider indexing only specific fields, not entire documents
+- Use the document content cache sparingly
+
+## See Also
+
+- [STORAGE-BACKENDS.md](STORAGE-BACKENDS.md) - Available storage backends
+- [Architecture/07-Storage-Layer.md](Architecture/07-Storage-Layer.md) - Storage layer architecture
+- [Architecture/11-Indexed-Entities.md](Architecture/11-Indexed-Entities.md) - Indexed entity documentation

+ 21 - 0
LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 Implexus Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 929 - 0
PERF.md

@@ -0,0 +1,929 @@
+# Filesystem
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        10      17.0944           58.50
+get_members                           100       1.2982          770.32
+create_category_complex                10      17.5155           57.09
+get_members_complex                   100       0.0732        13661.20
+create_category_bool                   10      19.4654           51.37
+contains_document                     100       0.3536         2828.05
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 100       1.5955          626.75
+create_document_large                  20      26.9829           37.06
+get_document                          100       0.0679        14729.71
+update_document                       100       0.1615         6191.95
+get_property                          100       0.0793        12603.98
+delete_document                       100       2.1201          471.67
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      100       1.6324          612.58
+get_children                          100       5.7901          172.71
+get_child                             100       0.0334        29922.20
+delete_container                      100       1.5152          659.98
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       10      58.5151           17.09
+get_keys                              100       0.0391        25542.78
+get_group                             100       0.0447        22381.38
+get_group_documents                   100       1.4262          701.16
+create_catalogue_numeric               10      15.7000           63.69
+get_all_groups                         10       0.0080       125000.00
+create_catalogue_multifield            10      66.8770           14.95
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           10    4031.1778            0.25
+search_contains                       100       0.0172        58139.53
+search_prefix                         100       0.0172        58241.12
+search_suffix                         100       0.0185        54171.18
+search_exact                          100       0.0241        41528.24
+search_rare_term                      100       0.0686        14577.26
+create_index_title                     10    4225.6821            0.24
+search_title                          100       8.9613          111.59
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    2210
+Total time:          87593.75 ms
+Overall throughput:  25.23 ops/sec
+================================================================================
+```
+
+
+# GDBM
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        10      14.8401           67.38
+get_members                           100       0.9584         1043.46
+create_category_complex                10      14.2312           70.27
+get_members_complex                   100       0.0455        21982.85
+create_category_bool                   10      15.6614           63.85
+contains_document                     100       0.3253         3074.09
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 100       1.5477          646.12
+create_document_large                  20      28.4049           35.21
+get_document                          100       0.0564        17730.50
+update_document                       100       0.1320         7576.91
+get_property                          100       0.0718        13929.52
+delete_document                       100       2.0077          498.08
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      100       1.5780          633.70
+get_children                          100       4.5167          221.40
+get_child                             100       0.0224        44682.75
+delete_container                      100       1.5605          640.82
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       10      49.6577           20.14
+get_keys                              100       0.0312        32030.75
+get_group                             100       0.0331        30175.02
+get_group_documents                   100       1.2734          785.27
+create_catalogue_numeric               10      15.6837           63.76
+get_all_groups                         10       0.0055       181818.18
+create_catalogue_multifield            10      60.1359           16.63
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           10    3825.0580            0.26
+search_contains                       100       0.0131        76569.68
+search_prefix                         100       0.0123        81366.97
+search_suffix                         100       0.0121        82576.38
+search_exact                          100       0.0131        76335.88
+search_rare_term                      100       0.0298        33590.86
+create_index_title                     10    3944.4954            0.25
+search_title                          100       9.3440          107.02
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    2210
+Total time:          82324.23 ms
+Overall throughput:  26.85 ops/sec
+================================================================================
+```
+
+
+# LMDB 
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        10      17.0414           58.68
+get_members                           100       1.1586          863.12
+create_category_complex                10      17.5007           57.14
+get_members_complex                   100       0.0592        16906.17
+create_category_bool                   10      18.2847           54.69
+contains_document                     100       0.3355         2980.89
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 100       1.5506          644.90
+create_document_large                  20      26.8442           37.25
+get_document                          100       0.0512        19531.25
+update_document                       100       0.1279         7819.83
+get_property                          100       0.0626        15976.99
+delete_document                       100       2.0853          479.56
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      100       1.7087          585.23
+get_children                          100       4.8080          207.98
+get_child                             100       0.0236        42390.84
+delete_container                      100       1.5315          652.94
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       10      58.4415           17.11
+get_keys                              100       0.0390        25641.03
+get_group                             100       0.0402        24900.40
+get_group_documents                   100       1.3889          719.99
+create_catalogue_numeric               10      17.0699           58.58
+get_all_groups                         10       0.0066       151515.15
+create_catalogue_multifield            10      74.2756           13.46
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           10    3766.0441            0.27
+search_contains                       100       0.0141        70972.32
+search_prefix                         100       0.0125        80128.21
+search_suffix                         100       0.0125        80256.82
+search_exact                          100       0.0133        75414.78
+search_rare_term                      100       0.0306        32701.11
+create_index_title                     10    3885.2337            0.26
+search_title                          100       8.6523          115.58
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    2210
+Total time:          81446.46 ms
+Overall throughput:  27.13 ops/sec
+================================================================================
+```
+
+
+
+
+# LATEST
+
+## Filesystem
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                         Iters      Batch    Total(ms)      Avg(ms)    Items/sec
+--------------------------------------------------------------------------------------------------------
+create_document_small_indexed       200          -      1031.47       5.1574       193.90
+create_document_large_indexed        40          -      1958.17      48.9544        20.43
+create_documents_batch_small_indexed       20         10     51461.80     257.3090         3.89
+create_documents_batch_large_indexed        4         10     19140.62     478.5155         2.09
+get_document_indexed                200          -        15.79       0.0790     12663.84
+update_document_indexed             200          -        64.80       0.3240      3086.56
+get_property_indexed                200          -        17.12       0.0856     11684.29
+delete_document_indexed             200          -      8862.59      44.3129        22.57
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        20      44.5243           22.46
+get_members                           200       2.7975          357.46
+create_category_complex                20      44.5416           22.45
+get_members_complex                   200       0.0759        13177.83
+create_category_bool                   20      46.3131           21.59
+contains_document                     200       0.9147         1093.28
+
+=== Document ===
+Operation                         Iters      Batch    Total(ms)      Avg(ms)    Items/sec
+--------------------------------------------------------------------------------------------------------
+create_document_small               200          -       861.54       4.3077       232.14
+create_document_large                40          -      1751.04      43.7761        22.84
+create_documents_batch_small         20         10     47831.97     239.1599         4.18
+create_documents_batch_large          4         10     17552.37     438.8092         2.28
+get_document                        200          -        14.57       0.0729     13725.89
+update_document                     200          -        61.26       0.3063      3264.83
+get_property                        200          -        16.07       0.0803     12447.10
+delete_document                     200          -      7042.97      35.2148        28.40
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      200       4.4083          226.84
+get_children                          200      16.7590           59.67
+get_child                             200       0.0400        25018.76
+delete_container                      200       4.6284          216.06
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       20     241.2537            4.15
+get_keys                              200       0.0466        21459.23
+get_group                             200       0.0509        19627.09
+get_group_documents                   200       3.6443          274.40
+create_catalogue_numeric               20      87.7897           11.39
+get_all_groups                         20       0.0109        91743.12
+create_catalogue_multifield            20     317.9346            3.15
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           20     200.6091            4.98
+search_contains                       200       0.0181        55309.73
+search_prefix                         200       0.0149        67249.50
+search_suffix                         200       0.0174        57553.96
+search_exact                          200       0.0187        53333.33
+search_rare_term                      200       0.0762        13130.25
+create_index_title                     20     179.8288            5.56
+search_title                          200      28.7437           34.79
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    5940
+Total time:          193391.19 ms
+Overall throughput:  30.71 ops/sec
+================================================================================
+
+Performing final cleanup...
+  Cleaning up documents...
+  Cleaning up indexes...
+  Cleaning up catalogues...
+  Cleaning up categories...
+  Cleaning up containers...
+Final cleanup complete: 1171 entities deleted, 0 errors
+```
+
+## GDBM
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                         Iters      Batch    Total(ms)      Avg(ms)    Items/sec
+--------------------------------------------------------------------------------------------------------
+create_document_small_indexed       200          -      1021.43       5.1071       195.80
+create_document_large_indexed        40          -      1973.27      49.3317        20.27
+create_documents_batch_small_indexed       20         10     50834.74     254.1737         3.93
+create_documents_batch_large_indexed        4         10     18158.56     453.9641         2.20
+get_document_indexed                200          -        11.93       0.0596     16770.08
+update_document_indexed             200          -        53.69       0.2684      3725.23
+get_property_indexed                200          -        12.95       0.0648     15444.02
+delete_document_indexed             200          -      9359.95      46.7997        21.37
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        20      40.9132           24.44
+get_members                           200       2.3258          429.96
+create_category_complex                20      40.9984           24.39
+get_members_complex                   200       0.0581        17217.63
+create_category_bool                   20      43.0252           23.24
+contains_document                     200       0.9127         1095.70
+
+=== Document ===
+Operation                         Iters      Batch    Total(ms)      Avg(ms)    Items/sec
+--------------------------------------------------------------------------------------------------------
+create_document_small               200          -       838.47       4.1924       238.53
+create_document_large                40          -      1723.12      43.0779        23.21
+create_documents_batch_small         20         10     47065.95     235.3297         4.25
+create_documents_batch_large          4         10     17945.16     448.6289         2.23
+get_document                        200          -        12.33       0.0616     16221.92
+update_document                     200          -        49.28       0.2464      4058.11
+get_property                        200          -        13.55       0.0678     14756.88
+delete_document                     200          -      7097.25      35.4863        28.18
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      200       4.3146          231.77
+get_children                          200      14.5554           68.70
+get_child                             200       0.0268        37369.21
+delete_container                      200       4.4876          222.84
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       20     230.2144            4.34
+get_keys                              200       0.0413        24233.61
+get_group                             200       0.0438        22828.44
+get_group_documents                   200       3.1391          318.57
+create_catalogue_numeric               20      57.5476           17.38
+get_all_groups                         20       0.0063       160000.00
+create_catalogue_multifield            20     331.6019            3.02
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           20     183.9103            5.44
+search_contains                       200       0.0149        67024.13
+search_prefix                         200       0.0116        86169.75
+search_suffix                         200       0.0124        80742.83
+search_exact                          200       0.0139        72072.07
+search_rare_term                      200       0.0574        17427.68
+create_index_title                     20     160.6221            6.23
+search_title                          200      29.0355           34.44
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    5940
+Total time:          189758.53 ms
+Overall throughput:  31.30 ops/sec
+================================================================================
+```
+
+# LMDB
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                         Iters      Batch    Total(ms)      Avg(ms)    Items/sec
+--------------------------------------------------------------------------------------------------------
+create_document_small_indexed       200          -      1028.79       5.1439       194.40
+create_document_large_indexed        40          -      2008.79      50.2197        19.91
+create_documents_batch_small_indexed       20         10     50342.93     251.7146         3.97
+create_documents_batch_large_indexed        4         10     18697.03     467.4257         2.14
+get_document_indexed                200          -        12.27       0.0613     16302.58
+update_document_indexed             200          -        59.71       0.2985      3349.75
+get_property_indexed                200          -        13.99       0.0699     14296.95
+delete_document_indexed             200          -      9352.99      46.7649        21.38
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        20      41.1274           24.31
+get_members                           200       2.3431          426.79
+create_category_complex                20      41.4718           24.11
+get_members_complex                   200       0.0565        17692.85
+create_category_bool                   20      43.1442           23.18
+contains_document                     200       0.9112         1097.47
+
+=== Document ===
+Operation                         Iters      Batch    Total(ms)      Avg(ms)    Items/sec
+--------------------------------------------------------------------------------------------------------
+create_document_small               200          -       838.38       4.1919       238.55
+create_document_large                40          -      1716.94      42.9235        23.30
+create_documents_batch_small         20         10     47701.81     238.5091         4.19
+create_documents_batch_large          4         10     18109.93     452.7483         2.21
+get_document                        200          -        11.77       0.0589     16989.47
+update_document                     200          -        49.87       0.2494      4010.27
+get_property                        200          -        13.43       0.0671     14893.14
+delete_document                     200          -      7031.72      35.1586        28.44
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      200       4.4202          226.24
+get_children                          200      14.7210           67.93
+get_child                             200       0.0274        36556.39
+delete_container                      200       4.5213          221.18
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       20     229.6752            4.35
+get_keys                              200       0.0400        24971.91
+get_group                             200       0.0433        23110.70
+get_group_documents                   200       3.1608          316.37
+create_catalogue_numeric               20      56.8109           17.60
+get_all_groups                         20       0.0065       153846.15
+create_catalogue_multifield            20     330.8580            3.02
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           20     156.4918            6.39
+search_contains                       200       0.0150        66622.25
+search_prefix                         200       0.0114        88028.17
+search_suffix                         200       0.0122        82034.45
+search_exact                          200       0.0138        72727.27
+search_rare_term                      200       0.0614        16294.61
+create_index_title                     20     185.2466            5.40
+search_title                          200      29.0382           34.44
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    5940
+Total time:          190566.32 ms
+Overall throughput:  31.17 ops/sec
+================================================================================
+```
+
+
+# Final PERF for the night
+
+## Filesystem (-i 250)
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small_indexed         250       7.0092          142.67
+create_document_large_indexed          50      56.9881           17.55
+get_document_indexed                  250       0.0753        13275.98
+update_document_indexed               250       0.3552         2815.03
+get_property_indexed                  250       0.0816        12251.90
+delete_document_indexed               250      10.8612           92.07
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        25      60.4734           16.54
+get_members                           250       3.5705          280.08
+create_category_complex                25      60.6420           16.49
+get_members_complex                   250       0.0777        12872.00
+create_category_bool                   25      62.9581           15.88
+contains_document                     250       1.3227          756.01
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 250       5.8522          170.88
+create_document_large                  50      50.1501           19.94
+get_document                          250       0.1317         7592.09
+update_document                       250       0.5829         1715.57
+get_property                          250       0.1541         6489.97
+delete_document                       250      10.4649           95.56
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      250       6.1724          162.01
+get_children                          250      23.4848           42.58
+get_child                             250       0.0377        26522.38
+delete_container                      250       6.4657          154.66
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       25     390.1453            2.56
+get_keys                              250       0.0446        22397.42
+get_group                             250       0.0480        20812.52
+get_group_documents                   250       4.7866          208.92
+create_catalogue_numeric               25      85.6909           11.67
+get_all_groups                         25       0.0105        95057.03
+create_catalogue_multifield            25     522.5041            1.91
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           25     192.2876            5.20
+search_contains                       250       0.0187        53544.66
+search_prefix                         250       0.0153        65427.90
+search_suffix                         250       0.0180        55654.50
+search_exact                          250       0.0195        51229.51
+search_rare_term                      250       0.0745        13424.98
+create_index_title                     25     192.3760            5.20
+search_title                          250      45.4447           22.00
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    6825
+Total time:          76326.50 ms
+Overall throughput:  89.42 ops/sec
+================================================================================
+```
+
+## GDBM (-i 250)
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small_indexed         250       6.9293          144.32
+create_document_large_indexed          50      56.0397           17.84
+get_document_indexed                  250       0.0586        17062.52
+update_document_indexed               250       0.3162         3162.16
+get_property_indexed                  250       0.0637        15693.66
+delete_document_indexed               250      10.7466           93.05
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        25      58.3247           17.15
+get_members                           250       3.2833          304.57
+create_category_complex                25      61.2198           16.33
+get_members_complex                   250       0.0615        16260.16
+create_category_bool                   25      63.4099           15.77
+contains_document                     250       1.3453          743.34
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 250       6.0066          166.48
+create_document_large                  50      49.3057           20.28
+get_document                          250       0.0589        16983.70
+update_document                       250       0.2646         3778.78
+get_property                          250       0.0636        15734.16
+delete_document                       250       8.9377          111.89
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      250       6.3005          158.72
+get_children                          250      21.0522           47.50
+get_child                             250       0.0292        34274.75
+delete_container                      250       6.6352          150.71
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       25     387.5010            2.58
+get_keys                              250       0.0392        25518.02
+get_group                             250       0.0421        23764.26
+get_group_documents                   250       4.1567          240.58
+create_catalogue_numeric               25      78.2080           12.79
+get_all_groups                         25       0.0071       140449.44
+create_catalogue_multifield            25     528.5178            1.89
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           25     174.0355            5.75
+search_contains                       250       0.0144        69599.11
+search_prefix                         250       0.0114        87811.73
+search_suffix                         250       0.0126        79339.89
+search_exact                          250       0.0139        71715.43
+search_rare_term                      250       0.0565        17709.15
+create_index_title                     25     171.2890            5.84
+search_title                          250      45.3229           22.06
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    6825
+Total time:          73785.73 ms
+Overall throughput:  92.50 ops/sec
+================================================================================
+```
+
+
+## LMDB (-i 250)
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small_indexed         250       6.9978          142.90
+create_document_large_indexed          50      56.7201           17.63
+get_document_indexed                  250       0.0587        17042.74
+update_document_indexed               250       0.3144         3180.46
+get_property_indexed                  250       0.0655        15257.86
+delete_document_indexed               250      10.6036           94.31
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        25      56.7375           17.63
+get_members                           250       3.0647          326.29
+create_category_complex                25      56.4852           17.70
+get_members_complex                   250       0.0553        18070.11
+create_category_bool                   25      58.9672           16.96
+contains_document                     250       1.3022          767.95
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 250       7.7645          128.79
+create_document_large                  50      49.8867           20.05
+get_document                          250       0.0567        17637.93
+update_document                       250       0.2668         3748.29
+get_property                          250       0.0626        15974.44
+delete_document                       250       8.8921          112.46
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      250       6.3683          157.03
+get_children                          250      20.8936           47.86
+get_child                             250       0.0263        37953.54
+delete_container                      250       6.4540          154.94
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       25     382.3475            2.62
+get_keys                              250       0.0393        25476.41
+get_group                             250       0.0437        22893.77
+get_group_documents                   250       4.2143          237.29
+create_catalogue_numeric               25      79.5218           12.58
+get_all_groups                         25       0.0061       163398.69
+create_catalogue_multifield            25     500.7043            2.00
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           25     172.4624            5.80
+search_contains                       250       0.0143        70106.56
+search_prefix                         250       0.0111        90122.57
+search_suffix                         250       0.0122        82128.78
+search_exact                          250       0.0134        74783.13
+search_rare_term                      250       0.0572        17476.41
+create_index_title                     25     172.0794            5.81
+search_title                          250      47.2708           21.15
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    6825
+Total time:          73543.91 ms
+Overall throughput:  92.80 ops/sec
+================================================================================
+```
+
+## -i 100
+- Filesystem **Overall throughput:  340.68 ops/sec**
+- GDBM **Overall throughput:  371.98 ops/sec**
+- LMDB **Overall throughput:  372.55 ops/sec**
+
+
+# Async changes
+## Filesystem
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small_indexed         100       1.9896          502.62
+create_document_large_indexed          20      31.2669           31.98
+get_document_indexed                  100       0.2532         3950.07
+update_document_indexed               100       0.3861         2590.34
+get_property_indexed                  100       0.2509         3985.97
+delete_document_indexed               100       2.7629          361.94
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        10       6.6440          150.51
+get_members                           100       0.3023         3307.64
+create_category_complex                10       7.0036          142.78
+get_members_complex                   100       0.3061         3267.01
+create_category_bool                   10       7.0697          141.45
+contains_document                     100       0.0164        61087.35
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 100       1.6986          588.70
+create_document_large                  20      29.7376           33.63
+get_document                          100       0.2591         3859.51
+update_document                       100       0.4225         2366.86
+get_property                          100       0.2404         4159.56
+delete_document                       100       2.5703          389.07
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      100       1.7819          561.20
+get_children                          100      20.7077           48.29
+get_child                             100       0.2006         4984.55
+delete_container                      100       2.2147          451.52
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       10       7.1349          140.16
+get_keys                              100       0.0092       109170.31
+get_group                             100       0.0099       101419.88
+get_group_documents                   100       0.0103        97560.98
+create_catalogue_numeric               10       7.0895          141.05
+get_all_groups                         10       0.0108        92592.59
+create_catalogue_multifield            10       6.9261          144.38
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           10       8.6671          115.38
+search_contains                       100       0.0180        55463.12
+search_prefix                         100       0.0145        68728.52
+search_suffix                         100       0.0147        67980.97
+search_exact                          100       0.0168        59453.03
+search_rare_term                      100       0.0713        14027.21
+create_index_title                     10       9.1340          109.48
+search_title                          100       0.1069         9358.04
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    2730
+Total time:          5480.36 ms
+Overall throughput:  498.14 ops/sec
+================================================================================
+```
+
+## GDBM
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small_indexed         100       1.8638          536.53
+create_document_large_indexed          20      30.3226           32.98
+get_document_indexed                  100       0.1888         5295.21
+update_document_indexed               100       0.3587         2788.23
+get_property_indexed                  100       0.2179         4588.42
+delete_document_indexed               100       3.1966          312.84
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        10       6.5400          152.91
+get_members                           100       0.2725         3669.59
+create_category_complex                10       6.6872          149.54
+get_members_complex                   100       0.2610         3831.71
+create_category_bool                   10       7.2314          138.29
+contains_document                     100       0.0138        72727.27
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 100       1.7888          559.04
+create_document_large                  20      29.5335           33.86
+get_document                          100       0.2650         3773.87
+update_document                       100       0.3472         2880.18
+get_property                          100       0.2322         4306.26
+delete_document                       100       2.4786          403.46
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      100       1.7184          581.94
+get_children                          100      19.0584           52.47
+get_child                             100       0.1640         6097.19
+delete_container                      100       2.0162          495.97
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       10       6.3784          156.78
+get_keys                              100       0.0152        65789.47
+get_group                             100       0.0159        62695.92
+get_group_documents                   100       0.0162        61652.28
+create_catalogue_numeric               10       8.0344          124.46
+get_all_groups                         10       0.0104        96153.85
+create_catalogue_multifield            10       7.2731          137.49
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           10       7.5515          132.42
+search_contains                       100       0.0160        62305.30
+search_prefix                         100       0.0123        81103.00
+search_suffix                         100       0.0124        80385.85
+search_exact                          100       0.0146        68446.27
+search_rare_term                      100       0.0596        16781.34
+create_index_title                     10       7.5834          131.87
+search_title                          100       0.0606        16496.21
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    2730
+Total time:          5236.51 ms
+Overall throughput:  521.34 ops/sec
+================================================================================
+```
+
+## LMDB
+
+```
+================================================================================
+                         IMPLEXUS PERFORMANCE RESULTS                          
+================================================================================
+
+=== PostIndexDocument ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small_indexed         100       1.8420          542.89
+create_document_large_indexed          20      30.2990           33.00
+get_document_indexed                  100       0.2191         4563.50
+update_document_indexed               100       0.3317         3015.14
+get_property_indexed                  100       0.2198         4550.42
+delete_document_indexed               100       2.8281          353.60
+
+=== Category ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_category                        10       6.7399          148.37
+get_members                           100       0.2659         3761.24
+create_category_complex                10       7.2135          138.63
+get_members_complex                   100       0.2580         3875.97
+create_category_bool                   10       7.0437          141.97
+contains_document                     100       0.0130        76863.95
+
+=== Document ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_document_small                 100       1.6204          617.12
+create_document_large                  20      28.6719           34.88
+get_document                          100       0.2361         4235.85
+update_document                       100       0.4898         2041.77
+get_property                          100       0.2116         4725.00
+delete_document                       100       2.3944          417.65
+
+=== Container ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_container                      100       1.6698          598.88
+get_children                          100      17.9587           55.68
+get_child                             100       0.1855         5390.84
+delete_container                      100       2.0171          495.76
+
+=== Catalogue ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_catalogue                       10       6.4090          156.03
+get_keys                              100       0.0066       151057.40
+get_group                             100       0.0068       146412.88
+get_group_documents                   100       0.0070       141843.97
+create_catalogue_numeric               10       6.8064          146.92
+get_all_groups                         10       0.0088       113636.36
+create_catalogue_multifield            10       6.9436          144.02
+
+=== Index ===
+Operation                      Iterations     Avg (ms)         Ops/sec
+--------------------------------------------------------------------------------
+create_index                           10       7.1468          139.92
+search_contains                       100       0.0150        66889.63
+search_prefix                         100       0.0121        82918.74
+search_suffix                         100       0.0118        84530.85
+search_exact                          100       0.0136        73313.78
+search_rare_term                      100       0.0591        16923.34
+create_index_title                     10       7.2481          137.97
+search_title                          100       0.0604        16567.26
+
+================================================================================
+                              SUMMARY                                          
+================================================================================
+Total operations:    2730
+Total time:          5030.35 ms
+Overall throughput:  542.71 ops/sec
+================================================================================
+```

+ 359 - 0
README.md

@@ -0,0 +1,359 @@
+# Implexus
+
+A path-based document database library and engine for Vala.
+
+## Features
+
+- **Path-based identification**: Access entities using intuitive paths like `/users/john`
+- **Four entity types**: Container, Document, Category, and Index
+- **Dual operation modes**: Embedded (in-process) and client/server (remote TCP)
+- **Transaction support**: ACID transactions with commit/rollback
+- **Expression-based queries**: Filter entities using Invercargill.Expressions
+- **Binary serialization**: Efficient storage format
+- **No Libgee dependency**: Uses Invercargill.DataStructures for collections
+
+## Dependencies
+
+- GLib 2.0
+- GObject 2.0
+- GIO 2.0
+- Invercargill-1 (collections, expressions, Element types, JSON)
+
+## Installation
+
+### From Source
+
+```bash
+# Configure build
+meson setup build
+
+# Compile
+meson compile -C build
+
+# Install (may require root privileges when installing to a system prefix)
+meson install -C build
+```
+
+### Dependencies
+
+Ensure `invercargill-1` is installed and discoverable via `pkg-config` (its `.pc` file must be on your `PKG_CONFIG_PATH`), so that Meson can resolve the dependency at configure time.
+
+## Quick Start
+
+### Embedded Mode
+
+Embedded mode runs the database in-process, ideal for single-application use:
+
+```vala
+using Implexus;
+using Implexus.Core;
+using Implexus.Engine;
+
+public static int main(string[] args) {
+    try {
+        // Create embedded engine configuration
+        var config = EngineConfiguration.embedded("./my_database");
+        
+        // Create engine
+        Core.Engine engine = EngineFactory.create(config);
+        
+        // Get root container and create entities
+        var root = engine.get_root();
+        var users = root.create_container("users");
+        
+        // Create a document with type label
+        var john = users.create_document("john", "User");
+        john.set_entity_property("email", new Invercargill.NativeElement<string>("john@example.com"));
+        john.set_entity_property("age", new Invercargill.NativeElement<int>(30));
+        
+        // Query by type
+        foreach (var entity in engine.query_by_type("User")) {
+            stdout.printf("User: %s\n", entity.name);
+        }
+        
+        return 0;
+    } catch (Error e) {
+        stderr.printf("Error: %s\n", e.message);
+        return 1;
+    }
+}
+```
+
+### Client/Server Mode
+
+For multi-client access, run the server daemon:
+
+```bash
+# Start server
+implexusd --port 9876 --storage /path/to/database
+```
+
+Then connect from a client:
+
+```vala
+using Implexus;
+using Implexus.Core;
+using Implexus.Engine;
+
+public static int main(string[] args) {
+    try {
+        // Create remote engine configuration
+        var config = EngineConfiguration.remote("localhost", 9876);
+        
+        // Create engine - same API as embedded mode!
+        Core.Engine engine = EngineFactory.create(config);
+        
+        // Use the identical API
+        var root = engine.get_root();
+        var users = root.create_container("users");
+        
+        // ... rest of code is identical to embedded mode
+        
+        return 0;
+    } catch (Error e) {
+        stderr.printf("Error: %s\n", e.message);
+        return 1;
+    }
+}
+```
+
+## Entity Types
+
+### Container
+
+A container for child entities, similar to a filesystem folder:
+
+```vala
+var root = engine.get_root();
+var users = root.create_container("users");
+var projects = root.create_container("projects");
+```
+
+### Document
+
+An object with properties and a type label for querying:
+
+```vala
+var doc = users.create_document("john", "User");
+doc.set_entity_property("email", new Invercargill.NativeElement<string>("john@example.com"));
+doc.set_entity_property("active", new Invercargill.NativeElement<bool?>(true));
+
+// Retrieve properties
+var email = doc.get_entity_property("email");
+if (email != null) {
+    stdout.printf("Email: %s\n", ((!) email).to_string());
+}
+```
+
+### Category
+
+Auto-generated categories based on expression evaluation:
+
+```vala
+var category = root.create_category("active_users", "User", "active==true");
+// category.children() returns all User documents where active==true
+```
+
+### Index
+
+Text search results organized as a container:
+
+```vala
+var index = root.create_index("search_results", "search terms");
+// index contains documents matching the search terms
+```
+
+## Path-Based Access
+
+Access any entity using its path:
+
+```vala
+// Get entity by path
+var path = new EntityPath("/users/john");
+var entity = engine.get_entity(path);
+
+// Check existence
+bool exists = engine.entity_exists(new EntityPath("/users/john"));
+```
+
+## Transactions
+
+Group multiple operations in a transaction:
+
+```vala
+var tx = engine.begin_transaction();
+
+try {
+    var jane = users.create_document("jane", "User");
+    jane.set_entity_property("email", new Invercargill.NativeElement<string>("jane@example.com"));
+    
+    var bob = users.create_document("bob", "User");
+    bob.set_entity_property("email", new Invercargill.NativeElement<string>("bob@example.com"));
+    
+    tx.commit();
+} catch (Error e) {
+    tx.rollback();
+    stderr.printf("Transaction failed: %s\n", e.message);
+}
+```
+
+## Querying
+
+### By Type
+
+```vala
+foreach (var entity in engine.query_by_type("User")) {
+    stdout.printf("Found user: %s\n", entity.name);
+}
+```
+
+### By Expression
+
+```vala
+// Find active users over 25
+foreach (var entity in engine.query_by_expression("User", "active==true && age>25")) {
+    stdout.printf("Active user over 25: %s\n", entity.name);
+}
+```
+
+## Architecture
+
+Implexus is organized into the following namespaces:
+
+| Namespace | Description |
+|-----------|-------------|
+| `Implexus.Core` | Core interfaces: Engine, Entity, EntityPath, Transaction |
+| `Implexus.Entities` | Entity implementations: Container, Document, Category, Index |
+| `Implexus.Storage` | Storage layer: Storage, DBM interfaces, binary serialization |
+| `Implexus.Engine` | Engine implementations: EmbeddedEngine, RemoteEngine |
+| `Implexus.Protocol` | Client-server protocol: Message, Request, Response |
+| `Implexus.Server` | Server implementation: Server, ClientHandler |
+
+For detailed design documentation, see the [Architecture/](Architecture/) directory:
+
+- [Overview](Architecture/01-Overview.md) - High-level description and design principles
+- [Namespaces](Architecture/02-Namespaces.md) - Namespace organization
+- [Core Interfaces](Architecture/03-Core-Interfaces.md) - Entity, Engine, Storage interfaces
+- [Class Hierarchy](Architecture/04-Class-Hierarchy.md) - Inheritance relationships
+- [Path System](Architecture/05-Path-System.md) - Path parsing and resolution
+- [Entity Types](Architecture/06-Entity-Types.md) - Container, Document, Category, Index
+- [Storage Layer](Architecture/07-Storage-Layer.md) - DBM abstraction and serialization
+- [Set Operations](Architecture/08-Set-Operations.md) - Entity children manipulation
+- [Client-Server Protocol](Architecture/09-Client-Server-Protocol.md) - TCP protocol design
+- [File Organization](Architecture/10-File-Organization.md) - Project structure
+
+## Project Structure
+
+```
+implexus/
+├── src/                    # Source files
+│   ├── Core/               # Implexus.Core namespace
+│   ├── Entities/           # Implexus.Entities namespace
+│   ├── Storage/            # Implexus.Storage namespace
+│   ├── Engine/             # Implexus.Engine namespace
+│   ├── Protocol/           # Implexus.Protocol namespace
+│   └── Server/             # Implexus.Server namespace
+├── examples/               # Example programs
+│   └── BasicUsage.vala     # Comprehensive usage example
+├── tools/                  # CLI tools
+│   └── implexusd/          # Server daemon
+├── tests/                  # Test suite
+├── vapi/                   # Custom VAPI files
+├── Architecture/           # Architecture documentation
+├── meson.build             # Build configuration
+└── README.md               # This file
+```
+
+## Building from Source
+
+### Requirements
+
+- Meson build system
+- Vala compiler
+- C compiler (gcc or clang)
+- Invercargill-1 library
+
+### Build Steps
+
+```bash
+# Setup build directory
+meson setup build
+
+# Compile
+meson compile -C build
+
+# Run tests
+meson test -C build
+
+# Install (optional)
+meson install -C build
+```
+
+## Running Tests
+
+```bash
+# Run all tests
+meson test -C build
+
+# Run with verbose output
+meson test -C build -v
+
+# Run specific test
+meson test -C build implexus_test
+```
+
+## Examples
+
+See [examples/BasicUsage.vala](examples/BasicUsage.vala) for a comprehensive example demonstrating:
+
+- Mode selection (embedded vs remote)
+- Entity creation and deletion
+- Property management
+- Path-based access
+- Transactions
+- Querying by type and expression
+
+Build and run the example:
+
+```bash
+# Build
+meson compile -C build
+
+# Run in embedded mode
+./build/examples/basicusage
+
+# Run in remote mode (requires running server)
+./build/examples/basicusage --remote --host localhost --port 9876
+```
+
+## Server Daemon
+
+The `implexusd` daemon provides TCP access to the database:
+
+```bash
+# Basic usage
+implexusd --port 9876 --storage /path/to/database
+
+# Options
+# --port PORT       TCP port to listen on (default: 9876)
+# --storage PATH    Path to database storage
+# --host HOST       Host to bind to (default: 0.0.0.0)
+```
+
+## License
+
+MIT License - see [LICENSE](LICENSE) for details.
+
+```
+Copyright (c) 2026 Implexus Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+```

+ 256 - 0
STORAGE-BACKENDS.md

@@ -0,0 +1,256 @@
+# Storage Backends
+
+Implexus provides a flexible storage layer through the [`Dbm`](src/Storage/Dbm.vala) interface, allowing applications to choose the most appropriate backend for their use case.
+
+## Overview
+
+The [`Dbm`](src/Storage/Dbm.vala:12) interface defines a low-level key-value storage abstraction with support for:
+
+- Basic CRUD operations (`get`, `set`, `delete`, `has_key`)
+- Key enumeration via the `keys` property
+- Transaction support (`begin_transaction`, `commit_transaction`, `rollback_transaction`)
+
+The storage layer is used by [`BasicStorage`](src/Storage/Storage.vala:186) to provide high-level entity persistence operations, and by [`IndexManager`](src/Storage/IndexManager.vala:49) for index storage.
+
+## Available Backends
+
+### FilesystemDbm
+
+**Location:** [`src/Storage/FilesystemDbm.vala`](src/Storage/FilesystemDbm.vala:17)
+
+A simple file-based storage implementation where each key-value pair is stored in a separate file within a directory.
+
+#### Characteristics
+
+| Property | Value |
+|----------|-------|
+| **Transaction Support** | Software-based (in-memory buffering) |
+| **Performance** | Good for small datasets, degrades with size |
+| **Dependencies** | None (pure GLib) |
+| **File Format** | One file per key, hex-encoded filenames |
+
+#### Best For
+
+- Development and testing environments
+- Small embedded applications
+- Scenarios where external database dependencies are undesirable
+
+#### Limitations
+
+- Not suitable for high-volume production use
+- Performance degrades significantly with large datasets due to filesystem overhead
+- No compression or optimization for storage space
+
+#### Usage
+
+```vala
+var dbm = new FilesystemDbm("/path/to/data/directory");
+var storage = new BasicStorage(dbm);
+```
+
+---
+
+### GdbmDbm
+
+**Location:** [`src/Storage/Gdbm/GdbmDbm.vala`](src/Storage/Gdbm/GdbmDbm.vala:16)
+
+A production-ready backend using the GNU DBM library for persistent key-value storage.
+
+#### Characteristics
+
+| Property | Value |
+|----------|-------|
+| **Transaction Support** | Software-based (in-memory buffering) |
+| **Performance** | Good for medium datasets |
+| **Dependencies** | libgdbm |
+| **File Format** | Single database file with hash table structure |
+
+#### Best For
+
+- Single-threaded applications
+- Medium workloads with moderate read/write ratios
+- Applications already using GDBM in their stack
+
+#### Limitations
+
+- Limited concurrent access support (single writer recommended)
+- Software-based transactions (not ACID)
+- Database file can become fragmented over time
+
+#### Usage
+
+```vala
+var dbm = new GdbmDbm();
+dbm.open("/path/to/database.gdbm", false); // false = read-write mode
+var storage = new BasicStorage(dbm);
+```
+
+---
+
+### LmdbDbm
+
+**Location:** [`src/Storage/Lmdb/LmdbDbm.vala`](src/Storage/Lmdb/LmdbDbm.vala:16)
+
+A high-performance backend using the Lightning Memory-Mapped Database (LMDB).
+
+#### Characteristics
+
+| Property | Value |
+|----------|-------|
+| **Transaction Support** | Native ACID transactions |
+| **Performance** | Excellent for read-heavy workloads |
+| **Dependencies** | liblmdb |
+| **File Format** | Memory-mapped B+tree |
+| **Map Size** | Default 1GB (configurable) |
+
+#### Features
+
+- **Memory-mapped**: Direct OS-level caching for optimal read performance
+- **B+tree structure**: Efficient range queries and ordered traversal
+- **MVCC**: Multi-version concurrency control for consistent reads
+- **ACID compliant**: Full transaction support with crash recovery
+
+#### Best For
+
+- High-performance applications
+- Read-heavy workloads
+- Multi-threaded read scenarios
+- Applications requiring ACID guarantees
+
+#### Limitations
+
+- Fixed map size (must be configured at environment creation)
+- Single writer at a time (multiple readers allowed)
+- Write performance can be slower than reads due to copy-on-write
+
+#### Usage
+
+```vala
+var dbm = new LmdbDbm();
+dbm.open("/path/to/lmdb/directory", false); // false = read-write mode
+var storage = new BasicStorage(dbm);
+```
+
+## Comparison Table
+
+| Backend | Transaction Type | Read Performance | Write Performance | Concurrent Reads | Concurrent Writes | Dependencies | Recommended Use Case |
+|---------|------------------|------------------|-------------------|------------------|-------------------|--------------|---------------------|
+| FilesystemDbm | Software | Poor | Poor | No | No | None | Development, testing |
+| GdbmDbm | Software | Good | Good | Limited | No | libgdbm | Single-threaded apps |
+| LmdbDbm | Native ACID | Excellent | Good | Yes | Single writer | liblmdb | Production, high-performance |
+
+## Configuration
+
+When using [`EngineConfiguration`](src/Engine/EngineConfiguration.vala:60) for embedded mode, the storage backend is selected based on the application's needs:
+
+```vala
+// Create embedded configuration with storage path
+var config = EngineConfiguration.embedded("/path/to/database");
+
+// The EngineFactory creates the appropriate backend
+var engine = EngineFactory.create(config);
+```
+
+### Custom Backend Selection
+
+For direct control over the storage backend:
+
+```vala
+// Using LMDB for high performance
+var lmdb = new LmdbDbm();
+lmdb.open("/data/implexus-lmdb", false);
+var storage = new BasicStorage(lmdb);
+var index_manager = new IndexManager(lmdb);
+
+// Using GDBM for medium workloads
+var gdbm = new GdbmDbm();
+gdbm.open("/data/implexus.gdbm", false);
+var storage = new BasicStorage(gdbm);
+
+// Using FilesystemDbm for testing
+var fsdbm = new FilesystemDbm("/data/test-storage");
+var storage = new BasicStorage(fsdbm);
+```
+
+## Transaction Behavior
+
+All backends implement the same transaction interface, but with different guarantees:
+
+### Software-Based Transactions (FilesystemDbm, GdbmDbm)
+
+```vala
+dbm.begin_transaction();
+try {
+    dbm.set("key1", value1);
+    dbm.set("key2", value2);
+    dbm.delete("key3");
+    dbm.commit_transaction();
+} catch (StorageError e) {
+    dbm.rollback_transaction();
+}
+```
+
+Operations are buffered in memory and applied atomically on commit. Rollback discards the buffer without modifying persistent storage.
+
+### Native Transactions (LmdbDbm)
+
+LMDB provides true ACID transactions with crash recovery and durability guarantees. The same API is used:
+
+```vala
+dbm.begin_transaction();
+try {
+    dbm.set("key1", value1);
+    dbm.set("key2", value2);
+    dbm.commit_transaction();
+} catch (StorageError e) {
+    dbm.rollback_transaction();
+}
+```
+
+## Performance Considerations
+
+### Read-Heavy Workloads
+
+For applications with high read-to-write ratios, **LmdbDbm** is recommended due to:
+- Memory-mapped files enable OS-level caching
+- MVCC allows concurrent readers without blocking
+- B+tree structure optimizes sequential access
+
+### Write-Heavy Workloads
+
+For write-intensive applications:
+- **GdbmDbm** may perform better for single-writer scenarios
+- **LmdbDbm** provides ACID guarantees but with copy-on-write overhead
+
+### Small Datasets
+
+For development or small embedded applications:
+- **FilesystemDbm** is simplest with no external dependencies
+- Performance is acceptable for datasets under a few thousand keys
+
+## Migration Between Backends
+
+To migrate data between backends:
+
+```vala
+// Open source backend
+var source = new GdbmDbm();
+source.open("/old/database.gdbm", true); // read-only
+
+// Open target backend
+var target = new LmdbDbm();
+target.open("/new/lmdb/dir", false); // read-write
+
+// Copy all keys
+foreach (var key in source.keys) {
+    var value = source.get(key);
+    if (value != null) {
+        target.set(key, (!) value);
+    }
+}
+```
+
+## See Also
+
+- [KEY-SCHEMA.md](KEY-SCHEMA.md) - Key schema documentation
+- [Architecture/07-Storage-Layer.md](Architecture/07-Storage-Layer.md) - Storage layer architecture

+ 290 - 0
examples/BasicUsage.vala

@@ -0,0 +1,290 @@
+/**
+ * Basic usage example for Implexus
+ * 
+ * This example demonstrates the unified API facade that allows
+ * applications to seamlessly switch between embedded and remote modes.
+ * 
+ * Features demonstrated:
+ * - Mode selection (embedded vs remote)
+ * - Entity creation (Container, Document, Category, Index)
+ * - Property management
+ * - Path-based access
+ * - Transactions
+ * - Querying by type and expression
+ * - Entity deletion
+ * 
+ * Note: All I/O operations are async. This example uses MainLoop
+ * to run async operations from the synchronous main() function.
+ */
+using Implexus;
+using Implexus.Core;
+using Implexus.Engine;
+
+// Global engine reference for async operations
+Core.Engine engine;
+
+/**
+ * Entry point: parses command-line options, then drives the async
+ * example through a MainLoop (Vala async code needs a running main loop).
+ *
+ * @param args command-line arguments; see --help for supported options
+ * @return process exit code (0 on success, 1 on error)
+ */
+public static int main(string[] args) {
+    print("Implexus Basic Usage Example\n");
+    print("=============================\n\n");
+    
+    // Parse command line arguments to select mode
+    bool use_remote = false;
+    string host = "localhost";
+    uint16 port = 9876;
+    string storage_path = "./example_data";
+    
+    for (int i = 1; i < args.length; i++) {
+        if (args[i] == "--remote") {
+            use_remote = true;
+        } else if (args[i] == "--host" && i + 1 < args.length) {
+            host = args[++i];
+        } else if (args[i] == "--port" && i + 1 < args.length) {
+            // Validate the port instead of silently truncating bad input:
+            // int.parse() returns 0 on failure, and a bare (uint16) cast
+            // would wrap out-of-range values.
+            int parsed_port;
+            if (!int.try_parse(args[++i], out parsed_port)
+                || parsed_port < 1 || parsed_port > uint16.MAX) {
+                stderr.printf("Invalid port: %s\n", args[i]);
+                return 1;
+            }
+            port = (uint16) parsed_port;
+        } else if (args[i] == "--path" && i + 1 < args.length) {
+            storage_path = args[++i];
+        } else if (args[i] == "--help") {
+            print("Usage: basicusage [OPTIONS]\n");
+            print("Options:\n");
+            print("  --remote         Use remote mode (default: embedded)\n");
+            print("  --host HOST      Remote server host (default: localhost)\n");
+            print("  --port PORT      Remote server port (default: 9876)\n");
+            print("  --path PATH      Storage path for embedded mode (default: ./example_data)\n");
+            print("  --help           Show this help message\n");
+            return 0;
+        }
+    }
+    
+    // Run the async example: .begin() starts the coroutine, the callback
+    // collects its result with .end() and stops the loop.
+    var loop = new MainLoop();
+    int result = 0;
+    Error? error = null;
+    
+    run_example.begin(use_remote, host, port, storage_path, (obj, res) => {
+        try {
+            result = run_example.end(res);
+        } catch (Error e) {
+            error = e;
+            result = 1;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        stderr.printf("Error: %s\n", ((!)error).message);
+    }
+    
+    return result;
+}
+
+/**
+ * Runs the full example against either an embedded or a remote engine.
+ *
+ * The engine is stored in the file-level `engine` variable so it stays
+ * alive for the duration of the async chain.
+ *
+ * @param use_remote   true to connect to a running implexusd server
+ * @param host         remote server host (ignored in embedded mode)
+ * @param port         remote server port (ignored in embedded mode)
+ * @param storage_path on-disk storage location (embedded mode only)
+ * @return process exit code (0 on success)
+ * @throws Error any engine, storage or protocol error is propagated to
+ *               the caller's completion callback
+ */
+async int run_example(bool use_remote, string host, uint16 port, string storage_path) throws Error {
+    // ============================================================
+    // MODE SELECTION
+    // The only difference between modes is how you create the configuration!
+    // ============================================================
+    
+    EngineConfiguration config;
+    if (use_remote) {
+        print("Mode: REMOTE (%s:%u)\n\n", host, port);
+        config = EngineConfiguration.remote(host, port);
+    } else {
+        print("Mode: EMBEDDED (path: %s)\n\n", storage_path);
+        config = EngineConfiguration.embedded(storage_path);
+    }
+    
+    // Create the engine - same API regardless of mode
+    engine = EngineFactory.create(config);
+    
+    print("Engine created successfully!\n\n");
+    
+    // ============================================================
+    // WORKING WITH CONTAINERS AND DOCUMENTS
+    // ============================================================
+    
+    print("--- Creating Containers and Documents ---\n");
+    
+    // Get the root container (always exists)
+    var root = yield engine.get_root_async();
+    print("Got root entity: %s\n", root.path.to_string());
+    
+    // Create a container hierarchy for organizing users
+    var users = yield root.create_container_async("users");
+    print("Created container: %s\n", users.path.to_string());
+    
+    // Create documents with type labels for querying
+    var john = yield users.create_document_async("john", "User");
+    print("Created document: %s (type: %s)\n", john.path.to_string(), john.type_label);
+    
+    // Set various property types on the document
+    yield john.set_entity_property_async("email", new Invercargill.NativeElement<string>("john@example.com"));
+    yield john.set_entity_property_async("age", new Invercargill.NativeElement<int>(30));
+    yield john.set_entity_property_async("active", new Invercargill.NativeElement<bool?>(true));
+    print("Set properties: email, age, active\n\n");
+    
+    // ============================================================
+    // QUERYING ENTITIES
+    // ============================================================
+    
+    print("--- Querying Entities ---\n");
+    
+    // Query by type label
+    print("Users (by type 'User'):\n");
+    foreach (var entity in yield engine.query_by_type_async("User")) {
+        print("  - %s\n", entity.name);
+        
+        // Access properties (null when the property is unset)
+        var email = yield entity.get_entity_property_async("email");
+        if (email != null) {
+            print("    email: %s\n", ((!) email).to_string());
+        }
+    }
+    print("\n");
+    
+    // Query by expression (using Invercargill.Expressions)
+    print("Active users (expression 'active==true'):\n");
+    foreach (var entity in yield engine.query_by_expression_async("User", "active==true")) {
+        print("  - %s\n", entity.name);
+    }
+    print("\n");
+    
+    // ============================================================
+    // PATH-BASED ACCESS
+    // ============================================================
+    
+    print("--- Path-Based Access ---\n");
+    
+    // Access entity by path
+    var path = new EntityPath("/users/john");
+    var entity = yield engine.get_entity_async(path);
+    print("Retrieved entity by path: %s\n", entity.path.to_string());
+    
+    // Check if entity exists
+    var exists = yield engine.entity_exists_async(new EntityPath("/users/john"));
+    print("Entity /users/john exists: %s\n".printf(exists ? "true" : "false"));
+    
+    var not_exists = yield engine.entity_exists_async(new EntityPath("/users/jane"));
+    print("Entity /users/jane exists: %s\n".printf(not_exists ? "true" : "false"));
+    print("\n");
+    
+    // ============================================================
+    // TRANSACTIONS
+    // ============================================================
+    
+    print("--- Transactions ---\n");
+    
+    // Create multiple entities atomically in a transaction
+    var tx = yield engine.begin_transaction_async();
+    print("Transaction started\n");
+    
+    try {
+        var jane = yield users.create_document_async("jane", "User");
+        yield jane.set_entity_property_async("email", new Invercargill.NativeElement<string>("jane@example.com"));
+        yield jane.set_entity_property_async("age", new Invercargill.NativeElement<int>(25));
+        yield jane.set_entity_property_async("active", new Invercargill.NativeElement<bool?>(true));
+        print("Created jane in transaction\n");
+        
+        var bob = yield users.create_document_async("bob", "User");
+        yield bob.set_entity_property_async("email", new Invercargill.NativeElement<string>("bob@example.com"));
+        yield bob.set_entity_property_async("age", new Invercargill.NativeElement<int>(35));
+        yield bob.set_entity_property_async("active", new Invercargill.NativeElement<bool?>(false));
+        print("Created bob in transaction\n");
+        
+        yield tx.commit_async();
+        print("Transaction committed!\n\n");
+        
+    } catch (Error e) {
+        // On any failure, undo everything done inside the transaction.
+        yield tx.rollback_async();
+        print("Transaction rolled back: %s\n", e.message);
+    }
+    
+    // ============================================================
+    // CATEGORIES (Dynamic Grouping)
+    // ============================================================
+    
+    print("--- Categories (Dynamic Grouping) ---\n");
+    
+    // Create a category that auto-populates with matching entities
+    var active_users = yield root.create_category_async("active_users", "User", "active==true");
+    print("Created category: %s\n", active_users.path.to_string());
+    print("  Type filter: User\n");
+    print("  Expression: active==true\n");
+    
+    // The category's children are automatically populated
+    // with documents matching the expression
+    print("  Active users in category:\n");
+    foreach (var child in yield active_users.get_children_async()) {
+        print("    - %s\n", child.name);
+    }
+    print("\n");
+    
+    // ============================================================
+    // INDEXES (Text Search)
+    // ============================================================
+    
+    print("--- Indexes (Text Search) ---\n");
+    
+    // Create an index for text search results
+    // Parameters: name, type_label, expression
+    var search_index = yield root.create_index_async("email_search", "User", "email");
+    print("Created index: %s\n", search_index.path.to_string());
+    print("  Type filter: User\n");
+    print("  Expression: email\n");
+    
+    // Index contains documents matching the search terms
+    print("  Documents in index:\n");
+    foreach (var result in yield search_index.get_children_async()) {
+        print("    - %s\n", result.name);
+    }
+    print("\n");
+    
+    // ============================================================
+    // FINAL STATE
+    // ============================================================
+    
+    print("--- Final State ---\n");
+    print("All users:\n");
+    foreach (var user in yield engine.query_by_type_async("User")) {
+        var user_email = yield user.get_entity_property_async("email");
+        var age = yield user.get_entity_property_async("age");
+        var active = yield user.get_entity_property_async("active");
+        print("  - %s (email: %s, age: %s, active: %s)\n",
+            user.name,
+            user_email != null ? ((!) user_email).to_string() : "N/A",
+            age != null ? ((!) age).to_string() : "N/A",
+            active != null ? ((!) active).to_string() : "N/A"
+        );
+    }
+    print("\n");
+    
+    // ============================================================
+    // CLEANUP (Optional)
+    // ============================================================
+    
+    print("--- Cleanup ---\n");
+    print("Deleting test entities...\n");
+    
+    // Delete category and index first
+    yield active_users.delete_async();
+    print("Deleted active_users category\n");
+    
+    yield search_index.delete_async();
+    print("Deleted email_search index\n");
+    
+    // Delete all users
+    foreach (var user in yield engine.query_by_type_async("User")) {
+        string name = user.name;
+        yield user.delete_async();
+        print("Deleted %s\n", name);
+    }
+    
+    // Delete the users container
+    yield users.delete_async();
+    print("Deleted users container\n");
+    
+    print("\nExample completed successfully!\n");
+    print("\nTip: To test remote mode, start implexusd first:\n");
+    print("  ./build/tools/implexusd/implexusd --port 9876 --storage ./server_data\n");
+    print("  Then run: ./build/examples/basicusage --remote\n");
+    
+    return 0;
+}

+ 11 - 0
examples/meson.build

@@ -0,0 +1,11 @@
+# Example programs to build; each entry names a .vala file in this directory.
+example_names = [
+    'BasicUsage'
+]
+
+# Build one executable per example. The binary name is the lower-cased
+# source name (e.g. BasicUsage.vala -> build/examples/basicusage).
+# Examples are development aids only, so they are never installed.
+foreach example_name : example_names
+  executable(example_name.to_lower(),
+    example_name + '.vala',
+    dependencies: implexus_dep,
+    install: false
+  )
+endforeach

+ 32 - 0
implementation_plan.md

@@ -0,0 +1,32 @@
+# Optimize Batch Creation Performance
+
+The performance regression in `create_documents_batch_small` compared to `create_document_small` is due to direct writes to `storage` during entity creation, property updates, and child addition, completely bypassing the `EmbeddedTransaction`'s `_operations` queue. Every `add_child` directly updates and re-serializes the entire children array of a container synchronously, causing quadratic time complexity for batch inserts in a single container.
+
+## Proposed Changes
+
+### 1. [src/Engine/EmbeddedEngine.vala](file:///home/bbarrow/Projects/Implexus/src/Engine/EmbeddedEngine.vala)
+- Add an internal accessor for `current_transaction` so that entities can queue operations:
+  ```vala
+  internal EmbeddedTransaction? current_transaction { get { return _current_transaction; } }
+  ```
+
+### 2. [src/Engine/EmbeddedTransaction.vala](file:///home/bbarrow/Projects/Implexus/src/Engine/EmbeddedTransaction.vala)
+- Implement `apply_operation` for `OperationType.SET_PROPERTY` to properly apply property updates, OR refactor property updates to queue a full property save (`OperationType.SAVE_PROPERTIES`).
+- Since `Document` modifies `_properties` in-memory and multiple `set_entity_property` calls would each record a property, it is optimal to let `EmbeddedTransaction` merge property updates.
+- During `commit()`, merge operations or ensure that `save_properties` is called only once per document.
+- Add support for Category, Catalogue, and Index creation records if needed, or expand `OperationType.CREATE_ENTITY` to handle them. (Currently `CREATE_ENTITY` metadata storage might require expressions for categories).
+
+### 3. [src/Entities/Container.vala](file:///home/bbarrow/Projects/Implexus/src/Entities/Container.vala)
+- Modify `create_container`, `create_document`, `create_category`, `create_catalogue`, `create_index`, and `delete_child` to check if `_engine` is in a transaction.
+- If in a transaction, call `record_create_entity` and `record_add_child` instead of writing to `storage` directly.
+
+### 4. [src/Entities/Document.vala](file:///home/bbarrow/Projects/Implexus/src/Entities/Document.vala)
+- Modify `save_properties` to check for an active transaction.
+- If active, defer the property save to the transaction rather than saving immediately.
+
+### 5. [src/Entities/AbstractEntity.vala](file:///home/bbarrow/Projects/Implexus/src/Entities/AbstractEntity.vala)
+- Modify `delete()` to use `record_delete_entity` and `record_remove_child` when inside a transaction.
+
+## Verification Plan
+1. Run `implexus-perf` benchmark specifically for `Document` to verify that `create_documents_batch_small` decreases from ~58ms down to effectively ~1-2ms per batch (since the DB write is deferred and batched).
+2. The benchmark will validate both performance improvements and correctness (reads/writes in the benchmark must still function correctly).

+ 23 - 0
meson.build

@@ -0,0 +1,23 @@
+project('implexus', ['c', 'vala'],
+  version: '0.1',
+  # find_library(..., has_headers: ...) below requires Meson >= 0.50.0;
+  # declare it so older Meson fails with a clear message.
+  meson_version: '>= 0.50.0',
+)
+
+# Make the bundled VAPI bindings (e.g. for gdbm/lmdb) visible to valac.
+vapi_dir = join_paths(meson.current_source_dir(), 'vapi')
+add_project_arguments(['--vapidir', vapi_dir], language: 'vala')
+
+# Core dependencies, consumed by the subdirectories below.
+glib_dep = dependency('glib-2.0')
+gobject_dep = dependency('gobject-2.0')
+gio_dep = dependency('gio-2.0')
+invercargill_dep = dependency('invercargill-1')
+
+# GDBM dependency (required)
+cc = meson.get_compiler('c')
+gdbm_dep = cc.find_library('gdbm', required: true, has_headers: ['gdbm.h'])
+
+# LMDB dependency (required)
+lmdb_dep = cc.find_library('lmdb', required: true, has_headers: ['lmdb.h'])
+
+subdir('src')
+subdir('examples')
+subdir('tools')
+subdir('tests')

+ 1336 - 0
plans/async-io-refactor-design.md

@@ -0,0 +1,1336 @@
+# Async I/O Refactor Design
+
+## Document Status
+- **Created**: 2026-03-14
+- **Last Updated**: 2026-03-14
+- **Status**: Finalized
+- **Author**: Architecture Review
+
+## Executive Summary
+
+This document proposes a major refactoring of the Implexus codebase to make all I/O operations inherently async. The key changes are:
+
+1. **Remove AsyncEngine and AsyncEntity wrappers** - Make the base interfaces async
+2. **Create a DBM Queue System** - Serialize DBM operations with priority reads
+3. **Add `supports_concurrent_reads` to Dbm interface** - Enable optimizations for LMDB
+4. **Make Engine and Entity interfaces async** - All I/O operations become async
+
+---
+
+## Current Architecture Summary
+
+### Current Async Pattern (To Be Removed)
+
+The current implementation uses wrapper classes:
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│                      Application Layer                           │
+└─────────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                    AsyncEngine / AsyncEntity                     │
+│                                                                  │
+│  - Wraps Engine/Entity instances                                 │
+│  - Each async method spawns a new Thread                         │
+│  - Uses Idle.add to return results to main loop                  │
+│  - Duplicates entire interface                                   │
+└─────────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                    Engine / Entity Interfaces                    │
+│                                                                  │
+│  - Synchronous methods                                           │
+│  - Blocking I/O                                                  │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+**Problems with Current Approach:**
+- Thread explosion: Each async call spawns a new thread
+- Interface duplication: AsyncEngine mirrors Engine, AsyncEntity mirrors Entity
+- Maintenance burden: Changes must be made in multiple places
+- Inefficient: No coordination between operations
+
+### Current DBM Implementations
+
+| Implementation | Concurrent Reads | Notes |
+|----------------|------------------|-------|
+| GdbmDbm | No | GDBM does not support concurrent access |
+| LmdbDbm | Yes | LMDB supports concurrent readers via MVCC |
+| FilesystemDbm | Limited | File-per-key, but no explicit coordination |
+
+### Current Storage Layer
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│                      HighLevel Stores                            │
+│  EntityStore, DocumentStore, ContainerStore,                    │
+│  CategoryStore, CatalogueStore, IndexStore                      │
+│  (Synchronous, compose LowLevel stores)                         │
+└─────────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                      LowLevel Storage                            │
+│  EntityMetadataStorage, PropertiesStorage, ChildrenStorage,     │
+│  TypeIndexStorage, CategoryIndexStorage, TextIndexStorage, etc. │
+│  (Synchronous, direct Dbm access)                               │
+└─────────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                         Dbm Interface                            │
+│  has_key(), get(), set(), delete(), keys,                       │
+│  begin_transaction(), commit_transaction(), rollback_transaction()│
+│  (All synchronous)                                              │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## Proposed Architecture
+
+### Design Principles
+
+1. **Async by Default**: All I/O operations are async at the interface level
+2. **Queue-Based for Embedded**: DBM operations go through a priority queue
+3. **Native Async for Remote**: Socket operations use GLib async I/O
+4. **Concurrent Read Optimization**: LMDB can spawn read threads
+5. **Write Serialization**: All writes go through single queue
+
+### Architecture Overview
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│                      Application Layer                           │
+└─────────────────────────────────────────────────────────────────┘
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                    Engine / Entity Interfaces                    │
+│                                                                  │
+│  - All I/O methods are async                                    │
+│  - No wrapper classes needed                                    │
+│  - Direct async API                                             │
+└─────────────────────────────────────────────────────────────────┘
+                              │
+              ┌───────────────┴───────────────┐
+              ▼                               ▼
+┌───────────────────────────┐   ┌───────────────────────────────┐
+│      EmbeddedEngine       │   │        RemoteEngine           │
+│                           │   │                               │
+│  Uses AsyncDbmQueue       │   │  Uses Async Socket I/O        │
+│  for DBM operations       │   │  (GLib SocketConnection)      │
+└───────────────────────────┘   └───────────────────────────────┘
+              │                               │
+              ▼                               ▼
+┌───────────────────────────┐   ┌───────────────────────────────┐
+│      AsyncDbmQueue        │   │    Protocol.MessageReader     │
+│                           │   │    Protocol.MessageWriter     │
+│  - Priority read queue    │   │                               │
+│  - Serialized writes      │   │  (Already async-capable)      │
+│  - Concurrent reads for   │   │                               │
+│    LMDB                   │   │                               │
+└───────────────────────────┘   └───────────────────────────────┘
+              │
+              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                         Dbm Interface                            │
+│                                                                  │
+│  + supports_concurrent_reads: bool { get; }                     │
+│  (Methods remain synchronous - queue handles async)             │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## DBM Queue System Design
+
+### AsyncDbmQueue Class
+
+A dedicated queue system for DBM operations that:
+
+1. Processes operations one at a time (serialized)
+2. Prioritizes reads over writes
+3. Uses `Idle.add` to return results to main loop
+4. Supports concurrent reads for LMDB-like backends
+
+```vala
+namespace Implexus.Storage {
+
+/**
+ * Priority level for DBM operations.
+ */
+public enum DbmOperationPriority {
+    READ = 0,      // Higher priority - processed first
+    WRITE = 1,     // Lower priority - processed after reads
+    TRANSACTION = 2 // Lowest - processed with writes, but grouped
+}
+
+/**
+ * Delegate types for async callbacks.
+ */
+public delegate void DbmReadCallback<T>(T? result, StorageError? error);
+public delegate void DbmWriteCallback(StorageError? error);
+
+/**
+ * Queue system for async DBM operations.
+ *
+ * This class manages a dedicated thread for DBM operations,
+ * ensuring serialized access while prioritizing reads.
+ */
+public class AsyncDbmQueue : Object {
+    
+    // The wrapped synchronous Dbm.  It is only ever called from the worker
+    // thread, never directly from the main loop.
+    private Dbm _dbm;
+    private Thread<void> _worker_thread;
+    // Separate queues give reads strict priority over writes: the worker
+    // always drains _read_queue before touching _write_queue.
+    private AsyncQueue<DbmOperation> _read_queue;
+    private AsyncQueue<DbmOperation> _write_queue;
+    // Shutdown flag.  A stale read is harmless: the worker re-checks it at
+    // least once per wait timeout (100ms) in run_worker().
+    private bool _running;
+    private Mutex _mutex;
+    private Cond _cond;
+    
+    /**
+     * Creates a new AsyncDbmQueue wrapping the given Dbm.
+     *
+     * Spawns the dedicated worker thread immediately; call shutdown()
+     * to stop it.
+     */
+    public AsyncDbmQueue(Dbm dbm) {
+        _dbm = dbm;
+        _read_queue = new AsyncQueue<DbmOperation>();
+        _write_queue = new AsyncQueue<DbmOperation>();
+        _running = true;
+        _mutex = Mutex();
+        _cond = Cond();
+        
+        _worker_thread = new Thread<void>("dbm-worker", run_worker);
+    }
+    
+    /**
+     * Whether the underlying Dbm supports concurrent reads.
+     */
+    public bool supports_concurrent_reads {
+        get { return _dbm.supports_concurrent_reads; }
+    }
+    
+    // === Read Operations ===
+    //
+    // Every async method follows the same pattern: capture result/error in
+    // locals, enqueue an operation whose callback stores them and schedules
+    // resumption via Idle.add, then `yield`.  Because the async method runs
+    // on the main loop, the Idle source cannot fire before the `yield` is
+    // reached, so the resumption cannot be lost.
+    
+    /**
+     * Async has_key operation.
+     */
+    public async bool has_key_async(string key) throws StorageError {
+        bool result = false;
+        StorageError? error = null;
+        
+        var op = new DbmOperation.read_has_key(key, (r, e) => {
+            // Runs on the worker thread; Idle.add marshals the resumption
+            // back onto the main loop.
+            result = r;
+            error = e;
+            Idle.add(() => {
+                has_key_async.callback();
+                return false;
+            });
+        });
+        
+        _read_queue.push(op);
+        notify_worker();
+        
+        yield;
+        
+        if (error != null) {
+            throw error;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Async get operation.
+     *
+     * @return the stored value, or null if the key is absent.
+     */
+    public async Invercargill.BinaryData? get_async(string key) throws StorageError {
+        Invercargill.BinaryData? result = null;
+        StorageError? error = null;
+        
+        var op = new DbmOperation.read_get(key, (r, e) => {
+            result = r;
+            error = e;
+            Idle.add(() => {
+                get_async.callback();
+                return false;
+            });
+        });
+        
+        _read_queue.push(op);
+        notify_worker();
+        
+        yield;
+        
+        if (error != null) {
+            throw error;
+        }
+        
+        return result;
+    }
+    
+    // === Write Operations ===
+    
+    /**
+     * Async set operation.
+     */
+    public async void set_async(string key, Invercargill.BinaryData value) throws StorageError {
+        StorageError? error = null;
+        
+        var op = new DbmOperation.write_set(key, value, (e) => {
+            error = e;
+            Idle.add(() => {
+                set_async.callback();
+                return false;
+            });
+        });
+        
+        _write_queue.push(op);
+        notify_worker();
+        
+        yield;
+        
+        if (error != null) {
+            throw error;
+        }
+    }
+    
+    /**
+     * Async delete operation.
+     */
+    public async void delete_async(string key) throws StorageError {
+        StorageError? error = null;
+        
+        var op = new DbmOperation.write_delete(key, (e) => {
+            error = e;
+            Idle.add(() => {
+                delete_async.callback();
+                return false;
+            });
+        });
+        
+        _write_queue.push(op);
+        notify_worker();
+        
+        yield;
+        
+        if (error != null) {
+            throw error;
+        }
+    }
+    
+    // === Transaction Operations ===
+    //
+    // Transaction control goes through the write queue so it stays ordered
+    // with respect to the writes it brackets.
+    
+    /**
+     * Async begin_transaction operation.
+     */
+    public async void begin_transaction_async() throws StorageError {
+        StorageError? error = null;
+        
+        var op = new DbmOperation.transaction_begin((e) => {
+            error = e;
+            Idle.add(() => {
+                begin_transaction_async.callback();
+                return false;
+            });
+        });
+        
+        _write_queue.push(op);
+        notify_worker();
+        
+        yield;
+        
+        if (error != null) {
+            throw error;
+        }
+    }
+    
+    /**
+     * Async commit_transaction operation.
+     */
+    public async void commit_transaction_async() throws StorageError {
+        StorageError? error = null;
+        
+        var op = new DbmOperation.transaction_commit((e) => {
+            error = e;
+            Idle.add(() => {
+                commit_transaction_async.callback();
+                return false;
+            });
+        });
+        
+        _write_queue.push(op);
+        notify_worker();
+        
+        yield;
+        
+        if (error != null) {
+            throw error;
+        }
+    }
+    
+    /**
+     * Async rollback_transaction operation.
+     *
+     * Mirrors Dbm.rollback_transaction, which declares no errors.
+     */
+    public async void rollback_transaction_async() {
+        var op = new DbmOperation.transaction_rollback(() => {
+            Idle.add(() => {
+                rollback_transaction_async.callback();
+                return false;
+            });
+        });
+        
+        _write_queue.push(op);
+        notify_worker();
+        
+        yield;
+    }
+    
+    // === Worker Thread ===
+    
+    private void run_worker() {
+        while (_running) {
+            DbmOperation? op = null;
+            
+            // Prioritize reads over writes
+            op = _read_queue.try_pop();
+            if (op == null) {
+                op = _write_queue.try_pop();
+            }
+            
+            if (op != null) {
+                process_operation((!) op);
+            } else {
+                // Wait for new operations.  GLib's Cond.wait() takes no
+                // timeout argument; the timed variant is wait_until(),
+                // which expects an absolute monotonic deadline in
+                // microseconds.  The 100ms timeout bounds the window in
+                // which a push racing with the try_pop above (or a
+                // shutdown request) can go unnoticed.
+                _mutex.lock();
+                _cond.wait_until(_mutex, get_monotonic_time() + 100000); // 100ms
+                _mutex.unlock();
+            }
+        }
+    }
+    
+    // Executes one operation on the worker thread.  The operation's
+    // callback is responsible for marshalling back to the main loop
+    // (all callbacks created above wrap their work in Idle.add).
+    private void process_operation(DbmOperation op) {
+        switch (op.type) {
+            case DbmOperationType.HAS_KEY:
+                bool result = _dbm.has_key(op.key);
+                op.read_bool_callback(result, null);
+                break;
+                
+            case DbmOperationType.GET:
+                var result = _dbm.get(op.key);
+                op.read_data_callback(result, null);
+                break;
+                
+            case DbmOperationType.SET:
+                try {
+                    _dbm.set(op.key, op.value);
+                    op.write_callback(null);
+                } catch (StorageError e) {
+                    op.write_callback(e);
+                }
+                break;
+                
+            case DbmOperationType.DELETE:
+                try {
+                    _dbm.delete(op.key);
+                    op.write_callback(null);
+                } catch (StorageError e) {
+                    op.write_callback(e);
+                }
+                break;
+                
+            case DbmOperationType.BEGIN_TRANSACTION:
+                try {
+                    _dbm.begin_transaction();
+                    op.write_callback(null);
+                } catch (StorageError e) {
+                    op.write_callback(e);
+                }
+                break;
+                
+            case DbmOperationType.COMMIT_TRANSACTION:
+                try {
+                    _dbm.commit_transaction();
+                    op.write_callback(null);
+                } catch (StorageError e) {
+                    op.write_callback(e);
+                }
+                break;
+                
+            case DbmOperationType.ROLLBACK_TRANSACTION:
+                _dbm.rollback_transaction();
+                op.void_callback();
+                break;
+        }
+    }
+    
+    // Wakes the worker if it is parked in wait_until().
+    private void notify_worker() {
+        _mutex.lock();
+        _cond.signal();
+        _mutex.unlock();
+    }
+    
+    /**
+     * Shuts down the worker thread.
+     *
+     * Blocks until the worker exits.  NOTE(review): operations still
+     * queued at shutdown are silently dropped and their callers never
+     * resume — confirm whether the queues should be drained first.
+     */
+    public void shutdown() {
+        _running = false;
+        notify_worker();
+        _worker_thread.join();
+    }
+}
+
+/**
+ * Types of DBM operations.
+ */
+private enum DbmOperationType {
+    HAS_KEY,              // read queue: dispatches to Dbm.has_key
+    GET,                  // read queue: dispatches to Dbm.get
+    SET,                  // write queue: dispatches to Dbm.set
+    DELETE,               // write queue: dispatches to Dbm.delete
+    BEGIN_TRANSACTION,    // write queue: dispatches to Dbm.begin_transaction
+    COMMIT_TRANSACTION,   // write queue: dispatches to Dbm.commit_transaction
+    ROLLBACK_TRANSACTION  // write queue: dispatches to Dbm.rollback_transaction
+}
+
+/**
+ * Represents a single DBM operation in the queue.
+ */
+private class DbmOperation : Object {
+    public DbmOperationType type;
+    // NOTE(review): key/value are only meaningful for some operation types
+    // (transaction ops carry neither, reads carry no value) — consider
+    // making them nullable so the unused cases honestly hold null.
+    public string key;
+    public Invercargill.BinaryData value;
+    
+    public DbmReadCallback<bool>? read_bool_callback;
+    public DbmReadCallback<Invercargill.BinaryData>? read_data_callback;
+    public DbmWriteCallback? write_callback;
+    public delegate void VoidCallback();
+    public VoidCallback? void_callback;
+    
+    // Factory methods for creating operations.
+    //
+    // The callback parameters are declared `owned`: callers pass closures
+    // that capture locals of the enqueuing async method, and Vala forbids
+    // copying a delegate with a target into a field — the reference must
+    // be transferred with `(owned)` instead.
+    public DbmOperation.read_has_key(string key, owned DbmReadCallback<bool> callback) {
+        this.type = DbmOperationType.HAS_KEY;
+        this.key = key;
+        this.read_bool_callback = (owned) callback;
+    }
+    
+    public DbmOperation.read_get(string key, owned DbmReadCallback<Invercargill.BinaryData> callback) {
+        this.type = DbmOperationType.GET;
+        this.key = key;
+        this.read_data_callback = (owned) callback;
+    }
+    
+    public DbmOperation.write_set(string key, Invercargill.BinaryData value, owned DbmWriteCallback callback) {
+        this.type = DbmOperationType.SET;
+        this.key = key;
+        this.value = value;
+        this.write_callback = (owned) callback;
+    }
+    
+    public DbmOperation.write_delete(string key, owned DbmWriteCallback callback) {
+        this.type = DbmOperationType.DELETE;
+        this.key = key;
+        this.write_callback = (owned) callback;
+    }
+    
+    public DbmOperation.transaction_begin(owned DbmWriteCallback callback) {
+        this.type = DbmOperationType.BEGIN_TRANSACTION;
+        this.write_callback = (owned) callback;
+    }
+    
+    public DbmOperation.transaction_commit(owned DbmWriteCallback callback) {
+        this.type = DbmOperationType.COMMIT_TRANSACTION;
+        this.write_callback = (owned) callback;
+    }
+    
+    public DbmOperation.transaction_rollback(owned VoidCallback callback) {
+        this.type = DbmOperationType.ROLLBACK_TRANSACTION;
+        this.void_callback = (owned) callback;
+    }
+}
+
+} // namespace Implexus.Storage
+```
+
+### Concurrent Read Optimization for LMDB
+
+For DBM implementations that support concurrent reads (like LMDB), the queue can spawn dedicated read threads:
+
+```vala
+// In AsyncDbmQueue, when supports_concurrent_reads is true:
+// Dispatches a read on its own short-lived thread when the backend allows
+// concurrent readers; otherwise falls back to the serialized path.
+private void process_read_with_thread(DbmOperation op) {
+    if (!_dbm.supports_concurrent_reads) {
+        process_operation(op);
+        return;
+    }
+    
+    // For LMDB-like backends, spawn a new thread for the read.
+    new Thread<void>("dbm-reader", () => {
+        // Dispatch on the operation type: both read kinds (HAS_KEY and
+        // GET) can arrive here, not just GET.  The callbacks created by
+        // the *_async methods already marshal back to the main loop via
+        // Idle.add, so they are invoked directly from this thread —
+        // wrapping them in another Idle.add would double-marshal.
+        switch (op.type) {
+            case DbmOperationType.HAS_KEY:
+                bool found = _dbm.has_key(op.key);
+                op.read_bool_callback(found, null);
+                break;
+                
+            case DbmOperationType.GET:
+                var result = _dbm.get(op.key);
+                op.read_data_callback(result, null);
+                break;
+                
+            default:
+                // Writes must never take this path; keep them serialized.
+                process_operation(op);
+                break;
+        }
+    });
+}
+```
+
+---
+
+## Dbm Interface Changes
+
+### Add supports_concurrent_reads Property
+
+```vala
+namespace Implexus.Storage {
+
+public interface Dbm : Object {
+    
+    // === Existing Methods ===
+    
+    /** Returns true if a value is stored under the given key. */
+    public abstract bool has_key(string key);
+    /** Returns the value stored under key, or null if absent. */
+    public abstract Invercargill.BinaryData? @get(string key);
+    /** Stores value under key. */
+    public abstract void @set(string key, Invercargill.BinaryData value) throws StorageError;
+    /** Removes the value stored under key. */
+    public abstract void delete(string key) throws StorageError;
+    /** Enumerates all stored keys. */
+    public abstract Invercargill.Enumerable<string> keys { owned get; }
+    /** Begins a transaction; see in_transaction. */
+    public abstract void begin_transaction() throws StorageError;
+    /** Commits the current transaction. */
+    public abstract void commit_transaction() throws StorageError;
+    /** Rolls back the current transaction (declares no errors). */
+    public abstract void rollback_transaction();
+    /** Whether a transaction is currently open. */
+    public abstract bool in_transaction { get; }
+    
+    // === NEW: Concurrent Read Support ===
+    
+    /**
+     * Indicates whether this Dbm implementation supports concurrent reads.
+     *
+     * If true, multiple threads can read simultaneously without blocking.
+     * Writes are still serialized through the queue.
+     *
+     * - LMDB: true (MVCC allows concurrent readers)
+     * - GDBM: false (single-threaded access)
+     * - FilesystemDbm: false (no explicit coordination)
+     */
+    public abstract bool supports_concurrent_reads { get; }
+}
+
+} // namespace Implexus.Storage
+```
+
+### Implementation Updates
+
+```vala
+// GdbmDbm.vala
+public class GdbmDbm : Object, Dbm {
+    // GDBM is single-threaded access (see the Dbm interface docs), so all
+    // reads must go through the serialized worker.
+    public bool supports_concurrent_reads { get { return false; } }
+    // ... rest of implementation
+}
+
+// LmdbDbm.vala
+public class LmdbDbm : Object, Dbm {
+    // LMDB's MVCC allows concurrent readers (see the Dbm interface docs),
+    // enabling the per-read reader-thread optimization.
+    public bool supports_concurrent_reads { get { return true; } }
+    // ... rest of implementation
+}
+
+// FilesystemDbm.vala
+public class FilesystemDbm : Object, Dbm {
+    // No explicit coordination between readers (see the Dbm interface
+    // docs), so reads stay on the serialized worker.
+    public bool supports_concurrent_reads { get { return false; } }
+    // ... rest of implementation
+}
+```
+
+---
+
+## Engine Interface Changes
+
+### Current Interface (Synchronous)
+
+```vala
+public interface Engine : Object {
+    // Entity access — all synchronous in the current design.
+    public abstract Entity get_root();
+    public abstract Entity? get_entity(EntityPath path) throws EngineError;
+    public abstract Entity? get_entity_or_null(EntityPath path);
+    public abstract bool entity_exists(EntityPath path);
+    // Query operations return enumerables (lazy in the current design).
+    public abstract Invercargill.Enumerable<Entity> query_by_type(string type_label);
+    public abstract Invercargill.Enumerable<Entity> query_by_expression(string type_label, string expression);
+    // Transaction control and configuration access.
+    public abstract Transaction begin_transaction() throws EngineError;
+    public abstract bool in_transaction { get; }
+    public abstract StorageConfiguration configuration { owned get; }
+    
+    // Change-notification signals.
+    public signal void entity_created(Entity entity);
+    public signal void entity_deleted(EntityPath path);
+    public signal void entity_modified(Entity entity);
+}
+
+### Proposed Interface (Async)
+
+```vala
+public interface Engine : Object {
+    // === Root Access ===
+    
+    /**
+     * Gets the root entity of the database.
+     *
+     * This operation is async as it may require I/O to verify/create root.
+     */
+    public abstract async Entity get_root_async() throws EngineError;
+    
+    // === Path-Based Access ===
+    
+    /**
+     * Gets an entity by path, throwing an error if not found.
+     */
+    public abstract async Entity? get_entity_async(EntityPath path) throws EngineError;
+    
+    /**
+     * Gets an entity by path, returning null if not found.
+     *
+     * Unlike the sync get_entity_or_null, this still declares
+     * throws EngineError: only not-found is mapped to null, other
+     * errors propagate.
+     */
+    public abstract async Entity? get_entity_or_null_async(EntityPath path) throws EngineError;
+    
+    /**
+     * Checks if an entity exists at the specified path.
+     */
+    public abstract async bool entity_exists_async(EntityPath path) throws EngineError;
+    
+    // === Query Operations ===
+    
+    /**
+     * Queries all entities of a specific type.
+     *
+     * Returns an array with eager loading. This is simpler than async
+     * iteration and sufficient for expected data volumes.
+     *
+     * DECISION: Use Entity[] (eager loading) - simpler and sufficient
+     * for expected data volumes. See "Decisions Made" section.
+     */
+    public abstract async Entity[] query_by_type_async(string type_label) throws EngineError;
+    
+    /**
+     * Queries entities by type and expression.
+     */
+    public abstract async Entity[] query_by_expression_async(string type_label, string expression) throws EngineError;
+    
+    // === Transactions ===
+    
+    /**
+     * Begins a new transaction.
+     *
+     * NOTE: The with_write_transaction() helper has been REMOVED.
+     * Vala doesn't support async delegates, so transactions must be
+     * managed manually with begin_transaction_async(), perform operations,
+     * then commit_async() or rollback_async().
+     *
+     * See "Decisions Made" section for details.
+     */
+    public abstract async Transaction begin_transaction_async() throws EngineError;
+    
+    /**
+     * Indicates whether a transaction is currently active.
+     * This is synchronous as it's a quick property check.
+     */
+    public abstract bool in_transaction { get; }
+    
+    // === Configuration ===
+    
+    /**
+     * Gets the storage configuration.
+     * This is synchronous as it's a quick property access.
+     */
+    public abstract StorageConfiguration configuration { owned get; }
+    
+    // === Events ===
+    
+    // Change-notification signals, unchanged from the sync interface.
+    public signal void entity_created(Entity entity);
+    public signal void entity_deleted(EntityPath path);
+    public signal void entity_modified(Entity entity);
+}
+```
+
+---
+
+## Entity Interface Changes
+
+### Current Interface (Synchronous)
+
+The current Entity interface has synchronous methods for all operations.
+
+### Proposed Interface (Async)
+
+```vala
+public interface Entity : Object {
+    
+    // === Identity (Synchronous - No I/O) ===
+    
+    /** The engine this entity belongs to (unowned back-reference). */
+    public abstract unowned Engine engine { get; }
+    /** Absolute path of this entity. */
+    public abstract EntityPath path { owned get; }
+    /** Name of this entity. */
+    public abstract string name { owned get; }
+    /** Structural kind of this entity (container, document, ...). */
+    public abstract EntityType entity_type { get; }
+    /** Effective type label of this entity. */
+    public abstract string type_label { owned get; }
+    // NOTE(review): the distinction between type_label/configured_type_label
+    // and configured_expression is not defined in this sketch — confirm
+    // semantics against the existing Entity implementation.
+    public abstract string configured_expression { owned get; }
+    public abstract string configured_type_label { owned get; }
+    
+    // === Parent/Child Navigation (Async - May require I/O) ===
+    
+    /**
+     * Gets the parent entity, or null for the root.
+     */
+    public abstract async Entity? get_parent_async() throws EngineError;
+    
+    /**
+     * Gets the names of all child entities.
+     */
+    public abstract async Invercargill.ReadOnlySet<string> get_child_names_async() throws EngineError;
+    
+    /**
+     * Gets a child entity by name.
+     */
+    public abstract async Entity? get_child_async(string name) throws EngineError;
+    
+    /**
+     * Gets all child entities.
+     */
+    public abstract async Entity[] get_children_async() throws EngineError;
+    
+    // === Child Management (Async - Requires I/O) ===
+    //
+    // Each creator returns the newly created child entity.
+    // NOTE(review): the nullable return is surprising for creators that
+    // also throw on failure — confirm under what circumstances null
+    // (rather than an error) is returned.
+    
+    public abstract async Entity? create_container_async(string name) throws EngineError;
+    public abstract async Entity? create_document_async(string name, string type_label) throws EngineError;
+    public abstract async Entity? create_category_async(string name, string type_label, string expression) throws EngineError;
+    public abstract async Entity? create_index_async(string name, string type_label, string expression) throws EngineError;
+    public abstract async Entity? create_catalogue_async(string name, string type_label, string expression) throws EngineError;
+    
+    // === Document Operations (Async - Requires I/O) ===
+    
+    /**
+     * Gets the properties stored in this document.
+     */
+    public abstract async Invercargill.Properties get_properties_async() throws EngineError;
+    
+    /**
+     * Gets a property value by name.
+     */
+    public abstract async Invercargill.Element? get_entity_property_async(string name) throws EngineError;
+    
+    /**
+     * Sets a property value.
+     */
+    public abstract async void set_entity_property_async(string name, Invercargill.Element value) throws EngineError;
+    
+    /**
+     * Removes a property.
+     */
+    public abstract async void remove_property_async(string name) throws EngineError;
+    
+    // === Lifecycle (Async - Requires I/O) ===
+    
+    /**
+     * Deletes this entity from the database.
+     */
+    public abstract async void delete_async() throws EngineError;
+    
+    /**
+     * Checks if this entity still exists in the database.
+     * This is synchronous as it may be cached or quick to check.
+     */
+    public abstract bool exists { get; }
+    
+    // === Set Operations ===
+    
+    /**
+     * Creates an EntitySet containing just this entity.
+     */
+    public abstract async EntitySet as_set_async();
+}
+```
+
+---
+
+## Transaction Interface Changes
+
+### Proposed Interface (Async)
+
+```vala
+public interface Transaction : Object {
+    
+    /**
+     * Indicates whether this transaction is still active.
+     * Synchronous - quick property check.
+     */
+    public abstract bool active { get; }
+    
+    /**
+     * Commits all changes made during this transaction.
+     */
+    public abstract async void commit_async() throws EngineError;
+    
+    /**
+     * Rolls back all changes made during this transaction.
+     *
+     * Declares no errors: rollback failures cannot be reported to the
+     * caller (mirrors Dbm.rollback_transaction).
+     */
+    public abstract async void rollback_async();
+}
+
+---
+
+## HighLevel/LowLevel Storage Changes
+
+### Approach: Async Wrappers over Sync Implementation
+
+The LowLevel storage classes remain synchronous (they operate on the Dbm directly). The HighLevel stores gain async versions that use the AsyncDbmQueue.
+
+```vala
+namespace Implexus.Storage.HighLevel {
+
+public class EntityStore : Object {
+    
+    // LowLevel stores keep their synchronous Dbm-facing logic; this class
+    // only decides what is routed through the async queue.
+    private LowLevel.EntityMetadataStorage _metadata;
+    private LowLevel.TypeIndexStorage _type_index;
+    private AsyncDbmQueue _queue;
+    
+    public EntityStore.with_queue(AsyncDbmQueue queue) {
+        _queue = queue;
+        // LowLevel stores still use sync Dbm for internal operations
+        // but we route through the queue
+    }
+    
+    // === Async Metadata Operations ===
+    //
+    // All metadata records live under keys of the form "entity:<path>".
+    
+    /**
+     * Stores entity metadata (type plus optional type label).
+     */
+    public async void store_metadata_async(
+        Core.EntityPath path, 
+        Core.EntityType type, 
+        string? type_label = null
+    ) throws StorageError {
+        // Serialize and queue the write
+        string key = "entity:" + path.to_string();
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<int64?>((int64) type));
+        writer.write_element(new Invercargill.NativeElement<string>(type_label ?? ""));
+        
+        yield _queue.set_async(key, writer.to_binary_data());
+    }
+    
+    /**
+     * Reads the stored EntityType for the given path, or null when no
+     * metadata record exists.
+     */
+    public async Core.EntityType? get_entity_type_async(Core.EntityPath path) throws StorageError {
+        string key = "entity:" + path.to_string();
+        var data = yield _queue.get_async(key);
+        
+        if (data == null) {
+            return null;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return null;
+            }
+            int64? type_val = element.as<int64?>();
+            return (Core.EntityType) (type_val == null ? 0 : (!) type_val);
+        } catch (Invercargill.ElementError e) {
+            throw new StorageError.CORRUPT_DATA("Failed to read entity type: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Checks whether a metadata record exists for the given path.
+     *
+     * Declares throws StorageError: has_key_async can fail, and the
+     * previous signature left that error undeclared and unhandled.
+     */
+    public async bool exists_async(Core.EntityPath path) throws StorageError {
+        string key = "entity:" + path.to_string();
+        return yield _queue.has_key_async(key);
+    }
+    
+    /**
+     * Deletes the metadata record for the given path.
+     */
+    public async void delete_async(Core.EntityPath path) throws StorageError {
+        string key = "entity:" + path.to_string();
+        yield _queue.delete_async(key);
+    }
+    
+    // ... similar async versions for other methods
+}
+
+} // namespace Implexus.Storage.HighLevel
+```
+
+---
+
+## EmbeddedEngine Changes
+
+### Current Implementation
+
+The EmbeddedEngine currently uses synchronous Dbm operations directly.
+
+### Proposed Implementation
+
+```vala
+public class EmbeddedEngine : Object, Core.Engine {
+    
+    // Async queue wrapping the backing Dbm; all HighLevel stores share it.
+    private Storage.AsyncDbmQueue _dbm_queue;
+    private Storage.HighLevel.EntityStore _entity_store;
+    private Storage.HighLevel.DocumentStore _document_store;
+    private Storage.HighLevel.ContainerStore _container_store;
+    private Storage.HighLevel.CategoryStore _category_store;
+    private Storage.HighLevel.CatalogueStore _catalogue_store;
+    private Storage.HighLevel.IndexStore _index_store;
+    
+    public EmbeddedEngine(Core.StorageConfiguration config) {
+        // Get Dbm from storage
+        var basic_storage = (config.storage as Storage.BasicStorage);
+        // NOTE(review): when config.storage is not a BasicStorage the cast
+        // yields null and every store stays uninitialized — later calls
+        // will crash.  Confirm whether this should fail fast instead.
+        if (basic_storage != null) {
+            var dbm = basic_storage.dbm;
+            
+            // Create async queue
+            _dbm_queue = new Storage.AsyncDbmQueue(dbm);
+            
+            // Initialize stores with queue
+            _entity_store = new Storage.HighLevel.EntityStore.with_queue(_dbm_queue);
+            _document_store = new Storage.HighLevel.DocumentStore.with_queue(_dbm_queue);
+            _container_store = new Storage.HighLevel.ContainerStore.with_queue(_dbm_queue);
+            _category_store = new Storage.HighLevel.CategoryStore.with_queue(_dbm_queue);
+            _catalogue_store = new Storage.HighLevel.CatalogueStore.with_queue(_dbm_queue);
+            _index_store = new Storage.HighLevel.IndexStore.with_queue(_dbm_queue);
+        }
+    }
+    
+    // === Internal Sync Methods for Hook Use ===
+    //
+    // DECISION: Hooks remain synchronous, running in the DBM thread.
+    // The storage layer (HighLevel/LowLevel) also remains synchronous.
+    // EmbeddedEngine keeps internal sync versions of methods for hook use.
+    //
+    // These internal methods are NOT part of the public API and are only
+    // used by the HookManager to perform operations during hook execution.
+    
+    internal Core.Entity? get_entity_or_null_sync(Core.EntityPath path) {
+        // Synchronous implementation for hook use
+        // Uses the underlying Dbm directly, bypassing the async queue
+        return _entity_store.get_entity_or_null_direct(path);
+    }
+    
+    // === Engine Interface Implementation ===
+    
+    public async Core.Entity get_root_async() throws Core.EngineError {
+        // NOTE(review): `_root` is used here but not declared in this
+        // sketch — presumably a cached `private Core.Entity? _root;`
+        // field; confirm when fleshing out the implementation.
+        if (_root != null) {
+            return (!) _root;
+        }
+        
+        var root_path = new Core.EntityPath.root();
+        
+        // Check if root exists
+        bool exists = yield _entity_store.exists_async(root_path);
+        if (!exists) {
+            try {
+                yield _entity_store.store_metadata_async(root_path, Core.EntityType.CONTAINER, null);
+            } catch (Storage.StorageError e) {
+                throw new Core.EngineError.STORAGE_ERROR("Failed to create root: %s".printf(e.message));
+            }
+        }
+        
+        _root = new Entities.Container(this, root_path);
+        return (!) _root;
+    }
+    
+    public async Core.Entity? get_entity_async(Core.EntityPath path) throws Core.EngineError {
+        bool exists = yield _entity_store.exists_async(path);
+        if (!exists) {
+            throw new Core.EngineError.ENTITY_NOT_FOUND(
+                "Entity not found: %s".printf(path.to_string())
+            );
+        }
+        
+        return yield create_entity_from_storage_async(path);
+    }
+    
+    public async Core.Entity? get_entity_or_null_async(Core.EntityPath path) throws Core.EngineError {
+        // Maps any engine error (including not-found) to null.
+        // NOTE(review): this also swallows storage/protocol failures, not
+        // just ENTITY_NOT_FOUND — confirm that is intended.
+        try {
+            return yield get_entity_async(path);
+        } catch (Core.EngineError e) {
+            return null;
+        }
+    }
+    
+    public async bool entity_exists_async(Core.EntityPath path) throws Core.EngineError {
+        return yield _entity_store.exists_async(path);
+    }
+    
+    // ... rest of async implementations
+}
+```
+
+---
+
+## RemoteEngine Changes
+
+The RemoteEngine already uses socket communication. We need to make the socket operations properly async.
+
+### Current Implementation
+
+```vala
+// Sends a request and blocks until the response matching its request id
+// arrives.  This is the synchronous pre-refactor behaviour.
+internal Protocol.Message send_request_and_wait(Protocol.Message request) throws Protocol.ProtocolError {
+    ensure_connected();
+    var request_id = ((!) _writer).write_request(request);
+    return ((!) _reader).read_response_for_request(request_id);
+}
+```
+
+### Proposed Implementation
+
+```vala
+// Sends a request over the async socket and awaits the response that
+// matches its request id.
+internal async Protocol.Message send_request_and_wait_async(Protocol.Message request) throws Protocol.ProtocolError {
+    ensure_connected();
+    
+    // Write the request, then await its matching response.
+    var id = yield ((!) _writer).write_request_async(request);
+    var response = yield ((!) _reader).read_response_for_request_async(id);
+    return response;
+}
+
+// Fetches an entity by path from the remote server, translating protocol
+// errors into engine errors.
+public async Core.Entity? get_entity_async(Core.EntityPath path) throws Core.EngineError {
+    ensure_connected();
+    
+    try {
+        var response = yield send_request_and_wait_async(new Protocol.GetEntityRequest.for_path(path));
+        
+        switch (response.message_type) {
+            case Protocol.MessageType.ERROR:
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+                
+            case Protocol.MessageType.ENTITY_RESPONSE:
+                var entity_response = (Protocol.EntityResponse) response;
+                return create_remote_entity_from_data(entity_response.entity_data);
+                
+            default:
+                throw new Core.EngineError.PROTOCOL_ERROR("Unexpected response type");
+        }
+        
+    } catch (Protocol.ProtocolError e) {
+        throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+    }
+}
+```
+
+---
+
+## Migration Path
+
+**DECISION: Clean Break Migration**
+
+The project is in development, so we will use a clean break approach:
+- Remove all sync methods immediately
+- Update everything in this refactor
+- No gradual migration or deprecation period
+- No compatibility shims needed
+
+### Phase 1: Infrastructure
+
+1. Create `AsyncDbmQueue` class
+2. Add `supports_concurrent_reads` property to Dbm interface
+3. Update all Dbm implementations with the new property
+4. Add tests for the queue system
+
+### Phase 2: Interface Changes (Breaking)
+
+1. Replace sync methods with async methods in Engine interface
+2. Replace sync methods with async methods in Entity interface
+3. Update Transaction interface to async
+4. Remove `with_write_transaction()` helper entirely
+
+### Phase 3: Implementation Updates
+
+1. Implement async methods in EmbeddedEngine (including internal sync versions for hooks)
+2. Implement async methods in RemoteEngine with async sockets
+3. Update HighLevel stores to use async queue
+4. Keep LowLevel storage synchronous (operates on Dbm directly)
+
+### Phase 4: Entity Updates
+
+1. Update Container, Document, Category, Catalogue, Index to use async stores
+2. Ensure all entity operations use async methods internally
+
+### Phase 5: Cleanup
+
+1. Remove AsyncEngine and AsyncEntity wrapper classes
+2. Update all tests to use async methods
+3. Update any application code to use async methods
+
+---
+
+## Files to Modify
+
+### New Files to Create
+
+| File | Purpose |
+|------|---------|
+| `src/Storage/AsyncDbmQueue.vala` | Queue system for async DBM operations |
+| `src/Storage/DbmOperation.vala` | Operation types for the queue |
+
+### Files to Modify
+
+| File | Changes |
+|------|---------|
+| `src/Storage/Dbm.vala` | Add `supports_concurrent_reads` property |
+| `src/Storage/Gdbm/GdbmDbm.vala` | Implement `supports_concurrent_reads` (return false) |
+| `src/Storage/Lmdb/LmdbDbm.vala` | Implement `supports_concurrent_reads` (return true) |
+| `src/Storage/FilesystemDbm.vala` | Implement `supports_concurrent_reads` (return false) |
+| `src/Core/Engine.vala` | Convert all I/O methods to async |
+| `src/Core/Entity.vala` | Convert all I/O methods to async |
+| `src/Core/Transaction.vala` | (if separate) Convert to async |
+| `src/Storage/HighLevel/EntityStore.vala` | Add async methods |
+| `src/Storage/HighLevel/DocumentStore.vala` | Add async methods |
+| `src/Storage/HighLevel/ContainerStore.vala` | Add async methods |
+| `src/Storage/HighLevel/CategoryStore.vala` | Add async methods |
+| `src/Storage/HighLevel/CatalogueStore.vala` | Add async methods |
+| `src/Storage/HighLevel/IndexStore.vala` | Add async methods |
+| `src/Engine/EmbeddedEngine.vala` | Implement async interface methods |
+| `src/Engine/RemoteEngine.vala` | Implement async interface methods with async sockets |
+| `src/Entities/Container.vala` | Use async stores |
+| `src/Entities/Document.vala` | Use async stores |
+| `src/Entities/Category.vala` | Use async stores |
+| `src/Entities/Catalogue.vala` | Use async stores |
+| `src/Entities/Index.vala` | Use async stores |
+| `src/Entities/AbstractEntity.vala` | Update base implementations |
+| `src/Protocol/MessageReader.vala` | Add async read methods |
+| `src/Protocol/MessageWriter.vala` | Add async write methods |
+| `src/meson.build` | Add new files to build |
+
+### Files to Delete (Phase 5)
+
+| File | Reason |
+|------|--------|
+| `src/Core/AsyncEngine.vala` | Replaced by async interface methods |
+| `src/Core/AsyncEntity.vala` | Replaced by async interface methods |
+
+### Test Files to Update
+
+| File | Changes |
+|------|---------|
+| `tests/Storage/GdbmDbmTest.vala` | Add tests for `supports_concurrent_reads` |
+| `tests/Storage/LmdbDbmTest.vala` | Add tests for `supports_concurrent_reads` |
+| `tests/Storage/FilesystemDbmTest.vala` | Add tests for `supports_concurrent_reads` |
+| `tests/Storage/StorageTest.vala` | Add tests for AsyncDbmQueue |
+| `tests/Engine/EmbeddedEngineTest.vala` | Update to use async methods |
+| All other tests | Update to use async methods |
+
+---
+
+## Decisions Made
+
+This section documents the finalized design decisions for the async I/O refactor.
+
+### Decision 1: Query Results - Eager Loading with Arrays
+
+**Decision:** Use `Entity[]` (eager loading) for query results.
+
+**Rationale:**
+- Simpler than implementing async iteration
+- Sufficient for expected data volumes
+- Avoids complexity of custom `AsyncEnumerable<Entity>`
+- Consistent with Vala's async patterns
+
+**Example:**
+```vala
+public abstract async Entity[] query_by_type_async(string type_label) throws EngineError;
+```
+
+### Decision 2: Migration Strategy - Clean Break
+
+**Decision:** Remove all sync methods immediately, update everything in this refactor.
+
+**Rationale:**
+- Project is in development phase
+- No external users to migrate
+- Cleaner codebase without deprecation paths
+- Faster implementation without compatibility layers
+
+**Implications:**
+- No gradual migration period
+- No deprecated method markers needed
+- All code updated in single refactor
+
+### Decision 3: Transaction Helper - Removed
+
+**Decision:** Remove `with_write_transaction()` entirely.
+
+**Rationale:**
+- Vala doesn't support async delegates
+- Cannot pass async callbacks to transaction helper
+- Manual transaction management is clearer
+
+**New Pattern:**
+```vala
+// Old (no longer possible):
+yield engine.with_write_transaction_async(async (tx) => {
+    yield entity.set_entity_property_async("key", value);
+});
+
+// New (manual management):
+try {
+    yield engine.begin_transaction_async();
+    yield entity.set_entity_property_async("key", value);
+    yield engine.commit_async();
+} catch (Error e) {
+    yield engine.rollback_async();
+    throw e;
+}
+```
+
+### Decision 4: Hooks - Remain Synchronous
+
+**Decision:** Keep hooks synchronous, running in the DBM thread.
+
+**Rationale:**
+- Hooks should be fast operations
+- Avoids complexity of async hook management
+- Storage layer (HighLevel/LowLevel) remains synchronous
+- Simpler mental model for hook authors
+
+**Implementation Note:**
+EmbeddedEngine keeps internal sync versions of methods like `get_entity_or_null_sync()` for hook use. These are:
+- Marked `internal` (not public API)
+- Used only by HookManager during hook execution
+- Bypass the async queue for direct Dbm access
+
+### Decision 5: Error Handling - Use `throws` Keyword
+
+**Decision:** Use `throws` keyword for async methods, consistent with existing codebase.
+
+**Rationale:**
+- Vala supports throws in async methods
+- Consistent with existing sync methods
+- Familiar pattern for Vala developers
+- No need for Result types or error callbacks
+
+**Example:**
+```vala
+public async Entity get_entity_async(EntityPath path) throws EngineError;
+public async Entity[] query_by_type_async(string type_label) throws EngineError;
+public async void set_entity_property_async(string name, Element value) throws EngineError;
+```
+
+### Decision 6: Version Compatibility - Clean Break
+
+**Decision:** No compatibility shims needed - project is in development.
+
+**Rationale:**
+- No released versions to support
+- No external users requiring migration path
+- Cleaner implementation without compatibility layers
+
+---
+
+## Summary
+
+This design proposes:
+
+1. **AsyncDbmQueue**: A dedicated thread with priority-based operation queue
+2. **Async Interfaces**: Engine and Entity interfaces with async methods
+3. **Concurrent Read Support**: Property on Dbm for LMDB optimization
+4. **Clean Break Migration**: Immediate removal of sync methods
+5. **Unified API**: No more wrapper classes, async is the default
+6. **Manual Transactions**: No transaction helper, explicit begin/commit/rollback
+7. **Sync Hooks**: Hooks remain synchronous with internal sync methods
+
+The key benefits are:
+- Single thread for DBM operations (no thread explosion)
+- Priority reads for better responsiveness
+- Clean async API without wrapper duplication
+- Support for LMDB's concurrent read capability
+- Simpler codebase without deprecation complexity

+ 111 - 0
plans/hookmanager-batch-fix.md

@@ -0,0 +1,111 @@
+# HookManager Batch Processing Fix
+
+## Problem
+
+Batched document creation is significantly slower than regular inserts due to a double-processing bug in `HookManager.commit_batch()`.
+
+### Performance Impact
+
+| Operation | Regular | Batched | Slowdown |
+|-----------|---------|---------|----------|
+| create_document_small_indexed | 1.95ms | 62.22ms | 32× |
+| create_document_large_indexed | 33.98ms | 128.26ms | 4× |
+
+## Root Cause
+
+In [`HookManager.commit_batch()`](../src/Engine/HookManager.vala:716-730):
+
+```vala
+public void commit_batch() {
+    // Execute batch for batched handlers
+    execute_batch_for_handlers((!) _current_batch);  // Step 1
+    
+    // Also execute individual events for non-batched handlers
+    ((!) _current_batch).execute(this);  // Step 2 - BUG: executes ALL handlers
+}
+```
+
+The comment says "non-batched handlers" but `HookBatch.execute()` calls `notify_entity_change_immediate()` which iterates ALL handlers including `BatchedHookHandler` instances.
+
+## Solution
+
+Modify the `notify_*_immediate()` methods to skip handlers that support batch processing, since they already processed events in `execute_batch_for_handlers()`.
+
+### Changes Required
+
+#### 1. Fix `notify_entity_change_immediate()` (around line 905)
+
+**Before:**
+```vala
+private void notify_entity_change_immediate(Core.Entity entity, EntityChangeType change_type) {
+    foreach (var handler in _handlers) {
+        try {
+            handler.on_entity_change(entity, change_type);
+        } catch (Error e) {
+            warning("Hook handler threw error for %s: %s", entity.path.to_string(), e.message);
+        }
+    }
+}
+```
+
+**After:**
+```vala
+private void notify_entity_change_immediate(Core.Entity entity, EntityChangeType change_type) {
+    foreach (var handler in _handlers) {
+        // Skip batched handlers - they already processed events in execute_batch_for_handlers()
+        if (handler is BatchedHookHandler && ((BatchedHookHandler) handler).supports_batch) {
+            continue;
+        }
+        try {
+            handler.on_entity_change(entity, change_type);
+        } catch (Error e) {
+            warning("Hook handler threw error for %s: %s", entity.path.to_string(), e.message);
+        }
+    }
+}
+```
+
+#### 2. Fix `notify_property_change_immediate()` (around line 919)
+
+**Before:**
+```vala
+private void notify_property_change_immediate(Core.Entity entity, string property_name, Value? old_value, Value? new_value) {
+    foreach (var handler in _handlers) {
+        try {
+            handler.on_property_change(entity, property_name, old_value, new_value);
+        } catch (Error e) {
+            warning("Hook handler threw error for %s.%s: %s", entity.path.to_string(), property_name, e.message);
+        }
+    }
+}
+```
+
+**After:**
+```vala
+private void notify_property_change_immediate(Core.Entity entity, string property_name, Value? old_value, Value? new_value) {
+    foreach (var handler in _handlers) {
+        // Skip batched handlers - they already processed events in execute_batch_for_handlers()
+        if (handler is BatchedHookHandler && ((BatchedHookHandler) handler).supports_batch) {
+            continue;
+        }
+        try {
+            handler.on_property_change(entity, property_name, old_value, new_value);
+        } catch (Error e) {
+            warning("Hook handler threw error for %s.%s: %s", entity.path.to_string(), property_name, e.message);
+        }
+    }
+}
+```
+
+## Expected Outcome
+
+After the fix:
+- Batched handlers process events exactly once via `on_batch_change()`
+- Non-batched handlers process events individually via `on_entity_change()` / `on_property_change()`
+- Batched inserts should be **faster** than regular inserts (as intended)
+
+## Verification
+
+1. Run all tests: `meson test -C builddir`
+2. Run performance benchmarks: `builddir/tools/implexus-perf/implexus-perf`
+3. Compare batched vs regular insert times

+ 93 - 0
plans/hookmanager-batch-optimize.md

@@ -0,0 +1,93 @@
+# HookManager Batch Optimization Plan
+
+## Problem
+
+Even after fixing the double-processing bug, batched operations are still slower than individual inserts:
+
+| Operation | Individual | Batched | Ratio |
+|-----------|------------|---------|-------|
+| create_document_small | 4.19ms | 238.51ms per batch (23.85ms/doc) | 5.7× slower |
+| create_document_large | 42.92ms | 452.75ms per batch (45.27ms/doc) | 1.05× slower |
+
+## Root Cause Analysis
+
+In `commit_batch()`, even when ALL handlers are batched handlers (Category, Catalogue, Index all implement `BatchedHookHandler` with `supports_batch = true`), the code still calls `batch.execute()`:
+
+```vala
+public void commit_batch() {
+    // Execute batch for batched handlers
+    execute_batch_for_handlers((!) _current_batch);  // ← Correct: calls on_batch_change()
+    
+    // Also execute individual events for non-batched handlers
+    ((!) _current_batch).execute(this);  // ← WASTEFUL when no non-batched handlers!
+    
+    _current_batch = null;
+    _batch_mode = false;
+}
+```
+
+### What `batch.execute()` does (unnecessarily when all handlers are batched):
+
+1. **`get_consolidated_events()`** - Creates new Vector, Dictionary, iterates all events
+2. **For each consolidated event:**
+   - Calls `engine.get_entity_or_null()` - **Storage lookup!**
+   - Calls `notify_entity_change_from_event()` → `notify_entity_change_immediate()`
+   - Iterates ALL handlers just to skip them (they're all batched)
+3. **`execute_property_changes()`** - Iterates property changes, calls handlers that skip
+
+### Why This is Expensive
+
+For 10 documents with 2 properties each:
+- 30 events recorded
+- 10 entity lookups from storage (expensive!)
+- 30 handler iterations (all skipped, but still iterated)
+
+## Solution
+
+Modify `commit_batch()` to check if there are any non-batched handlers before calling `batch.execute()`:
+
+```vala
+public void commit_batch() {
+    if (_current_batch == null) {
+        return;
+    }
+    
+    // Execute batch for batched handlers
+    execute_batch_for_handlers((!) _current_batch);
+    
+    // Only execute individual events if there are non-batched handlers
+    if (has_non_batched_handlers()) {
+        ((!) _current_batch).execute(this);
+    }
+    
+    _current_batch = null;
+    _batch_mode = false;
+}
+
+private bool has_non_batched_handlers() {
+    foreach (var handler in _handlers) {
+        if (!(handler is BatchedHookHandler)) {
+            return true;
+        }
+        var batched = (BatchedHookHandler) handler;
+        if (!batched.supports_batch) {
+            return true;
+        }
+    }
+    return false;
+}
+```
+
+## Expected Outcome
+
+After fix:
+- `batch.execute()` is skipped entirely when all handlers support batching
+- No unnecessary entity lookups
+- No unnecessary handler iterations
+- Batched inserts should be **faster** than individual inserts (single transaction vs N transactions)
+
+## Verification
+
+1. Run tests: `meson test -C builddir`
+2. Run benchmarks: `builddir/tools/implexus-perf/implexus-perf gdbm:///tmp/perf-test`
+3. Compare batched vs individual insert times

+ 455 - 0
plans/safepath-design.md

@@ -0,0 +1,455 @@
+# SafePath API Design
+
+## Overview
+
+This document describes the design for a `SafePath` API that provides a succinct, variadic constructor for creating URL-encoded entity paths in Implexus.
+
+## Analysis of Current Path System
+
+### Current Implementation: EntityPath
+
+The existing [`EntityPath`](../src/Core/EntityPath.vala) class provides:
+
+1. **Multiple constructors**:
+   - `EntityPath(string path_string)` - parses a path string
+   - `EntityPath.root()` - creates the root path
+   - `EntityPath.from_segments(Enumerable<string> segments)` - creates from segment collection
+   - `EntityPath.with_child(EntityPath parent, string name)` - creates child path
+
+2. **Current escaping mechanism** (tilde-based, similar to RFC 6901 JSON Pointer):
+   ```
+   ~  → ~7e
+   /  → ~2f
+   \  → ~5c
+   \0 → ~00
+   ```
+
+3. **Limitations**:
+   - No variadic constructor for inline path building
+   - Requires string concatenation or multiple method calls for multi-segment paths
+   - Tilde escaping is non-standard and may confuse users
+
+### Usage Patterns from Tests
+
+From [`EntityPathTest.vala`](../tests/Core/EntityPathTest.vala), typical usage includes:
+
+```vala
+// Current verbose patterns
+var root = new EntityPath.root();
+var users = root.append_child("users");
+var john = users.append_child("john");
+
+// Or string-based
+var path = new EntityPath("/users/john/profile");
+```
+
+## Proposed SafePath API
+
+### Design Goals
+
+1. **Succinct API**: Single constructor call with variadic segments
+2. **URL Encoding**: Automatic percent-encoding of each segment using standard RFC 3986
+3. **Integration**: Produces `EntityPath` instances for seamless integration
+4. **Safety**: Handles special characters, empty segments, and edge cases
+
+### API Design
+
+```vala
+namespace Implexus.Core {
+
+/**
+ * SafePath provides a convenient factory for creating EntityPath instances
+ * with automatic URL encoding of path segments.
+ * 
+ * Example usage:
+ * {{{
+ * var path = SafePath.path("users", "john doe", "profile");
+ * // Creates EntityPath for /users/john%20doe/profile
+ * 
+ * var root = SafePath.path();  // Root path
+ * var simple = SafePath.path("catalogue");  // Single segment
+ * }}}
+ */
+public class SafePath : Object {
+
+    /**
+     * Characters that MUST be encoded in path segments.
+     * Based on RFC 3986 with additional safety characters.
+     */
+    private const string RESERVED_CHARS = "!*'();:@&=+$,/?#[]%\"\\<>^`{|}~";
+    
+    /**
+     * Creates an EntityPath from variadic segments, URL-encoding each segment.
+     * 
+     * @param first_segment The first path segment (required to start variadic args)
+     * @param ... Additional segments, terminated by null
+     * @return A new EntityPath with encoded segments
+     * 
+     * Example:
+     * {{{
+     * var path = SafePath.path("users", "john", "profile", null);
+     * // Result: /users/john/profile
+     * 
+     * var encoded = SafePath.path("data", "2024/01", "file name", null);
+     * // Result: /data/2024%2F01/file%20name
+     * }}}
+     */
+    public static EntityPath path(string? first_segment, ...) {
+        var segments = new Invercargill.DataStructures.Vector<string>();
+        
+        if (first_segment == null) {
+            return new EntityPath.root();
+        }
+        
+        // Add first segment
+        segments.add(encode_segment(first_segment));
+        
+        // Process variadic arguments
+        va_list args = va_list();
+        while (true) {
+            string? segment = args.arg();
+            if (segment == null) {
+                break;
+            }
+            segments.add(encode_segment(segment));
+        }
+        
+        return new EntityPath.from_segments(segments.as_enumerable());
+    }
+    
+    /**
+     * Creates an EntityPath from an array of segments.
+     * Alternative API for when array-based construction is preferred.
+     * 
+     * @param segments Array of path segments
+     * @return A new EntityPath with encoded segments
+     */
+    public static EntityPath from_array(string[] segments) {
+        if (segments.length == 0) {
+            return new EntityPath.root();
+        }
+        
+        var encoded_segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var segment in segments) {
+            encoded_segments.add(encode_segment(segment));
+        }
+        
+        return new EntityPath.from_segments(encoded_segments.as_enumerable());
+    }
+    
+    /**
+     * URL-encodes a path segment according to RFC 3986.
+     * 
+     * Encodes:
+     * - All reserved URI characters
+     * - Space (as %20, not +)
+     * - Non-ASCII characters (as percent-encoded UTF-8)
+     * - Control characters
+     * 
+     * @param segment The raw segment to encode
+     * @return The URL-encoded segment
+     */
+    public static string encode_segment(string segment) {
+        if (segment.length == 0) {
+            return "";
+        }
+        
+        // Use GLib's URI escaping with custom reserved set
+        // GLib.Uri.escape_string encodes space as %20 by default
+        return Uri.escape_string(segment, RESERVED_CHARS, true);
+    }
+    
+    /**
+     * Decodes a URL-encoded path segment.
+     * 
+     * @param encoded The encoded segment
+     * @return The decoded segment
+     * @throws EntityError.INVALID_PATH if the segment contains invalid percent-encoding
+     */
+    public static string decode_segment(string encoded) throws EntityError {
+        string? decoded = Uri.unescape_string(encoded);
+        if (decoded == null) {
+            throw new EntityError.INVALID_PATH(
+                "Invalid percent-encoding in path segment: %s".printf(encoded)
+            );
+        }
+        return decoded;
+    }
+}
+
+} // namespace Implexus.Core
+```
+
+### Alternative: EntityPath Extension
+
+An alternative design extends EntityPath directly with static factory methods:
+
+```vala
+// Add to EntityPath class
+public partial class EntityPath {
+    
+    /**
+     * Creates an EntityPath from variadic segments with automatic URL encoding.
+     * Terminate with null.
+     * 
+     * Example:
+     * {{{
+     * var path = EntityPath.from_parts("users", "john doe", null);
+     * }}}
+     */
+    public static EntityPath from_parts(string? first_segment, ...) {
+        var segments = new Invercargill.DataStructures.Vector<string>();
+        
+        if (first_segment == null) {
+            return new EntityPath.root();
+        }
+        
+        segments.add(SafePath.encode_segment(first_segment));
+        
+        va_list args = va_list();
+        while (true) {
+            string? segment = args.arg();
+            if (segment == null) break;
+            segments.add(SafePath.encode_segment(segment));
+        }
+        
+        return new EntityPath.from_segments(segments.as_enumerable());
+    }
+}
+```
+
+## URL Encoding Strategy
+
+### Characters to Encode
+
+Following RFC 3986 with additional safety considerations:
+
+| Category | Characters | Encoding Example |
+|----------|------------|------------------|
+| Space | ` ` | `%20` |
+| Reserved | `! * ' ( ) ; : @ & = + $ , / ? # [ ]` | `%21`, `%2A`, etc. |
+| Percent | `%` | `%25` |
+| Control | `\x00`-`\x1F` | `%00`-`%1F` |
+| Non-ASCII | Unicode chars | UTF-8 percent-encoded |
+
+### GLib.Uri Methods
+
+Use these GLib methods for encoding/decoding:
+
+```vala
+// Encoding
+string encoded = Uri.escape_string(segment, RESERVED_CHARS, true);
+
+// Decoding  
+string? decoded = Uri.unescape_string(encoded);
+```
+
+**Note (review)**: In GLib, the second parameter of `Uri.escape_string()` is `reserved_chars_allowed` — characters that are *left unescaped* — and the third (`allow_utf8`) keeps multi-byte UTF-8 sequences literal when `true`. Passing `RESERVED_CHARS` as the allowed set (with `allow_utf8 = true`) would therefore leave those characters and non-ASCII text *unencoded*, which is the opposite of the intent above and contradicts the Unicode example below. To percent-encode everything outside the unreserved set, pass `null` for the allowed set and `false` for `allow_utf8` — the snippet's arguments should be adjusted accordingly during implementation.
+
+### Why Not Tilde Escaping?
+
+The current EntityPath uses tilde escaping (`~2f` for `/`). SafePath uses standard URL encoding (`%2F` for `/`) because:
+
+1. **Standard**: RFC 3986 is universally understood
+2. **Tool Support**: All HTTP tools, debuggers, and libraries handle it
+3. **Debugging**: `%XX` format is immediately recognizable
+4. **Interoperability**: Works with web APIs and external systems
+
+## Edge Cases and Error Handling
+
+### Empty Segments
+
+```vala
+// Empty string segment - allowed but produces empty encoded result
+var path = SafePath.path("users", "", "profile", null);
+// Result: /users//profile (double slash normalized by EntityPath parsing)
+
+// Recommendation: Validate segments before calling SafePath
+```
+
+### Null Handling
+
+```vala
+// null terminates the variadic list
+var path = SafePath.path("a", "b", null, "c", null);
+// Result: /a/b (stops at first null)
+```
+
+### Special Characters
+
+```vala
+// Slashes in segment names are encoded
+var path = SafePath.path("data", "2024/01/15", "log", null);
+// Result: /data/2024%2F01%2F15/log
+
+// Percent signs are double-encoded safely
+var path = SafePath.path("query", "100%", null);
+// Result: /query/100%25
+```
+
+### Unicode Handling
+
+```vala
+// Unicode characters are UTF-8 percent-encoded
+var path = SafePath.path("users", "日本語", null);
+// Result: /users/%E6%97%A5%E6%9C%AC%E8%AA%9E
+```
+
+## Integration with EntityPath
+
+### Conversion Flow
+
+```mermaid
+flowchart LR
+    A[Raw segments] --> B[SafePath.path]
+    B --> C[URL encode each segment]
+    C --> D[EntityPath.from_segments]
+    D --> E[EntityPath instance]
+    
+    E --> F[to_string: escaped display]
+    E --> G[to_key: raw storage key]
+```
+
+### Storage Considerations
+
+The EntityPath stores raw (unencoded) segments internally. The encoding happens at construction time:
+
+```vala
+// Input: "john doe" (contains space)
+var path = SafePath.path("users", "john doe", null);
+
+// Internal storage: segments = ["users", "john doe"]
+// to_string(): "/users/john%20doe" (URL encoded for display)
+// to_key(): "users/john doe" (raw for storage keys)
+```
+
+**Important**: This design stores raw segments, not encoded ones. This matches the current EntityPath behavior where escaping is only applied in `to_string()`.
+
+### Revised Design: Store Encoded Segments
+
+For true safety, we should store the encoded segments:
+
+```vala
+public static EntityPath path(string? first_segment, ...) {
+    var segments = new Invercargill.DataStructures.Vector<string>();
+    
+    if (first_segment == null) {
+        return new EntityPath.root();
+    }
+    
+    // Store ENCODED segments
+    segments.add(encode_segment(first_segment));
+    
+    va_list args = va_list();
+    while (true) {
+        string? segment = args.arg();
+        if (segment == null) break;
+        segments.add(encode_segment(segment));
+    }
+    
+    return new EntityPath.from_segments(segments.as_enumerable());
+}
+```
+
+With this approach:
+- `to_string()`: `/users/john%20doe` (segments already encoded)
+- `to_key()`: `users/john%20doe` (encoded in storage too)
+
+This ensures special characters never appear in storage keys.
+
+## Example Usage Patterns
+
+### Basic Path Construction
+
+```vala
+// Simple path
+var catalogue = SafePath.path("catalogue", null);
+// EntityPath: /catalogue
+
+// Nested path
+var document = SafePath.path("catalogue", "category", "document", null);
+// EntityPath: /catalogue/category/document
+```
+
+### Paths with Special Characters
+
+```vala
+// Spaces
+var user_path = SafePath.path("users", "John Smith", null);
+// EntityPath: /users/John%20Smith
+
+// Slashes in names
+var date_path = SafePath.path("logs", "2024/01/15", null);
+// EntityPath: /logs/2024%2F01%2F15
+
+// Query strings (common in document IDs)
+var doc = SafePath.path("docs", "id=123&type=pdf", null);
+// EntityPath: /docs/id%3D123%26type%3Dpdf
+```
+
+### Array-Based Construction
+
+```vala
+// When segments are already in an array
+string[] parts = { "users", user_id, "settings" };
+var settings_path = SafePath.from_array(parts);
+```
+
+### Integration with Engine Operations
+
+```vala
+// Creating documents with safe paths
+public async Document create_document(Engine engine, string catalogue, 
+                                       string category, string doc_name) throws Error {
+    var path = SafePath.path(catalogue, category, doc_name, null);
+    return yield engine.create_document(path);
+}
+
+// Building index paths
+var index_path = SafePath.path("catalogue", "products", "indexes", "price", null);
+```
+
+## Implementation Checklist
+
+When implementing this design:
+
+1. [ ] Create `SafePath` class in `src/Core/SafePath.vala`
+2. [ ] Implement `path()` variadic method with null terminator
+3. [ ] Implement `from_array()` array-based method
+4. [ ] Implement `encode_segment()` using `GLib.Uri.escape_string`
+5. [ ] Implement `decode_segment()` using `GLib.Uri.unescape_string`
+6. [ ] Add unit tests in `tests/Core/SafePathTest.vala`:
+   - [ ] Basic path construction
+   - [ ] URL encoding verification
+   - [ ] Special character handling
+   - [ ] Unicode handling
+   - [ ] Empty segment handling
+   - [ ] Null terminator handling
+   - [ ] Round-trip encode/decode
+7. [ ] Update `src/meson.build` to include new file
+8. [ ] Add integration examples to documentation
+
+## Open Questions
+
+1. **Segment Validation**: Should SafePath reject empty segments, or pass them through?
+   - Recommendation: Pass through, let EntityPath handle normalization
+
+2. **Encoding Storage**: Should segments be stored encoded or raw?
+   - Recommendation: Store encoded for consistency and safety
+
+3. **Error on Invalid Input**: What should happen with null bytes in segments?
+   - Recommendation: Encode as `%00` (already handled by URL encoding)
+
+4. **API Style**: Static factory class vs. EntityPath extension method?
+   - Recommendation: Start with `SafePath` class, add `EntityPath.from_parts()` as convenience alias
+
+## Summary
+
+The `SafePath` API provides:
+
+- **Succinct variadic construction**: `SafePath.path("a", "b", "c", null)`
+- **Automatic URL encoding**: Standard RFC 3986 percent-encoding
+- **Seamless integration**: Returns `EntityPath` instances
+- **Edge case handling**: Proper handling of special characters, unicode, and empty segments
+
+This design enables safer, more readable path construction while maintaining full compatibility with the existing EntityPath system.

+ 403 - 0
plans/storage-migration-plan.md

@@ -0,0 +1,403 @@
+# Storage Layer Migration Plan
+
+## Overview
+
+Migrate from legacy `BasicStorage` + `IndexManager` to the new `HighLevel` + `LowLevel` architecture while preserving all performance optimizations.
+
+## Current State Analysis
+
+### Legacy Components (to be removed)
+- [`BasicStorage`](../src/Storage/Storage.vala) - High-level storage interface
+- [`IndexManager`](../src/Storage/IndexManager.vala) - Index operations with performance optimizations
+
+### New Architecture (to be completed)
+- **LowLevel**: Single-responsibility storage classes per key prefix
+- **HighLevel**: Entity-specific facades composing LowLevel stores
+
+### Performance Optimizations in IndexManager (must preserve)
+
+| Optimization | Location | Description |
+|--------------|----------|-------------|
+| HashSet dedup on load | `load_string_set()` L764-770 | Uses HashSet during deserialization to deduplicate while preserving order |
+| HashSet for membership checks | `add_to_ngram_index()` L407-408 | Creates HashSet for O(1) contains() checks instead of O(n) Vector.contains() |
+| HashSet for remove | `remove_from_ngram_index()` L428-429 | HashSet for efficient membership test before rebuild |
+| Batch add with HashSet | `add_to_ngram_index_batch()` L446-456 | Tracks changes, only saves if modified, uses HashSet for dedup |
+| Batch remove with HashSet | `remove_from_ngram_index_batch()` L463-474 | HashSet for values, rebuilds vector without matches |
+| Set members with dedup | `set_category_members()` L205-213 | Uses HashSet to deduplicate input enumerable |
+| Batch reverse index | `add_bigrams_reverse_batch()` L564-570 | Dictionary-based batch operations |
+| Trigram batch ops | `add_trigrams_batch()` L622-627 | Dictionary-based batch trigram operations |
+
+---
+
+## Migration Plan
+
+### Phase 1: Add Batch Operations to LowLevel Classes
+
+Add the missing batch methods and HashSet optimizations to LowLevel storage classes.
+
+#### 1.1 Update `CategoryIndexStorage`
+
+**File**: [`src/Storage/LowLevel/CategoryIndexStorage.vala`](../src/Storage/LowLevel/CategoryIndexStorage.vala)
+
+**Changes**:
+- [ ] Add `set_members()` method with HashSet deduplication (from IndexManager L205-213)
+- [ ] Optimize `add_member()` to use HashSet for O(1) membership check
+- [ ] Optimize `remove_member()` to use HashSet for O(1) membership check
+
+**Current vs Optimized**:
+```vala
+// Current (O(n) contains check)
+public void add_member(string category_path, string doc_path) throws StorageError {
+    string key = members_key(category_path);
+    var members = load_string_set(key);
+    if (!members.contains(doc_path)) {  // O(n) operation
+        members.add(doc_path);
+        save_string_set(key, members);
+    }
+}
+
+// Optimized (O(1) contains check once the HashSet is built; the build itself is
+// O(n) per call, so the real win is when many membership checks share one loaded
+// set — e.g. the batch operations below)
+public void add_member(string category_path, string doc_path) throws StorageError {
+    string key = members_key(category_path);
+    var members = load_string_set(key);
+    var members_hash = new Invercargill.DataStructures.HashSet<string>();
+    foreach (var m in members) members_hash.add(m);
+    
+    if (!members_hash.has(doc_path)) {  // O(1) lookup (vs O(n) Vector.contains)
+        members.add(doc_path);
+        save_string_set(key, members);
+    }
+}
+```
+
+#### 1.2 Update `CatalogueIndexStorage`
+
+**File**: [`src/Storage/LowLevel/CatalogueIndexStorage.vala`](../src/Storage/LowLevel/CatalogueIndexStorage.vala)
+
+**Changes**:
+- [ ] Add `set_group_members()` method with HashSet deduplication
+- [ ] Optimize `add_to_group()` with HashSet membership check
+- [ ] Optimize `remove_from_group()` with HashSet membership check
+
+#### 1.3 Update `TextIndexStorage` (Critical - most complex)
+
+**File**: [`src/Storage/LowLevel/TextIndexStorage.vala`](../src/Storage/LowLevel/TextIndexStorage.vala)
+
+**Changes**:
+- [ ] Optimize `add_trigram()` with HashSet membership check (from IndexManager L404-414)
+- [ ] Optimize `remove_trigram()` with HashSet membership check (from IndexManager L425-440)
+- [ ] Add `add_trigram_batch()` method (from IndexManager L442-457)
+- [ ] Add `remove_trigram_batch()` method (from IndexManager L459-475)
+- [ ] Add `add_bigram_mapping_batch()` method (from IndexManager L564-570)
+- [ ] Add `add_unigram_mapping_batch()` method (from IndexManager L614-620)
+- [ ] Add `add_trigrams_batch()` dictionary method (from IndexManager L622-628)
+- [ ] Add `remove_trigrams_batch()` dictionary method (from IndexManager L630-636)
+- [ ] Update `load_string_set()` to use HashSet for deduplication (from IndexManager L747-779)
+
+#### 1.4 Update `TypeIndexStorage`
+
+**File**: [`src/Storage/LowLevel/TypeIndexStorage.vala`](../src/Storage/LowLevel/TypeIndexStorage.vala)
+
+**Changes**:
+- [ ] Optimize `add_document()` with HashSet membership check
+- [ ] Optimize `remove_document()` with HashSet membership check
+- [ ] Update `load_string_set()` to use HashSet for deduplication
+
+---
+
+### Phase 2: Add Batch Methods to HighLevel Stores
+
+Expose the new LowLevel batch methods through the HighLevel facades.
+
+#### 2.1 Update `CategoryStore`
+
+**File**: [`src/Storage/HighLevel/CategoryStore.vala`](../src/Storage/HighLevel/CategoryStore.vala)
+
+**Changes**:
+- [ ] Add `set_members()` facade method
+- [ ] Ensure all LowLevel optimizations are properly delegated
+
+#### 2.2 Update `CatalogueStore`
+
+**File**: [`src/Storage/HighLevel/CatalogueStore.vala`](../src/Storage/HighLevel/CatalogueStore.vala)
+
+**Changes**:
+- [ ] Add `set_group_members()` facade method
+- [ ] Ensure all LowLevel optimizations are properly delegated
+
+#### 2.3 Update `IndexStore`
+
+**File**: [`src/Storage/HighLevel/IndexStore.vala`](../src/Storage/HighLevel/IndexStore.vala)
+
+**Changes**:
+- [ ] Add `add_trigram_batch()` facade method
+- [ ] Add `remove_trigram_batch()` facade method
+- [ ] Add `add_bigram_mappings_batch()` facade method
+- [ ] Add `add_unigram_mappings_batch()` facade method
+- [ ] Add `add_trigrams_batch()` dictionary method
+- [ ] Add `remove_trigrams_batch()` dictionary method
+
+---
+
+### Phase 3: Migrate Entity Classes
+
+Update Category, Catalogue, and Index entities to use HighLevel stores instead of IndexManager.
+
+#### 3.1 Migrate `Category` Entity
+
+**File**: [`src/Entities/Category.vala`](../src/Entities/Category.vala)
+
+**Changes**:
+- [ ] Replace `get_index_manager()` calls with `get_category_store()` calls
+- [ ] Update `populate_index()` to use `CategoryStore.set_members()`
+- [ ] Update `add_document()` to use `CategoryStore.add_member()`
+- [ ] Update `remove_document()` to use `CategoryStore.remove_member()`
+- [ ] Update `contains_document()` to use `CategoryStore.get_members()`
+- [ ] Update `batch_update_members()` to use CategoryStore methods
+- [ ] Update `clear_index()` to use `CategoryStore.clear_index()`
+
+**Example migration**:
+```vala
+// Before (using IndexManager)
+var index_manager = get_index_manager();
+if (index_manager != null) {
+    ((!) index_manager).add_to_category(_path.to_string(), doc_path);
+}
+
+// After (using CategoryStore)
+var store = get_category_store();
+if (store != null) {
+    ((!) store).add_member(_path, doc_path);
+}
+```
+
+#### 3.2 Migrate `Catalogue` Entity
+
+**File**: [`src/Entities/Catalogue.vala`](../src/Entities/Catalogue.vala)
+
+**Changes**:
+- [ ] Replace `get_index_manager()` calls with `get_catalogue_store()` calls
+- [ ] Update all index operations to use CatalogueStore methods
+- [ ] Update batch operations to use CatalogueStore methods
+
+#### 3.3 Migrate `Index` Entity
+
+**File**: [`src/Entities/Index.vala`](../src/Entities/Index.vala)
+
+**Changes**:
+- [ ] Replace `get_index_manager()` calls with `get_index_store()` calls
+- [ ] Update trigram operations to use IndexStore methods
+- [ ] Update batch operations to use IndexStore batch methods
+- [ ] Update search methods to use IndexStore for lookups
+
+---
+
+### Phase 4: Update Engine Configuration
+
+#### 4.1 Update `EngineConfiguration`
+
+**File**: [`src/Engine/EngineConfiguration.vala`](../src/Engine/EngineConfiguration.vala)
+
+**Changes**:
+- [ ] Remove `index_manager` property (L171)
+- [ ] Add store accessors or remove if handled by EmbeddedEngine
+
+#### 4.2 Update `Core.Engine` interface
+
+**File**: [`src/Core/Engine.vala`](../src/Core/Engine.vala)
+
+**Changes**:
+- [ ] Remove `index_manager` property (L215-216)
+
+#### 4.3 Update `EmbeddedEngine`
+
+**File**: [`src/Engine/EmbeddedEngine.vala`](../src/Engine/EmbeddedEngine.vala)
+
+**Changes**:
+- [ ] Remove `_index_manager` field (L67)
+- [ ] Remove IndexManager initialization (L189)
+- [ ] Remove `_configuration.index_manager` assignment (L190)
+- [ ] Keep HighLevel store initialization (L211-217)
+- [ ] Expose stores via properties (already done L121-146)
+
+---
+
+### Phase 5: Update Remaining BasicStorage Usage
+
+#### 5.1 Update `EngineFactory`
+
+**File**: [`src/Engine/EngineFactory.vala`](../src/Engine/EngineFactory.vala)
+
+**Changes**:
+- [ ] Replace `BasicStorage` with direct Dbm + HighLevel stores
+- [ ] Or keep BasicStorage for simple cases, use stores for indexed entities
+
+#### 5.2 Update `RemoteEngine`
+
+**File**: [`src/Engine/RemoteEngine.vala`](../src/Engine/RemoteEngine.vala)
+
+**Changes**:
+- [ ] Remove placeholder BasicStorage if no longer needed
+- [ ] Or update to use new architecture
+
+#### 5.3 Update `Server`
+
+**File**: [`src/Server/Server.vala`](../src/Server/Server.vala)
+
+**Changes**:
+- [ ] Update storage initialization to use new architecture
+
+---
+
+### Phase 6: Remove Legacy Code
+
+#### 6.1 Remove Files
+- [ ] Delete `src/Storage/Storage.vala` (BasicStorage class)
+- [ ] Delete `src/Storage/IndexManager.vala`
+
+#### 6.2 Update meson.build
+
+**File**: [`src/meson.build`](../src/meson.build)
+
+**Changes**:
+- [ ] Remove `'Storage/Storage.vala'` from storage_sources (L35)
+- [ ] Remove `'Storage/IndexManager.vala'` from storage_sources (L36)
+- [ ] Remove "Legacy storage (deprecated, will be removed)" comment (L34)
+
+---
+
+### Phase 7: Testing
+
+#### 7.1 Add Unit Tests for LowLevel Classes
+
+**File**: `tests/Storage/LowLevelStorageTest.vala` (new)
+
+- [ ] Test `CategoryIndexStorage` with HashSet optimizations
+- [ ] Test `CatalogueIndexStorage` with HashSet optimizations
+- [ ] Test `TextIndexStorage` batch operations
+- [ ] Test `TypeIndexStorage` with HashSet optimizations
+
+#### 7.2 Add Unit Tests for HighLevel Classes
+
+**File**: `tests/Storage/HighLevelStorageTest.vala` (new)
+
+- [ ] Test `CategoryStore` facade
+- [ ] Test `CatalogueStore` facade
+- [ ] Test `IndexStore` batch methods
+
+#### 7.3 Update Existing Tests
+
+- [ ] Update `StorageTest.vala` to remove BasicStorage tests
+- [ ] Run full test suite to verify no regressions
+
+#### 7.4 Performance Verification
+
+- [ ] Run performance benchmarks before migration
+- [ ] Run performance benchmarks after migration
+- [ ] Verify no performance regression
+
+---
+
+## Key Code Patterns to Preserve
+
+### Pattern 1: HashSet for O(1) Membership Checks
+
+```vala
+// When checking if item exists before add/remove
+var set_hash = new Invercargill.DataStructures.HashSet<string>();
+foreach (var item in set) set_hash.add(item);
+
+if (!set_hash.has(new_item)) {  // O(1) instead of O(n)
+    set.add(new_item);
+    save_string_set(key, set);
+}
+```
+
+### Pattern 2: HashSet Deduplication on Load
+
+```vala
+// When deserializing, deduplicate while preserving order
+var result = new Invercargill.DataStructures.Vector<string>();
+var hash_set = new Invercargill.DataStructures.HashSet<string>();
+
+foreach (var item in array) {
+    if (!item.is_null()) {
+        string value = item.as<string>();
+        if (!hash_set.has(value)) {  // Prevent duplicates
+            hash_set.add(value);
+            result.add(value);
+        }
+    }
+}
+```
+
+### Pattern 3: Change Tracking for Batch Operations
+
+```vala
+// Only save if changes were made
+bool changed = false;
+var existing_hash = new Invercargill.DataStructures.HashSet<string>();
+foreach (var ex in existing) existing_hash.add(ex);
+
+foreach (var val in values) {
+    if (!existing_hash.has(val)) {
+        existing_hash.add(val);
+        existing.add(val);
+        changed = true;
+    }
+}
+if (changed) save_string_set(key, existing);  // Only save if modified
+```
+
+### Pattern 4: Dictionary-based Batch Operations
+
+```vala
+// Process multiple keys in batch
+public void add_trigrams_batch(string index_path, 
+    Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) 
+    throws StorageError {
+    foreach (var trigram in additions.keys) {
+        Invercargill.DataStructures.Vector<string> docs;
+        additions.try_get(trigram, out docs);
+        add_to_ngram_index_batch(index_path, "tri", trigram, docs);
+    }
+}
+```
+
+---
+
+## Risk Assessment
+
+| Risk | Impact | Mitigation |
+|------|--------|------------|
+| Performance regression | High | Benchmark before/after, preserve HashSet patterns |
+| Data corruption | Critical | Same key prefixes used, no data migration needed |
+| API breakage | Medium | HighLevel stores already exist, just need to use them |
+| Test coverage | Medium | Add tests for LowLevel classes before migration |
+
+---
+
+## Estimated Effort
+
+| Phase | Complexity |
+|-------|------------|
+| Phase 1: LowLevel optimizations | Medium - careful pattern copying |
+| Phase 2: HighLevel batch methods | Low - simple facades |
+| Phase 3: Entity migration | Medium - many call sites |
+| Phase 4: Engine configuration | Low - few changes |
+| Phase 5: Remaining usage | Low - few files |
+| Phase 6: Remove legacy | Low - delete files |
+| Phase 7: Testing | Medium - comprehensive testing |
+
+---
+
+## Execution Order
+
+1. **Phase 1** first - ensures LowLevel classes have all optimizations
+2. **Phase 2** second - exposes optimizations through facades
+3. **Phase 7.1-7.2** - test new methods before using them
+4. **Phase 3** - migrate entities to use new stores
+5. **Phase 7.3** - run existing tests
+6. **Phase 4-5** - update engine and remaining code
+7. **Phase 7.4** - performance verification
+8. **Phase 6** - remove legacy code last

+ 500 - 0
plans/virtual-entity-resolution-design.md

@@ -0,0 +1,500 @@
+# Virtual Entity Resolution Design
+
+## Problem Statement
+
+Catalogue groups, Category members, and Index results are "virtual" entities that exist in indexes but cannot be looked up directly by path. For example:
+
+```vala
+// This works:
+var catalogue = yield engine.get_entity_async(EntityPath.parse("/spry/users/users/by_username"));
+var group = yield catalogue.get_child_async("testuser");
+
+// This fails with ENTITY_NOT_FOUND:
+var group = yield engine.get_entity_or_null_async(EntityPath.parse("/spry/users/users/by_username/testuser"));
+```
+
+The diagnosis correctly identified that:
+- Catalogue config exists: `catcfg:/spry/users/users/by_username` ✓
+- Catalogue group index contains members ✓
+- BUT: No `entity:/spry/users/users/by_username/testuser` file exists ✗
+
+This is a **design gap** - virtual children of indexed entities should be resolvable by path just like children of containers.
+
+## Design Goals
+
+1. **Path Transparency**: `get_entity_async("/catalogue/group")` should work the same as `catalogue.get_child_async("group")`
+2. **Performance**: No full path traversal needed - only check immediate parent for indexed entity types
+3. **Consistency**: Category, Catalogue, and Index all support direct path lookup for their virtual children
+4. **Backward Compatibility**: Existing code continues to work
+
+## Key Insight
+
+**Only Documents can be children of Category, Catalogue, and Index entities.**
+
+This means:
+- No recursive traversal is needed - we only need to check one level up
+- The parent path tells us everything we need to know about how to resolve the child
+- We can determine resolution strategy based solely on the parent's entity type
+
+## Entity Type Resolution Matrix
+
+| Parent Type | Child Storage | Lookup Method |
+|-------------|---------------|---------------|
+| Container | Persistent (`entity:` + `children:`) | Direct entity lookup |
+| Category | Virtual (member index `catx:`) | Check parent's member index |
+| Catalogue | Virtual (group index `catl:`) | Check parent's group index, then resolve to Document |
+| Index | Virtual (search pattern) | Execute search, find matching Document |
+
+## Architecture
+
+```mermaid
+flowchart TB
+    subgraph "Path Resolution Flow"
+        START["get_entity_async(path)"]
+        CHECK_ENTITY["Check entity:prefix for path"]
+        ENTITY_EXISTS{"Entity exists?"}
+        
+        GET_PARENT["Get parent path"]
+        CHECK_PARENT_TYPE["Load parent entity type"]
+        
+        PARENT_CONTAINER{"Parent is Container?"}
+        PARENT_CATEGORY{"Parent is Category?"}
+        PARENT_CATALOGUE{"Parent is Catalogue?"}
+        PARENT_INDEX{"Parent is Index?"}
+        
+        NOT_FOUND["Return ENTITY_NOT_FOUND"]
+        RETURN_ENTITY["Return Entity"]
+        
+        CAT_CHECK["Check if child_name in member index"]
+        CAT_EXISTS{"In index?"}
+        CAT_RETURN["Return Document from index"]
+        
+        CATL_CHECK["Check if child_name is a group key"]
+        CATL_IS_GROUP{"Is group key?"}
+        CATL_GROUP["Return CatalogueGroup virtual entity"]
+        CATL_CHECK_DOC["Check if child_name is a document in any group"]
+        CATL_DOC_EXISTS{"Found in group?"}
+        CATL_RETURN_DOC["Return Document"]
+        
+        IDX_SEARCH["Execute search with child_name as pattern"]
+        IDX_RESULTS{"Has results?"}
+        IDX_RETURN["Return IndexResult virtual entity"]
+    end
+    
+    START --> CHECK_ENTITY
+    CHECK_ENTITY --> ENTITY_EXISTS
+    ENTITY_EXISTS -->|Yes| RETURN_ENTITY
+    ENTITY_EXISTS -->|No| GET_PARENT
+    GET_PARENT --> CHECK_PARENT_TYPE
+    CHECK_PARENT_TYPE --> PARENT_CONTAINER
+    
+    PARENT_CONTAINER -->|Yes| NOT_FOUND
+    PARENT_CONTAINER -->|No| PARENT_CATEGORY
+    
+    PARENT_CATEGORY -->|Yes| CAT_CHECK
+    CAT_CHECK --> CAT_EXISTS
+    CAT_EXISTS -->|Yes| CAT_RETURN
+    CAT_EXISTS -->|No| NOT_FOUND
+    
+    PARENT_CATEGORY -->|No| PARENT_CATALOGUE
+    PARENT_CATALOGUE -->|Yes| CATL_CHECK
+    CATL_CHECK --> CATL_IS_GROUP
+    CATL_IS_GROUP -->|Yes| CATL_GROUP
+    CATL_IS_GROUP -->|No| CATL_CHECK_DOC
+    CATL_CHECK_DOC --> CATL_DOC_EXISTS
+    CATL_DOC_EXISTS -->|Yes| CATL_RETURN_DOC
+    CATL_DOC_EXISTS -->|No| NOT_FOUND
+    
+    PARENT_CATALOGUE -->|No| PARENT_INDEX
+    PARENT_INDEX -->|Yes| IDX_SEARCH
+    IDX_SEARCH --> IDX_RESULTS
+    IDX_RESULTS -->|Yes| IDX_RETURN
+    IDX_RESULTS -->|No| NOT_FOUND
+    PARENT_INDEX -->|No| NOT_FOUND
+```
+
+## Implementation Plan
+
+### Phase 1: Add Virtual Entity Resolution to EmbeddedEngine
+
+Modify [`EmbeddedEngine._create_entity_from_storage_async()`](src/Engine/EmbeddedEngine.vala:776) to handle virtual entity resolution when direct entity lookup fails.
+
+#### 1.1 New Internal Method: `_try_resolve_virtual_child_async()`
+
+```vala
+/**
+ * Attempts to resolve a path as a virtual child of an indexed entity.
+ *
+ * This method is called when direct entity lookup fails. It checks if
+ * the parent is a Category, Catalogue, or Index and attempts to resolve
+ * the child name through the appropriate index.
+ *
+ * @param path The path to resolve
+ * @return The entity, or null if not a virtual child
+ */
+private async Core.Entity? _try_resolve_virtual_child_async(Core.EntityPath path) throws Core.EngineError {
+    // Root has no parent
+    if (path.is_root) {
+        return null;
+    }
+    
+    var parent_path = path.parent;
+    var child_name = path.name;
+    
+    // Check if parent exists
+    bool parent_exists = yield _entity_exists_async_internal(parent_path);
+    if (!parent_exists) {
+        return null;
+    }
+    
+    // Get parent entity type
+    Core.EntityType? parent_type;
+    try {
+        parent_type = yield _get_entity_type_async_internal(parent_path);
+    } catch (Storage.StorageError e) {
+        return null;
+    }
+    
+    if (parent_type == null) {
+        return null;
+    }
+    
+    // Resolve based on parent type
+    switch ((!) parent_type) {
+        case Core.EntityType.CATEGORY:
+            return yield _resolve_category_child_async(parent_path, child_name);
+            
+        case Core.EntityType.CATALOGUE:
+            return yield _resolve_catalogue_child_async(parent_path, child_name);
+            
+        case Core.EntityType.INDEX:
+            return yield _resolve_index_child_async(parent_path, child_name);
+            
+        default:
+            // Container children must be persisted entities
+            return null;
+    }
+}
+```
+
+#### 1.2 Category Child Resolution
+
+```vala
+/**
+ * Resolves a child of a Category by checking the member index.
+ */
+private async Core.Entity? _resolve_category_child_async(
+    Core.EntityPath parent_path, 
+    string child_name
+) throws Core.EngineError {
+    // Check if child_name is in the category's member index
+    // Members are stored as full document paths
+    foreach (var doc_path in _category_store.get_members(parent_path)) {
+        var doc_entity_path = Core.EntityPath.parse(doc_path);
+        if (doc_entity_path.name == child_name) {
+            // Found - return the actual document
+            return yield _create_entity_from_storage_async(doc_entity_path);
+        }
+    }
+    
+    return null;
+}
+```
+
+#### 1.3 Catalogue Child Resolution
+
+Catalogues have two types of virtual children:
+1. **Group Keys** - e.g., `/catalogue/admin` returns a `CatalogueGroup`
+2. **Documents within groups** - e.g., `/catalogue/admin/someuser` returns a Document
+
+```vala
+/**
+ * Resolves a child of a Catalogue.
+ *
+ * First checks if child_name is a group key (returns CatalogueGroup).
+ * Then checks if it's a document name within any group (returns Document).
+ */
+private async Core.Entity? _resolve_catalogue_child_async(
+    Core.EntityPath parent_path, 
+    string child_name
+) throws Core.EngineError {
+    // First: Check if child_name is a group key
+    foreach (var key in _catalogue_store.get_group_keys(parent_path)) {
+        if (key == child_name) {
+            // Return a CatalogueGroup virtual entity
+            var catalogue = (yield _create_entity_from_storage_async(parent_path)) as Catalogue;
+            if (catalogue != null) {
+                return new CatalogueGroup(_engine, catalogue, child_name);
+            }
+        }
+    }
+    
+    // Second: Check if child_name is a document within any group
+    foreach (var key in _catalogue_store.get_group_keys(parent_path)) {
+        foreach (var doc_path in _catalogue_store.get_group_members(parent_path, key)) {
+            var doc_entity_path = Core.EntityPath.parse(doc_path);
+            if (doc_entity_path.name == child_name) {
+                // Found - return the actual document
+                return yield _create_entity_from_storage_async(doc_entity_path);
+            }
+        }
+    }
+    
+    return null;
+}
+```
+
+#### 1.4 Index Child Resolution
+
+```vala
+/**
+ * Resolves a child of an Index by executing a search.
+ *
+ * The child_name is treated as a search pattern (e.g., "*term*").
+ */
+private async Core.Entity? _resolve_index_child_async(
+    Core.EntityPath parent_path, 
+    string child_name
+) throws Core.EngineError {
+    // Load the index entity
+    var index = (yield _create_entity_from_storage_async(parent_path)) as Index;
+    if (index == null) {
+        return null;
+    }
+    
+    // Execute search with child_name as pattern
+    var result = ((!) index).search(child_name);
+    return result;  // Returns IndexResult or null
+}
+```
+
+#### 1.5 Modify get_entity_async()
+
+```vala
+public async Core.Entity? get_entity_async(Core.EntityPath path) throws Core.EngineError {
+    // First: Try direct entity lookup
+    bool exists = yield _entity_exists_async_internal(path);
+    if (exists) {
+        return yield _create_entity_from_storage_async(path);
+    }
+    
+    // Second: Try virtual child resolution
+    var virtual_entity = yield _try_resolve_virtual_child_async(path);
+    if (virtual_entity != null) {
+        return (!) virtual_entity;
+    }
+    
+    // Not found anywhere
+    throw new Core.EngineError.ENTITY_NOT_FOUND(
+        "Entity not found: %s".printf(path.to_string())
+    );
+}
+```
+
+#### 1.6 Modify entity_exists_async()
+
+```vala
+public async bool entity_exists_async(Core.EntityPath path) throws Core.EngineError {
+    // First: Check persistent storage
+    bool exists = yield _entity_exists_async_internal(path);
+    if (exists) {
+        return true;
+    }
+    
+    // Second: Check if it's a virtual child
+    var virtual_entity = yield _try_resolve_virtual_child_async(path);
+    return virtual_entity != null;
+}
+```
+
+### Phase 2: Sync Methods for Hook Context
+
+The sync methods used by hooks also need to support virtual entity resolution.
+
+#### 2.1 Modify get_entity_or_null_sync()
+
+```vala
+internal Core.Entity? get_entity_or_null_sync(Core.EntityPath path) {
+    // First: Try direct lookup
+    var entity = _get_entity_or_null_sync_internal(path);
+    if (entity != null) {
+        return entity;
+    }
+    
+    // Second: Try virtual child resolution (sync)
+    return _try_resolve_virtual_child_sync(path);
+}
+```
+
+#### 2.2 Add _try_resolve_virtual_child_sync()
+
+```vala
+/**
+ * Synchronous virtual child resolution for hook context.
+ */
+private Core.Entity? _try_resolve_virtual_child_sync(Core.EntityPath path) {
+    if (path.is_root) {
+        return null;
+    }
+    
+    var parent_path = path.parent;
+    var child_name = path.name;
+    
+    // Check if parent exists
+    if (!_storage.entity_exists(parent_path)) {
+        return null;
+    }
+    
+    // Get parent type
+    Core.EntityType? parent_type;
+    try {
+        parent_type = _storage.get_entity_type(parent_path);
+    } catch (Storage.StorageError e) {
+        return null;
+    }
+    
+    if (parent_type == null) {
+        return null;
+    }
+    
+    switch ((!) parent_type) {
+        case Core.EntityType.CATEGORY:
+            return _resolve_category_child_sync(parent_path, child_name);
+            
+        case Core.EntityType.CATALOGUE:
+            return _resolve_catalogue_child_sync(parent_path, child_name);
+            
+        case Core.EntityType.INDEX:
+            return _resolve_index_child_sync(parent_path, child_name);
+            
+        default:
+            return null;
+    }
+}
+```
+
+### Phase 3: Update CatalogueGroup for Direct Path Access
+
+The `CatalogueGroup` class currently creates its path as `parent.path.append_child(group_key)`. This is correct, but we need to ensure it can be created directly from a path without needing the parent instance.
+
+#### 3.1 Add Factory Method to CatalogueGroup
+
+```vala
+/**
+ * Creates a CatalogueGroup from a path.
+ *
+ * This is used by EmbeddedEngine for virtual entity resolution.
+ *
+ * @param engine The engine
+ * @param path The full path including group key
+ * @return The CatalogueGroup, or null if the group doesn't exist
+ */
+public static CatalogueGroup? from_path(Core.Engine engine, Core.EntityPath path) {
+    if (path.is_root) {
+        return null;
+    }
+    
+    var parent_path = path.parent;
+    var group_key = path.name;
+    
+    // Verify parent is a Catalogue
+    var embedded = engine as Engine.EmbeddedEngine;
+    if (embedded == null) {
+        return null;
+    }
+    
+    var catalogue_store = ((!) embedded).catalogue_store;
+    
+    // Check if group key exists
+    foreach (var key in catalogue_store.get_group_keys(parent_path)) {
+        if (key == group_key) {
+            // Create parent catalogue
+            var catalogue = ((!) embedded).get_entity_or_null_sync(parent_path) as Catalogue;
+            if (catalogue != null) {
+                return new CatalogueGroup(engine, catalogue, group_key);
+            }
+        }
+    }
+    
+    return null;
+}
+```
+
+## Edge Cases and Considerations
+
+### 1. Nested Paths in Catalogues
+
+For path `/catalogue/group/document`:
+- First level (`group`) resolves to a `CatalogueGroup`
+- Second level (`document`) is resolved by `CatalogueGroup.get_child_async()`
+
+This already works correctly because `CatalogueGroup.get_child_async()` looks up documents in the group.
+
+### 2. Index Search Patterns with Slashes
+
+Index search patterns like `*term*` should work, but patterns containing `/` would be parsed as multiple path segments. This is a pre-existing limitation.
+
+### 3. Performance Considerations
+
+- **Category lookup**: O(k) where k = number of members (must scan to match name)
+- **Catalogue group lookup**: O(g) where g = number of groups
+- **Catalogue document lookup**: O(g × m) where m = average group size
+- **Index lookup**: Depends on search pattern complexity
+
+For large catalogues, consider adding a name→path index if this becomes a bottleneck.
+
+### 4. Caching
+
+The current implementation doesn't cache virtual entity lookups. If performance becomes an issue, consider:
+1. Caching the parent entity during resolution
+2. Adding a name→path lookup index for catalogue groups
+
+## Testing Strategy
+
+### Unit Tests
+
+1. **Category Resolution**
+   - Test direct path lookup for category member
+   - Test non-existent member returns null
+   - Test that persisted entities still work
+
+2. **Catalogue Resolution**
+   - Test direct path lookup for catalogue group
+   - Test direct path lookup for document within group
+   - Test non-existent group/document returns null
+
+3. **Index Resolution**
+   - Test direct path lookup with search pattern
+   - Test pattern with no matches returns null
+
+4. **Mixed Paths**
+   - Test paths that mix containers and indexed entities
+   - Test deeply nested paths
+
+### Integration Tests
+
+1. **Existing Tests Compatibility**
+   - Ensure all existing tests pass
+   - Verify no regression in navigation-based access
+
+2. **Performance Tests**
+   - Benchmark virtual entity resolution
+   - Compare with navigation-based access
+
+## Migration
+
+No migration needed - this is purely an enhancement to the resolution logic. Existing data structures remain unchanged.
+
+## Summary
+
+| Component | Change |
+|-----------|--------|
+| [`EmbeddedEngine.get_entity_async()`](src/Engine/EmbeddedEngine.vala:284) | Add virtual child resolution fallback |
+| [`EmbeddedEngine.entity_exists_async()`](src/Engine/EmbeddedEngine.vala:310) | Add virtual child existence check |
+| [`EmbeddedEngine.get_entity_or_null_sync()`](src/Engine/EmbeddedEngine.vala:528) | Add sync virtual child resolution |
+| New: `_try_resolve_virtual_child_async()` | Async virtual entity resolution dispatcher |
+| New: `_try_resolve_virtual_child_sync()` | Sync virtual entity resolution dispatcher |
+| New: `_resolve_category_child_async()` | Category member lookup |
+| New: `_resolve_catalogue_child_async()` | Catalogue group/document lookup |
+| New: `_resolve_index_child_async()` | Index search execution |
+| [`CatalogueGroup`](src/Entities/Catalogue.vala:806) | Add `from_path()` factory method |

+ 307 - 0
src/Core/Engine.vala

@@ -0,0 +1,307 @@
+/**
+ * Engine - Main API interface for Implexus database operations
+ * 
+ * The Engine interface provides the unified API for both embedded and
+ * remote operation modes. It handles entity retrieval, creation, deletion,
+ * queries, and transactions.
+ * 
+ * All I/O operations are async - use yield when calling these methods.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * Interface for the main database engine.
+ * 
+ * The Engine is the primary entry point for working with Implexus.
+ * It provides:
+ * - Root access to the entity hierarchy
+ * - Path-based entity retrieval
+ * - Query operations by type and expression
+ * - Transaction support
+ * - Configuration access
+ * - Event signals for entity changes
+ * 
+ * Two implementations are provided:
+ * - EmbeddedEngine: Direct in-process operation
+ * - RemoteEngine: Client for connecting to implexusd daemon
+ * 
+ * All I/O methods are async. Example usage:
+ * {{{
+ * var root = yield engine.get_root_async();
+ * var entity = yield engine.get_entity_async(path);
+ * 
+ * // Transaction management
+ * try {
+ *     yield engine.begin_transaction_async();
+ *     yield entity.set_entity_property_async("key", value);
+ *     yield engine.commit_async();
+ * } catch (Error e) {
+ *     yield engine.rollback_async();
+ *     throw e;
+ * }
+ * }}}
+ */
+public interface Engine : Object {
+    
+    // === Root Access ===
+    
+    /**
+     * Gets the root entity of the database.
+     *
+     * The root entity is a CONTAINER at path "/" that serves as
+     * the starting point for all entity navigation.
+     *
+     * This operation is async as it may require I/O to verify/create root.
+     *
+     * @return The root Entity (always a Container)
+     * @throws EngineError if storage error occurs
+     */
+    public abstract async Entity get_root_async() throws EngineError;
+    
+    // === Path-Based Access ===
+    
+    /**
+     * Gets an entity by path, throwing an error if not found.
+     * 
+     * NOTE(review): the return type is nullable even though this method is
+     * documented to throw ENTITY_NOT_FOUND when the entity is missing —
+     * confirm implementations never return null here, or tighten the
+     * return type to a non-nullable Entity.
+     * 
+     * @param path The path to the entity
+     * @return The entity at the specified path
+     * @throws EngineError.ENTITY_NOT_FOUND if no entity exists at path
+     */
+    public abstract async Entity? get_entity_async(EntityPath path) throws EngineError;
+    
+    /**
+     * Gets an entity by path, returning null if not found.
+     * 
+     * This is a convenience method that doesn't throw an error
+     * when the entity doesn't exist.
+     * 
+     * @param path The path to the entity
+     * @return The entity at the specified path, or null
+     */
+    public abstract async Entity? get_entity_or_null_async(EntityPath path) throws EngineError;
+    
+    /**
+     * Checks if an entity exists at the specified path.
+     * 
+     * @param path The path to check
+     * @return true if an entity exists at the path
+     */
+    public abstract async bool entity_exists_async(EntityPath path) throws EngineError;
+    
+    // === Query Operations ===
+    
+    /**
+     * Queries all entities of a specific type.
+     * 
+     * Returns all documents with the specified type_label.
+     * Results are eagerly loaded into an array.
+     * 
+     * @param type_label The application-defined type to query
+     * @return An array of matching entities
+     */
+    public abstract async Entity[] query_by_type_async(string type_label) throws EngineError;
+    
+    /**
+     * Queries entities by type and expression.
+     * 
+     * Returns all documents with the specified type_label that
+     * match the given expression. Results are eagerly loaded
+     * into an array.
+     * 
+     * @param type_label The application-defined type to query
+     * @param expression The expression to evaluate
+     * @return An array of matching entities
+     */
+    public abstract async Entity[] query_by_expression_async(
+        string type_label, 
+        string expression
+    ) throws EngineError;
+    
+    // === Transactions ===
+    
+    /**
+     * Begins a new transaction.
+     * 
+     * Only one transaction can be active at a time per engine.
+     * 
+     * NOTE: Vala doesn't support async delegates, so there's no
+     * with_write_transaction() helper. Use manual management:
+     * 
+     * {{{
+     * try {
+     *     yield engine.begin_transaction_async();
+     *     // perform operations
+     *     yield engine.commit_async();
+     * } catch (Error e) {
+     *     yield engine.rollback_async();
+     *     throw e;
+     * }
+     * }}}
+     * 
+     * @return A new Transaction object
+     * @throws EngineError.TRANSACTION_ERROR if a transaction is already active
+     */
+    public abstract async Transaction begin_transaction_async() throws EngineError;
+    
+    /**
+     * Commits the current transaction.
+     * 
+     * @throws EngineError.TRANSACTION_ERROR if commit fails or no transaction active
+     */
+    public abstract async void commit_async() throws EngineError;
+    
+    /**
+     * Rolls back the current transaction.
+     * 
+     * Deliberately non-throwing: rollback is a best-effort cleanup path,
+     * typically invoked from a catch block where a secondary error would
+     * mask the original failure.
+     */
+    public abstract async void rollback_async();
+    
+    /**
+     * Indicates whether a transaction is currently active.
+     * This is synchronous as it's a quick property check.
+     */
+    public abstract bool in_transaction { get; }
+    
+    // === Configuration ===
+    
+    /**
+     * Gets the storage configuration for this engine.
+     * 
+     * This is synchronous as it's a quick property access.
+     * 
+     * @return The storage configuration
+     */
+    public abstract StorageConfiguration configuration { owned get; }
+    
+    // === Events ===
+    
+    /**
+     * Signal emitted when an entity is created.
+     * 
+     * @param entity The newly created entity
+     */
+    public signal void entity_created(Entity entity);
+    
+    /**
+     * Signal emitted when an entity is deleted.
+     * 
+     * The entity no longer exists at this point, so only its path
+     * is carried by the signal.
+     * 
+     * @param path The path of the deleted entity
+     */
+    public signal void entity_deleted(EntityPath path);
+    
+    /**
+     * Signal emitted when an entity is modified.
+     * 
+     * @param entity The modified entity
+     */
+    public signal void entity_modified(Entity entity);
+}
+
+/**
+ * Interface for database transactions.
+ * 
+ * Transactions provide atomic operations - either all changes
+ * are committed, or none are (rollback).
+ * 
+ * Example usage:
+ * {{{
+ * try {
+ *     yield engine.begin_transaction_async();
+ *     var doc = yield (yield engine.get_root_async()).create_category_async("batch")
+ *         .create_document_async("item1", "Item");
+ *     yield doc.set_entity_property_async("value", new ValueElement(42));
+ *     yield engine.commit_async();
+ * } catch (Error e) {
+ *     yield engine.rollback_async();
+ * }
+ * }}}
+ * 
+ * NOTE(review): begin_transaction_async() returns a Transaction, yet the
+ * example above commits via engine.commit_async() — confirm which of the
+ * two commit/rollback surfaces is canonical, since both exist.
+ */
+public interface Transaction : Object {
+    
+    /**
+     * Indicates whether this transaction is still active.
+     * 
+     * Returns false after commit_async() or rollback_async() is called.
+     * This is synchronous as it's a quick property check.
+     * 
+     * @return true if the transaction is active
+     */
+    public abstract bool active { get; }
+    
+    /**
+     * Commits all changes made during this transaction.
+     * 
+     * After commit, the transaction is no longer active.
+     * 
+     * @throws EngineError.TRANSACTION_ERROR if commit fails
+     */
+    public abstract async void commit_async() throws EngineError;
+    
+    /**
+     * Rolls back all changes made during this transaction.
+     * 
+     * After rollback, the transaction is no longer active.
+     * Deliberately non-throwing: rollback is a best-effort cleanup path.
+     */
+    public abstract async void rollback_async();
+}
+
+/**
+ * Storage configuration for the engine.
+ *
+ * Provides access to storage settings and options.
+ * Storage implementation is provided by the specific Engine implementation.
+ */
+public class StorageConfiguration : Object {
+    
+    /**
+     * The storage backend for entity persistence.
+     */
+    public Storage.Storage storage { get; set; }
+    
+    /**
+     * The hook manager for entity change notifications.
+     *
+     * This is initialized by the engine and provides notification
+     * services for indexed entities to update their indices.
+     */
+    public Implexus.Engine.HookManager? hook_manager { get; set; default = null; }
+    
+    /**
+     * Whether to enable entity caching.
+     *
+     * Default: true
+     */
+    public bool enable_cache { get; set; default = true; }
+    
+    /**
+     * Maximum number of entities to cache.
+     *
+     * Default: 1000
+     */
+    public int cache_size { get; set; default = 1000; }
+    
+    /**
+     * Whether to auto-sync changes to disk.
+     *
+     * Default: true
+     */
+    public bool auto_sync { get; set; default = true; }
+    
+    /**
+     * The base path for storage (file path for file-based storage).
+     */
+    public string storage_path { get; set; default = ""; }
+    
+    /**
+     * Creates a new StorageConfiguration with the given storage backend.
+     *
+     * Uses GObject-style construction (chaining to Object with the
+     * property set at construct time) so the class also behaves correctly
+     * when instantiated via Object.new () or from bindings, rather than
+     * assigning the field after construction.
+     *
+     * @param storage The storage backend to use
+     */
+    public StorageConfiguration(Storage.Storage storage) {
+        Object (storage: storage);
+    }
+}
+
+} // namespace Implexus.Core

+ 293 - 0
src/Core/Entity.vala

@@ -0,0 +1,293 @@
+/**
+ * Entity - Base interface for all database entities
+ * 
+ * The Entity interface is the primary abstraction for all objects in Implexus.
+ * It extends Invercargill.Element for type compatibility with the Invercargill
+ * library's type system.
+ * 
+ * All I/O operations are async - use the *_async methods for database access.
+ * Identity properties (path, name, entity_type, engine) remain synchronous
+ * as they don't require I/O.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * Interface for all database entities.
+ * 
+ * Entity is the base interface for all objects stored in Implexus.
+ * It provides:
+ * - Identity through path and name
+ * - Navigation via parent/child relationships (async)
+ * - Type information via entity_type
+ * - Lifecycle management via delete_async()
+ * 
+ * Four entity types implement this interface:
+ * - Container: Container for child entities
+ * - Document: Properties-based document
+ * - Category: Expression-based auto-categorization
+ * - Index: Text search with dynamic results
+ */
+public interface Entity : Object {
+    
+    // === Identity (Synchronous - No I/O) ===
+    
+    /**
+     * The engine that manages this entity.
+     * 
+     * This is an unowned reference to avoid reference cycles.
+     * The engine lifetime must exceed the entity lifetime.
+     * 
+     * @return The engine instance
+     */
+    public abstract unowned Engine engine { get; }
+    
+    /**
+     * The full path to this entity in the database hierarchy.
+     * 
+     * @return A new EntityPath object representing this entity's location
+     */
+    public abstract EntityPath path { owned get; }
+    
+    /**
+     * The name of this entity (the last segment of the path).
+     * 
+     * @return The entity name
+     */
+    public abstract string name { owned get; }
+    
+    /**
+     * The type of this entity.
+     * 
+     * @return The EntityType enum value
+     */
+    public abstract EntityType entity_type { get; }
+    
+    /**
+     * The application-defined type label for this document.
+     * 
+     * Empty string for non-document entities.
+     * 
+     * @return The type label
+     */
+    public abstract string type_label { owned get; }
+    
+    /**
+     * The configured expression for CATEGORY/INDEX entities.
+     * 
+     * Empty string for other entity types.
+     * 
+     * @return The expression string
+     */
+    public abstract string configured_expression { owned get; }
+    
+    /**
+     * The configured type label for CATEGORY/INDEX entities.
+     * 
+     * Empty string for other entity types.
+     * 
+     * @return The type label string
+     */
+    public abstract string configured_type_label { owned get; }
+    
+    // === Parent/Child Navigation (Async - May require I/O) ===
+    
+    /**
+     * Gets the parent entity, or null for the root.
+     * 
+     * @return The parent Entity, or null if this is the root
+     * @throws EntityError if an I/O error occurs
+     */
+    public abstract async Entity? get_parent_async() throws EntityError;
+    
+    /**
+     * Gets the names of all child entities.
+     *
+     * For CONTAINER, this returns stored child names.
+     * For CATEGORY and INDEX, this returns computed child names.
+     * For DOCUMENT, this returns an empty set.
+     * 
+     * @return A read-only set of child names
+     * @throws EntityError if an I/O error occurs
+     */
+    public abstract async Invercargill.ReadOnlySet<string> get_child_names_async() throws EntityError;
+    
+    /**
+     * Gets a child entity by name.
+     * 
+     * @param name The name of the child to retrieve
+     * @return The child Entity, or null if not found
+     * @throws EntityError if an I/O error occurs
+     */
+    public abstract async Entity? get_child_async(string name) throws EntityError;
+    
+    /**
+     * Gets all child entities.
+     * 
+     * @return An array of child entities (eager loading)
+     * @throws EntityError if an I/O error occurs
+     */
+    public abstract async Entity[] get_children_async() throws EntityError;
+    
+    // === Child Management (CONTAINER only - Async) ===
+    // NOTE(review): the create_* methods below return a nullable Entity
+    // while also being documented to throw on every failure — confirm
+    // whether a null return is actually possible, or tighten these to
+    // non-nullable returns.
+    
+    /**
+     * Creates a new container child.
+     *
+     * Only valid for CONTAINER entities. Throws INVALID_OPERATION for other types.
+     *
+     * @param name The name for the new container
+     * @return The created container entity
+     * @throws EntityError if operation is invalid, entity already exists, or I/O error
+     */
+    public abstract async Entity? create_container_async(string name) throws EntityError;
+    
+    /**
+     * Creates a new document child.
+     *
+     * Only valid for CONTAINER entities. Throws INVALID_OPERATION for other types.
+     * 
+     * @param name The name for the new document
+     * @param type_label The application-defined type for the document
+     * @return The created document entity
+     * @throws EntityError if operation is invalid, entity already exists, or I/O error
+     */
+    public abstract async Entity? create_document_async(string name, string type_label) throws EntityError;
+    
+    /**
+     * Creates a new category child.
+     *
+     * Only valid for CONTAINER entities. Throws INVALID_OPERATION for other types.
+     * 
+     * @param name The name for the new category
+     * @param type_label The document type to categorize
+     * @param expression The expression to evaluate for categorization
+     * @return The created category entity
+     * @throws EntityError if operation is invalid, entity already exists, or I/O error
+     */
+    public abstract async Entity? create_category_async(
+        string name, 
+        string type_label, 
+        string expression
+    ) throws EntityError;
+    
+    /**
+     * Creates a new index child.
+     *
+     * Only valid for CONTAINER entities. Throws INVALID_OPERATION for other types.
+     *
+     * @param name The name for the new index
+     * @param type_label The document type to index
+     * @param expression The expression to index
+     * @return The created index entity
+     * @throws EntityError if operation is invalid, entity already exists, or I/O error
+     */
+    public abstract async Entity? create_index_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws EntityError;
+    
+    /**
+     * Creates a new catalogue child.
+     *
+     * Only valid for CONTAINER entities. Throws INVALID_OPERATION for other types.
+     * A catalogue groups documents by a key extracted from an expression.
+     *
+     * @param name The name for the new catalogue
+     * @param type_label The document type to catalogue
+     * @param expression The expression to extract the grouping key
+     * @return The created catalogue entity
+     * @throws EntityError if operation is invalid, entity already exists, or I/O error
+     */
+    public abstract async Entity? create_catalogue_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws EntityError;
+    
+    // === Document Operations (DOCUMENT only - Async) ===
+    
+    /**
+     * Gets the properties stored in this document.
+     * 
+     * Only valid for DOCUMENT entities.
+     * 
+     * @return The properties collection
+     * @throws EntityError if not a document or I/O error
+     */
+    public abstract async Invercargill.Properties get_properties_async() throws EntityError;
+    
+    /**
+     * Gets a property value by name.
+     * 
+     * @param name The property name
+     * @return The property value, or null if not found
+     * @throws EntityError if not a document or I/O error
+     */
+    public abstract async Invercargill.Element? get_entity_property_async(string name) throws EntityError;
+    
+    /**
+     * Sets a property value.
+     * 
+     * @param name The property name
+     * @param value The property value
+     * @throws EntityError if not a document or I/O error
+     */
+    public abstract async void set_entity_property_async(string name, Invercargill.Element value) throws EntityError;
+    
+    /**
+     * Removes a property.
+     * 
+     * @param name The property name
+     * @throws EntityError if not a document or I/O error
+     */
+    public abstract async void remove_property_async(string name) throws EntityError;
+    
+    // === Lifecycle (Async - Requires I/O) ===
+    
+    /**
+     * Deletes this entity from the database.
+     *
+     * For containers, this recursively deletes all children.
+     * 
+     * @throws EntityError if deletion fails or I/O error
+     */
+    public abstract async void delete_async() throws EntityError;
+    
+    /**
+     * Checks if this entity still exists in the database.
+     * 
+     * This is synchronous as it may be cached or quick to check.
+     * 
+     * @return true if the entity exists
+     */
+    public abstract bool exists { get; }
+    
+    // === Set Operations (Async) ===
+    
+    /**
+     * Creates an EntitySet containing just this entity.
+     * 
+     * Declared without a throws clause, so implementations are expected
+     * to build the set without fallible I/O.
+     * 
+     * @return A new EntitySet with this entity
+     */
+    public abstract async EntitySet as_set_async();
+    
+    // === Signals ===
+    
+    /**
+     * Signal emitted when a property value changes.
+     * 
+     * @param key The name of the property that changed
+     */
+    public signal void property_changed(string key);
+    
+    // Note: Invercargill.Element methods (type, type_name, is_null, is_type, 
+    // assignable_to_type, as, assert_as, as_or_default, try_get_as, to_value, to_string)
+    // are inherited from Invercargill.Element interface and implemented in AbstractEntity.
+    // We do not re-declare them here to avoid Vala interface implementation conflicts.
+}
+
+} // namespace Implexus.Core

+ 96 - 0
src/Core/EntityError.vala

@@ -0,0 +1,96 @@
+/**
+ * EntityError - Error domain for entity-related errors
+ * 
+ * Defines error codes for operations on entities and the engine.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * Error domain for engine operations.
+ * 
+ * These errors can be thrown by Engine methods when
+ * operations cannot be completed as requested.
+ * 
+ * Several codes (ENTITY_NOT_FOUND, ENTITY_ALREADY_EXISTS, INVALID_PATH,
+ * INVALID_OPERATION, TYPE_MISMATCH, EXPRESSION_ERROR, STORAGE_ERROR)
+ * mirror EntityError; presumably engine implementations map entity-level
+ * failures onto this domain at the API boundary — verify against the
+ * Engine implementations.
+ */
+public errordomain EngineError {
+    /** The requested entity does not exist at the specified path */
+    ENTITY_NOT_FOUND,
+    
+    /** An entity already exists at the specified path */
+    ENTITY_ALREADY_EXISTS,
+    
+    /** The provided path is invalid or malformed */
+    INVALID_PATH,
+    
+    /** The operation is not valid for this entity type */
+    INVALID_OPERATION,
+    
+    /** The entity type does not match the expected type */
+    TYPE_MISMATCH,
+    
+    /** Error parsing or evaluating an expression */
+    EXPRESSION_ERROR,
+    
+    /** Transaction-related error (already active, commit failed, etc.) */
+    TRANSACTION_ERROR,
+    
+    /** Underlying storage layer error */
+    STORAGE_ERROR,
+    
+    /** Connection error for remote engine */
+    CONNECTION_ERROR,
+    
+    /** Protocol error in client-server communication */
+    PROTOCOL_ERROR
+}
+
+/**
+ * Error domain for entity operations.
+ * 
+ * These errors can be thrown by Entity methods when
+ * operations cannot be completed as requested.
+ * 
+ * This domain shares several codes with EngineError above and adds
+ * entity-specific ones (PROPERTY_NOT_FOUND, CHILD_NOT_FOUND, etc.).
+ */
+public errordomain EntityError {
+    /** The requested entity does not exist at the specified path */
+    ENTITY_NOT_FOUND,
+    
+    /** An entity already exists at the specified path */
+    ENTITY_ALREADY_EXISTS,
+    
+    /** The provided path is invalid or malformed */
+    INVALID_PATH,
+    
+    /** The operation is not valid for this entity type */
+    INVALID_OPERATION,
+    
+    /** The entity type does not match the expected type */
+    TYPE_MISMATCH,
+    
+    /** Error parsing or evaluating an expression */
+    EXPRESSION_ERROR,
+    
+    /** Underlying storage layer error */
+    STORAGE_ERROR,
+    
+    /** The requested property does not exist */
+    PROPERTY_NOT_FOUND,
+    
+    /** The requested child does not exist */
+    CHILD_NOT_FOUND,
+    
+    /** Parent entity not found for operation */
+    PARENT_NOT_FOUND,
+    
+    /** I/O error during operation */
+    IO_ERROR,
+    
+    /** Failed to create entity */
+    CREATE_FAILED,
+    
+    /** Failed to delete entity */
+    DELETE_FAILED
+}
+
+} // namespace Implexus.Core

+ 523 - 0
src/Core/EntityPath.vala

@@ -0,0 +1,523 @@
+/**
+ * EntityPath - Represents a path to an entity in the database hierarchy
+ * 
+ * The EntityPath class provides path parsing, manipulation, and comparison
+ * for entity addressing in Implexus.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * Represents a path to an entity in the database hierarchy.
+ * 
+ * Paths are immutable and use forward-slash separators like Unix paths.
+ * The root path is represented as "/".
+ * 
+ * Example paths:
+ * - "/" (root)
+ * - "/users" (top-level category)
+ * - "/users/john" (nested entity)
+ * - "/users/john/profile" (deeply nested)
+ * 
+ * EntityPath implements Invercargill.Element, Hashable, and Equatable for
+ * compatibility with Invercargill collections.
+ */
+public class EntityPath : Object, Invercargill.Element, Invercargill.Hashable, Invercargill.Equatable<EntityPath> {
+    
+    private Invercargill.DataStructures.Vector<string> _segments;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates an EntityPath from a string representation.
+     * 
+     * Leading/trailing slashes are tolerated and segments are unescaped
+     * (see unescape_segment).
+     * 
+     * @param path_string The path string (e.g., "/users/john")
+     */
+    public EntityPath(string path_string) {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+        do_parse(path_string);
+    }
+    
+    /**
+     * Creates the root path.
+     */
+    public EntityPath.root() {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+    }
+    
+    /**
+     * Creates an EntityPath from an enumerable of segments.
+     * 
+     * Segments are copied eagerly and used verbatim — no validation or
+     * unescaping is applied here.
+     * 
+     * @param segments The path segments
+     */
+    public EntityPath.from_segments(Invercargill.Enumerable<string> segments) {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var seg in segments) {
+            _segments.add(seg);
+        }
+    }
+    
+    /**
+     * Creates a child path from a parent and name.
+     * 
+     * The name is used verbatim; validation happens in append_child,
+     * not here.
+     * 
+     * @param parent The parent path
+     * @param name The child name
+     */
+    public EntityPath.with_child(EntityPath parent, string name) {
+        _segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var seg in parent._segments) {
+            _segments.add(seg);
+        }
+        _segments.add(name);
+    }
+    
+    // === Properties ===
+    
+    /**
+     * Indicates whether this is the root path.
+     * 
+     * @return true if this is the root path
+     */
+    public bool is_root { get { return _segments.peek_count() == 0; } }
+    
+    /**
+     * The name (last segment) of this path.
+     * 
+     * Empty string for the root path.
+     * 
+     * @return The name
+     */
+    public string name {
+        owned get {
+            if (is_root) return "";
+            try {
+                return _segments.last();
+            } catch (Invercargill.SequenceError e) {
+                // Defensive: last() cannot fail after the is_root guard.
+                return "";
+            }
+        }
+    }
+    
+    /**
+     * The parent path.
+     * 
+     * For the root path, returns itself. Returning `this` is safe because
+     * paths are immutable.
+     * 
+     * @return The parent path
+     */
+    public EntityPath parent {
+        owned get {
+            if (is_root) return this;
+            var parent_segments = _segments.take(_segments.peek_count() - 1);
+            return new EntityPath.from_segments(parent_segments);
+        }
+    }
+    
+    /**
+     * The depth (number of segments) of this path.
+     * 
+     * Root has depth 0.
+     * 
+     * @return The depth
+     */
+    public int depth { get { return (int) _segments.peek_count(); } }
+    
+    /**
+     * The path segments as an enumerable.
+     * 
+     * @return The segments
+     */
+    public Invercargill.Enumerable<string> segments {
+        owned get { return _segments.as_enumerable(); }
+    }
+    
+    // === Path Operations ===
+    
+    /**
+     * Creates a child path by appending a name.
+     * 
+     * Invalid names (empty, containing '/', "." or "..") are rejected by
+     * validation; since this method does not throw, such a name is still
+     * used as-is, but a warning is logged so the misuse is visible instead
+     * of being silently swallowed.
+     * 
+     * @param name The child name
+     * @return A new EntityPath representing the child
+     */
+    public EntityPath append_child(string name) {
+        try {
+            return new EntityPath.with_child(this, validate_name(name));
+        } catch (EngineError e) {
+            // Keep the original non-throwing fallback behaviour, but
+            // surface the validation failure for diagnostics.
+            warning("append_child: invalid segment accepted: %s", e.message);
+            return new EntityPath.with_child(this, name);
+        }
+    }
+    
+    /**
+     * Creates a sibling path with a different name.
+     * 
+     * A sibling shares this path's parent, so the root (which has no
+     * parent) cannot have one.
+     * 
+     * @param name The sibling name
+     * @return A new EntityPath representing the sibling
+     * @throws EngineError.INVALID_PATH if this is the root
+     */
+    public EntityPath sibling(string name) throws EngineError {
+        if (!is_root) {
+            return parent.append_child(name);
+        }
+        throw new EngineError.INVALID_PATH("Root has no siblings");
+    }
+    
+    /**
+     * Gets an ancestor path by going up the specified number of levels.
+     * 
+     * ancestor(0) returns a copy of this path; ancestor(depth) returns
+     * the root.
+     * 
+     * @param levels The number of levels to go up
+     * @return The ancestor path
+     * @throws EngineError.INVALID_PATH if levels is invalid
+     */
+    public EntityPath ancestor(int levels) throws EngineError {
+        if (levels < 0 || levels > depth) {
+            throw new EngineError.INVALID_PATH("Invalid ancestor level: %d".printf(levels));
+        }
+        // The cast is safe: the guard above ensures 0 <= depth - levels.
+        var ancestor_segments = _segments.take((uint)(depth - levels));
+        return new EntityPath.from_segments(ancestor_segments);
+    }
+    
+    /**
+     * Checks if this path is an ancestor of another path.
+     * 
+     * A path is not its own ancestor (strict prefix check).
+     * 
+     * @param other The potential descendant
+     * @return true if this is an ancestor of other
+     */
+    public bool is_ancestor_of(EntityPath other) {
+        if (depth >= other.depth) return false;
+        for (int i = 0; i < depth; i++) {
+            try {
+                if (_segments.get(i) != other._segments.get(i)) return false;
+            } catch (Invercargill.IndexError e) {
+                // Defensive: indices are bounded by depth, so this should
+                // be unreachable.
+                return false;
+            }
+        }
+        return true;
+    }
+    
+    /**
+     * Checks if this path is a descendant of another path.
+     * 
+     * @param other The potential ancestor
+     * @return true if this is a descendant of other
+     */
+    public bool is_descendant_of(EntityPath other) {
+        return other.is_ancestor_of(this);
+    }
+    
+    /**
+     * Gets the relative path from an ancestor to this path.
+     * 
+     * @param ancestor The ancestor path
+     * @return The relative path
+     * @throws EngineError.INVALID_PATH if ancestor is not actually an ancestor
+     */
+    public EntityPath relative_to(EntityPath ancestor) throws EngineError {
+        if (!ancestor.is_ancestor_of(this)) {
+            throw new EngineError.INVALID_PATH(
+                "%s is not an ancestor of %s".printf(ancestor.to_string(), this.to_string())
+            );
+        }
+        var relative_segments = _segments.skip((uint)ancestor.depth);
+        return new EntityPath.from_segments(relative_segments);
+    }
+    
+    /**
+     * Resolves a relative path against this path.
+     * 
+     * Supports ".." (parent) and "." (current) segments. A ".." that
+     * would go above the root is clamped (silently ignored).
+     * 
+     * @param relative_path The relative path to resolve
+     * @return The resolved absolute path
+     */
+    public EntityPath resolve(EntityPath relative_path) {
+        var result_segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var seg in _segments) {
+            result_segments.add(seg);
+        }
+        foreach (var seg in relative_path._segments) {
+            if (seg == "..") {
+                if (result_segments.peek_count() > 0) {
+                    try {
+                        result_segments.remove_at(result_segments.peek_count() - 1);
+                    } catch (Error e) {}
+                }
+            } else if (seg != ".") {
+                result_segments.add(seg);
+            }
+        }
+        return new EntityPath.from_segments(result_segments.as_enumerable());
+    }
+    
+    // === String Conversion ===
+    
+    /**
+     * Converts the path to a string representation.
+     * 
+     * Segments are escaped (see escape_segment), so the output can be
+     * round-tripped through the string constructor.
+     * 
+     * @return The path string (e.g., "/users/john")
+     */
+    public new string to_string() {
+        if (is_root) return "/";
+        var builder = new StringBuilder();
+        foreach (var seg in _segments) {
+            builder.append("/");
+            builder.append(escape_segment(seg));
+        }
+        return builder.str;
+    }
+    
+    /**
+     * Key separator for storage serialization.
+     * Uses "/" (0x2F) for consistency with path string representation.
+     * Note: This makes "/" an illegal character in entity names.
+     */
+    public const string KEY_SEPARATOR = "/";
+    
+    /**
+     * Converts the path to a compact key for storage.
+     * Uses "/" (0x2F) as separator for consistency with path representation.
+     *
+     * NOTE(review): unlike to_string(), segments are NOT escaped here, so a
+     * segment containing "/" (possible via from_segments()/with_child(),
+     * which skip validation) would produce an ambiguous key — confirm that
+     * all construction paths reject "/" in names.
+     *
+     * @return The storage key
+     */
+    public string to_key() {
+        if (is_root) return "";
+        
+        // Build the key with "/" separators
+        var builder = new StringBuilder();
+        bool first = true;
+        foreach (var seg in _segments) {
+            if (!first) {
+                builder.append(KEY_SEPARATOR);
+            }
+            builder.append(seg);
+            first = false;
+        }
+        return builder.str;
+    }
+    
+    /**
+     * Creates an EntityPath from a storage key.
+     *
+     * Inverse of to_key(). Segments are used verbatim (no unescaping),
+     * matching to_key(), which performs no escaping.
+     *
+     * @param key The storage key
+     * @return The EntityPath
+     */
+    public static EntityPath from_key(string key) {
+        // Check for root path (empty string)
+        if (key == "" || key.length == 0) {
+            return new EntityPath.root();
+        }
+        
+        // Parse "/"-separated segments
+        var vec = new Invercargill.DataStructures.Vector<string>();
+        int start = 0;
+        for (int i = 0; i < key.length; i++) {
+            if (key[i] == '/') {
+                string segment = key.substring(start, i - start);
+                vec.add(segment);
+                start = i + 1;
+            }
+        }
+        // Add the last segment.
+        // start <= key.length always holds here, so this branch always runs:
+        // a key ending in "/" yields an empty trailing segment, which keeps
+        // the to_key()/from_key() round trip intact for paths whose final
+        // segment is the empty string.
+        if (start <= key.length) {
+            vec.add(key.substring(start));
+        }
+        
+        return new EntityPath.from_segments(vec.as_enumerable());
+    }
+    
+    // === Parsing ===
+    
+    // Parses a path string into _segments. Null/empty input and "/" both
+    // denote the root path; a single leading and trailing slash are trimmed,
+    // empty parts (from "//") are skipped, and each kept part is unescaped.
+    private void do_parse(string path_string) {
+        if (path_string == null || path_string == "") {
+            return; // Root path
+        }
+        
+        var trimmed = path_string;
+        if (trimmed.has_prefix("/")) {
+            trimmed = trimmed.substring(1);
+        }
+        if (trimmed.has_suffix("/")) {
+            trimmed = trimmed.substring(0, trimmed.length - 1);
+        }
+        
+        if (trimmed == "") {
+            return; // Root path
+        }
+        
+        foreach (var part in trimmed.split("/")) {
+            if (part != "") {
+                _segments.add(unescape_segment(part));
+            }
+        }
+    }
+    
+    // === Validation ===
+    
+    // Validates a single entity name and returns it unchanged on success.
+    // Throws EngineError.INVALID_PATH for empty names, names containing the
+    // path separator, and the reserved relative-path tokens "." and "..".
+    private string validate_name(string name) throws EngineError {
+        if (name == null || name == "") {
+            throw new EngineError.INVALID_PATH("Entity name cannot be empty");
+        }
+        // "/" is the path separator and therefore illegal inside a name.
+        if ("/" in name) {
+            throw new EngineError.INVALID_PATH("Entity name cannot contain /: %s".printf(name));
+        }
+        if (name == "." || name == "..") {
+            throw new EngineError.INVALID_PATH("Entity name cannot be . or ..");
+        }
+        return name;
+    }
+    
+    // === Escaping ===
+    
+    // Escapes reserved characters in one segment for the to_string()
+    // representation. The "~" replacement MUST run first so the "~xx"
+    // sequences introduced by the later replacements are not re-escaped.
+    private string escape_segment(string segment) {
+        return segment.replace("~", "~7e")
+                      .replace("/", "~2f")
+                      .replace("\\", "~5c")
+                      .replace("\0", "~00"); // NOTE(review): "\0" terminates a C string — confirm this replace can ever match
+    }
+    
+    // Inverse of escape_segment(). The "~7e" -> "~" replacement MUST run
+    // last: tildes restored earlier could otherwise combine with following
+    // text into sequences the other replacements would wrongly consume.
+    private string unescape_segment(string segment) {
+        return segment.replace("~00", "\0")
+                      .replace("~5c", "\\")
+                      .replace("~2f", "/")
+                      .replace("~7e", "~");
+    }
+    
+    // === Hashable ===
+    
+    /**
+     * Computes a hash code for this path.
+     *
+     * Segment hashes are folded with a 31-multiplier so the result is
+     * order-sensitive: unlike the previous plain XOR, /a/b and /b/a now
+     * hash differently. Equal paths still produce equal hashes.
+     *
+     * @return The hash code
+     */
+    public uint hash_code() {
+        uint h = 17;
+        foreach (var seg in _segments) {
+            h = h * 31 + str_hash(seg);
+        }
+        return h;
+    }
+    
+    // === Equatable ===
+    
+    /**
+     * Structural equality: two paths are equal when they have the same
+     * depth and identical segments in the same order.
+     *
+     * @param other The other path
+     * @return true if the paths are equal
+     */
+    public bool equals(EntityPath other) {
+        var n = depth;
+        if (n != other.depth) {
+            return false;
+        }
+        try {
+            for (int i = 0; i < n; i++) {
+                if (_segments.get(i) != other._segments.get(i)) {
+                    return false;
+                }
+            }
+        } catch (Invercargill.IndexError e) {
+            // Both vectors hold exactly n elements, so this should be
+            // unreachable; treat it as inequality rather than crashing.
+            return false;
+        }
+        return true;
+    }
+    
+    // === Invercargill.Element ===
+    
+    /**
+     * Invercargill.Element: returns the runtime GLib.Type of this element.
+     * 
+     * @return The EntityPath type (never null for this implementation)
+     */
+    public Type? type() { return typeof(EntityPath); }
+    
+    /**
+     * Invercargill.Element: returns the element's type name string.
+     * 
+     * @return "EntityPath"
+     */
+    public string type_name() { return "EntityPath"; }
+    
+    /**
+     * Invercargill.Element: paths are never the null element.
+     * 
+     * @return false
+     */
+    public bool is_null() { return false; }
+    
+    /**
+     * Invercargill.Element: checks if this path is of the specified type.
+     * 
+     * @param t The type to check; true when t is EntityPath or derives from it
+     * @return true if this path is of type t
+     */
+    public bool is_type(Type t) { return t.is_a(typeof(EntityPath)); }
+    
+    /**
+     * Invercargill.Element: checks if this path can be assigned to type t.
+     * Delegates to is_type().
+     * 
+     * @return true if assignable
+     */
+    public bool assignable_to_type(Type t) { return is_type(t); }
+    
+    /**
+     * Invercargill.Element: casts this path to type T.
+     * 
+     * NOTE(review): returns `this` without checking T against EntityPath,
+     * relying on callers requesting a compatible type — confirm the Element
+     * contract permits this unchecked behavior.
+     * 
+     * @return This path as type T, or null if not possible
+     */
+    public T? @as<T>() throws Invercargill.ElementError { return this; }
+    
+    /**
+     * Invercargill.Element: asserts and casts this path to type T.
+     * The cast is unchecked at this level; an incompatible T is a caller bug.
+     * 
+     * @return This path as type T
+     */
+    public T assert_as<T>() { return (T) this; }
+    
+    /**
+     * Invercargill.Element: casts this path to type T, returning a default
+     * if not possible. This implementation always returns `this`.
+     * 
+     * @return This path as type T, or the default
+     */
+    public T? as_or_default<T>() { return this; }
+    
+    /**
+     * Invercargill.Element: attempts to get this path as type T.
+     * Always succeeds in this implementation.
+     * 
+     * @param result Output parameter for the result
+     * @return true if successful
+     */
+    public bool try_get_as<T>(out T result) { result = this; return true; }
+    
+    /**
+     * Boxes this path in a GLib.Value.
+     * 
+     * @param requested_type The requested value type; currently ignored —
+     *        the returned value is always typed as EntityPath
+     * @return The boxed value
+     */
+    public GLib.Value to_value(GLib.Type requested_type) throws GLib.Error { 
+        var v = Value(typeof(EntityPath));
+        v.set_object(this);
+        return v;
+    }
+    
+    // === Static Factory Methods ===
+    
+    /**
+     * Parses a path string into an EntityPath.
+     * Convenience wrapper around the EntityPath(string) constructor.
+     * 
+     * @param path_string The path string to parse
+     * @return The parsed EntityPath
+     */
+    public static EntityPath parse(string path_string) {
+        return new EntityPath(path_string);
+    }
+    
+    /**
+     * Combines a base path with a relative path string.
+     * Equivalent to base_path.resolve(new EntityPath(relative)).
+     * 
+     * @param base_path The base path
+     * @param relative The relative path string
+     * @return The combined path
+     */
+    public static EntityPath combine(EntityPath base_path, string relative) {
+        return base_path.resolve(new EntityPath(relative));
+    }
+}
+
+} // namespace Implexus.Core

+ 300 - 0
src/Core/EntitySet.vala

@@ -0,0 +1,300 @@
+/**
+ * EntitySet - A set of entities supporting set operations
+ * 
+ * EntitySet provides set operations (union, intersection, difference)
+ * for working with collections of entities.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * A set of entities supporting set operations.
+ * 
+ * EntitySet wraps a hash set of entities and provides union, intersection,
+ * difference, symmetric difference, and predicate-based filtering.
+ * Membership is decided by the Entity hash/equality used by the underlying
+ * Invercargill HashSet.
+ * 
+ * Example usage:
+ * {{{
+ * var active_users = engine.query_by_expression("User", "status == 'active'");
+ * var admins = engine.query_by_expression("User", "role == 'admin'");
+ * var active_admins = active_users.as_set().intersect(admins.as_set());
+ * }}}
+ */
+public class EntitySet : Object, Invercargill.Element {
+    
+    private Invercargill.DataStructures.HashSet<Entity> _entities;
+    
+    /**
+     * Creates an empty EntitySet.
+     */
+    public EntitySet.empty() {
+        _entities = new Invercargill.DataStructures.HashSet<Entity>();
+    }
+    
+    // Wraps an existing hash set without copying. Used by the set-operation
+    // and filter methods so each result is built once, instead of creating
+    // an empty EntitySet and then replacing its storage.
+    private EntitySet.adopt(Invercargill.DataStructures.HashSet<Entity> entities) {
+        _entities = entities;
+    }
+    
+    /**
+     * Creates an EntitySet containing a single entity.
+     * 
+     * @param entity The entity to wrap
+     */
+    public EntitySet(Entity entity) {
+        _entities = new Invercargill.DataStructures.HashSet<Entity>();
+        _entities.add(entity);
+    }
+    
+    /**
+     * Creates an EntitySet from a collection of entities.
+     * Duplicate entities in the input collapse to a single member.
+     * 
+     * @param entities The entities to include
+     */
+    public EntitySet.from_enumerable(Invercargill.Enumerable<Entity> entities) {
+        _entities = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var entity in entities) {
+            _entities.add(entity);
+        }
+    }
+    
+    /**
+     * The number of entities in this set.
+     */
+    public int count { get { return (int) _entities.count; } }
+    
+    /**
+     * Whether this set contains no entities.
+     * 
+     * NOTE(review): reads peek_count() while count reads the count property —
+     * confirm the two Invercargill accessors are equivalent here.
+     */
+    public bool is_empty { get { return _entities.peek_count() == 0; } }
+    
+    /**
+     * Checks if this set contains an entity.
+     * 
+     * @param entity The entity to check
+     * @return true if the entity is in this set
+     */
+    public bool contains(Entity entity) {
+        return _entities.contains(entity);
+    }
+    
+    /**
+     * The entities as an enumerable collection.
+     */
+    public Invercargill.Enumerable<Entity> entities {
+        owned get { return _entities.as_enumerable(); }
+    }
+    
+    // === Set Operations ===
+    
+    /**
+     * Returns a new set containing every entity found in either set.
+     * Neither input set is modified.
+     * 
+     * @param other The other set
+     * @return A new EntitySet containing entities from both sets
+     */
+    public EntitySet union(EntitySet other) {
+        var combined = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var entity in _entities) {
+            combined.add(entity);
+        }
+        foreach (var entity in other._entities) {
+            combined.add(entity);
+        }
+        return new EntitySet.adopt(combined);
+    }
+    
+    /**
+     * Returns a new set containing only entities present in both sets.
+     * Neither input set is modified.
+     * 
+     * @param other The other set
+     * @return A new EntitySet containing entities in both sets
+     */
+    public EntitySet intersect(EntitySet other) {
+        var shared = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var entity in _entities) {
+            if (other.contains(entity)) {
+                shared.add(entity);
+            }
+        }
+        return new EntitySet.adopt(shared);
+    }
+    
+    /**
+     * Returns a new set containing entities in this set but not in other.
+     * Neither input set is modified.
+     * 
+     * @param other The other set
+     * @return A new EntitySet containing entities in this set but not other
+     */
+    public EntitySet difference(EntitySet other) {
+        var remaining = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var entity in _entities) {
+            if (!other.contains(entity)) {
+                remaining.add(entity);
+            }
+        }
+        return new EntitySet.adopt(remaining);
+    }
+    
+    /**
+     * Returns a new set containing entities in exactly one of the two sets.
+     * 
+     * @param other The other set
+     * @return A new EntitySet containing entities in either set but not both
+     */
+    public EntitySet symmetric_difference(EntitySet other) {
+        return this.difference(other).union(other.difference(this));
+    }
+    
+    // === Filtering ===
+    
+    /**
+     * Delegate for filtering entities.
+     */
+    public delegate bool FilterPredicate(Entity entity);
+    
+    /**
+     * Returns a new set containing the entities matching a predicate.
+     *
+     * @param predicate The filter predicate
+     * @return A new EntitySet with matching entities
+     */
+    public EntitySet filter(FilterPredicate predicate) {
+        var matched = new Invercargill.DataStructures.HashSet<Entity>();
+        foreach (var entity in _entities) {
+            if (predicate(entity)) {
+                matched.add(entity);
+            }
+        }
+        return new EntitySet.adopt(matched);
+    }
+    
+    /**
+     * Returns a new set containing only entities of a specific type.
+     * 
+     * @param type The entity type to filter by
+     * @return A new EntitySet with matching entities
+     */
+    public EntitySet filter_by_type(EntityType type) {
+        return filter(e => e.entity_type == type);
+    }
+    
+    /**
+     * Returns a new set containing only entities with a specific type label.
+     * 
+     * @param type_label The type label to filter by
+     * @return A new EntitySet with matching entities
+     */
+    public EntitySet filter_by_type_label(string type_label) {
+        return filter(e => e.type_label == type_label);
+    }
+    
+    // === Invercargill.Element ===
+    
+    /**
+     * Invercargill.Element: returns the runtime GLib.Type of this element.
+     * 
+     * @return The EntitySet type
+     */
+    public Type? type() { return typeof(EntitySet); }
+    
+    /**
+     * Invercargill.Element: returns the element's type name string.
+     * 
+     * @return "EntitySet"
+     */
+    public string type_name() { return "EntitySet"; }
+    
+    /**
+     * Invercargill.Element: sets are never the null element.
+     * 
+     * @return false
+     */
+    public bool is_null() { return false; }
+    
+    /**
+     * Invercargill.Element: checks if this set is of the specified type.
+     * 
+     * @param t The type to check; true when t is EntitySet or derives from it
+     * @return true if this set is of type t
+     */
+    public bool is_type(Type t) { return t.is_a(typeof(EntitySet)); }
+    
+    /**
+     * Checks if this set can be assigned to the type parameter T.
+     * 
+     * @return true if assignable
+     */
+    public bool assignable_to<T>() { return typeof(T).is_a(typeof(EntitySet)); }
+    
+    /**
+     * Invercargill.Element: checks if this set can be assigned to type t.
+     * 
+     * @param t The target type
+     * @return true if assignable
+     */
+    public bool assignable_to_type(Type t) { return is_type(t); }
+    
+    /**
+     * Invercargill.Element: casts this set to type T.
+     * NOTE(review): returns `this` without checking T — confirm the Element
+     * contract permits the unchecked cast.
+     * 
+     * @return This set as type T, or null if not possible
+     */
+    public T? @as<T>() throws Invercargill.ElementError { return this; }
+    
+    /**
+     * Invercargill.Element: asserts and casts this set to type T.
+     * 
+     * @return This set as type T
+     */
+    public T assert_as<T>() { return (T) this; }
+    
+    /**
+     * Invercargill.Element: casts to T, or a default when not possible.
+     * 
+     * @return This set as type T, or the default
+     */
+    public T? as_or_default<T>() { return this; }
+    
+    /**
+     * Invercargill.Element: attempts to get this set as type T.
+     * 
+     * @param result Output parameter for the result
+     * @return true if successful
+     */
+    public bool try_get_as<T>(out T result) { result = this; return true; }
+    
+    /**
+     * Boxes this set in a GLib.Value typed as EntitySet.
+     * 
+     * @param requested_type The requested value type; currently ignored
+     * @return The boxed value
+     */
+    public GLib.Value to_value(GLib.Type requested_type) throws GLib.Error { 
+        var v = Value(typeof(EntitySet));
+        v.set_object(this);
+        return v;
+    }
+    
+    /**
+     * Returns a string representation of this set.
+     * 
+     * @return A string in the form "EntitySet(count)"
+     */
+    public new string to_string() {
+        return "EntitySet(%d)".printf(count);
+    }
+}
+
+} // namespace Implexus.Core

+ 138 - 0
src/Core/EntityType.vala

@@ -0,0 +1,138 @@
+/**
+ * EntityType - Enumeration of entity types in Implexus
+ *
+ * Defines the five types of entities in the database:
+ * - CONTAINER: Container for child entities
+ * - DOCUMENT: Properties-based document
+ * - CATEGORY: Expression-based auto-categorization
+ * - CATALOGUE: Key-based groupings of documents
+ * - INDEX: Text search with dynamic results
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * Entity type enumeration defining the kinds of entities in Implexus.
+ *
+ * Capabilities by type:
+ * - CONTAINER: holds children of any type
+ * - DOCUMENT: stores properties; never has children
+ * - CATEGORY: virtual container driven by expression matching
+ * - CATALOGUE: virtual container grouping documents by extracted key
+ * - INDEX: virtual container exposing text-search results
+ */
+public enum EntityType {
+    /** Container entity that can hold other entities */
+    CONTAINER,
+    /** Document entity that stores properties */
+    DOCUMENT,
+    /** Virtual container that auto-categorizes documents by expression */
+    CATEGORY,
+    /** Virtual container that groups documents by key extracted from expression */
+    CATALOGUE,
+    /** Virtual container that provides text search results */
+    INDEX;
+    
+    /**
+     * Returns the lowercase string name of this entity type.
+     *
+     * @return Lowercase string name of the entity type
+     */
+    public string to_string() {
+        switch (this) {
+            case CONTAINER:
+                return "container";
+            case DOCUMENT:
+                return "document";
+            case CATEGORY:
+                return "category";
+            case CATALOGUE:
+                return "catalogue";
+            case INDEX:
+                return "index";
+            default:
+                assert_not_reached();
+        }
+    }
+    
+    /**
+     * Parses a case-insensitive string into an EntityType.
+     *
+     * @param name String representation of the entity type
+     * @return The corresponding EntityType, or null when unrecognized
+     */
+    public static EntityType? from_string(string name) {
+        switch (name.down()) {
+            case "container":
+                return CONTAINER;
+            case "document":
+                return DOCUMENT;
+            case "category":
+                return CATEGORY;
+            case "catalogue":
+                return CATALOGUE;
+            case "index":
+                return INDEX;
+            default:
+                return null;
+        }
+    }
+    
+    /**
+     * Determines if this entity type can have child entities.
+     * Every type except DOCUMENT can hold children.
+     *
+     * @return true if the entity type can have children
+     */
+    public bool can_have_children() {
+        switch (this) {
+            case DOCUMENT:
+                return false;
+            case CONTAINER:
+            case CATEGORY:
+            case CATALOGUE:
+            case INDEX:
+                return true;
+            default:
+                assert_not_reached();
+        }
+    }
+    
+    /**
+     * Determines if this entity type is virtual, i.e. its children are
+     * computed dynamically from expressions rather than stored.
+     *
+     * @return true if the entity type is virtual
+     */
+    public bool is_virtual() {
+        switch (this) {
+            case CONTAINER:
+            case DOCUMENT:
+                return false;
+            case CATEGORY:
+            case CATALOGUE:
+            case INDEX:
+                return true;
+            default:
+                assert_not_reached();
+        }
+    }
+    
+    /**
+     * Determines if this entity type can store properties directly.
+     * Only DOCUMENT can.
+     *
+     * @return true if the entity type can store properties
+     */
+    public bool can_have_properties() {
+        return this == DOCUMENT;
+    }
+    
+    /**
+     * Determines if this entity type can be created by users.
+     * All entity types are user-creatable.
+     *
+     * @return true if the entity type can be created
+     */
+    public bool is_creatable() {
+        return true;
+    }
+}
+
+} // namespace Implexus.Core

+ 134 - 0
src/Core/SafePath.vala

@@ -0,0 +1,134 @@
+/**
+ * SafePath - Factory class for creating URL-encoded EntityPath instances
+ * 
+ * SafePath provides a convenient, safe API for constructing entity paths
+ * from variadic segments with automatic URL encoding.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Core {
+
+/**
+ * Factory class for creating URL-encoded EntityPath instances.
+ * Provides a safe, convenient API for constructing paths from segments.
+ * 
+ * Example usage:
+ * {{{
+ * var path = SafePath.path("users", "john doe", "profile", null);
+ * // Creates EntityPath for /users/john%20doe/profile
+ * 
+ * var simple = SafePath.path("catalogue", null);  // Single segment
+ * var root = SafePath.path(null);  // Root path
+ * }}}
+ */
+public class SafePath : Object {
+    
+    /**
+     * Creates an EntityPath from variadic path segments.
+     * Each segment is URL-encoded to handle special characters safely.
+     * 
+     * Example:
+     * {{{
+     * var path = SafePath.path("users", "john doe", "profile", null);
+     * // Results in EntityPath for /users/john%20doe/profile
+     * }}}
+     * 
+     * @param first_segment The first path segment (null creates root path)
+     * @param ... Additional segments, null-terminated
+     * @return A new EntityPath with URL-encoded segments
+     */
+    public static EntityPath path(string? first_segment, ...) {
+        var segments = new Invercargill.DataStructures.Vector<string>();
+        
+        // If first segment is null, return root path
+        if (first_segment == null) {
+            return new EntityPath.root();
+        }
+        
+        // Add first segment (encoded)
+        segments.add(encode_segment(first_segment));
+        
+        // Process variadic arguments
+        va_list args = va_list();
+        while (true) {
+            string? segment = args.arg();
+            if (segment == null) {
+                break;
+            }
+            segments.add(encode_segment(segment));
+        }
+        
+        return new EntityPath.from_segments(segments.as_enumerable());
+    }
+    
+    /**
+     * Creates an EntityPath from an array of path segments.
+     * Each segment is URL-encoded to handle special characters safely.
+     * 
+     * Example:
+     * {{{
+     * string[] parts = { "users", "john doe", "profile" };
+     * var path = SafePath.from_array(parts);
+     * // Results in EntityPath for /users/john%20doe/profile
+     * }}}
+     * 
+     * @param segments Array of path segments
+     * @return A new EntityPath with URL-encoded segments
+     */
+    public static EntityPath from_array(string[] segments) {
+        // Empty array returns root path
+        if (segments.length == 0) {
+            return new EntityPath.root();
+        }
+        
+        var encoded_segments = new Invercargill.DataStructures.Vector<string>();
+        foreach (var segment in segments) {
+            encoded_segments.add(encode_segment(segment));
+        }
+        
+        return new EntityPath.from_segments(encoded_segments.as_enumerable());
+    }
+    
+    /**
+     * URL-encodes a path segment using percent-encoding.
+     * Uses GLib.Uri.escape_string() with appropriate reserved characters.
+     * 
+     * Encodes:
+     * - All reserved URI characters (/, ?, #, =, &, etc.)
+     * - Space as %20 (not +)
+     * - Non-ASCII characters as UTF-8 percent-encoded
+     * 
+     * @param segment The raw segment to encode
+     * @return The URL-encoded segment
+     */
+    private static string encode_segment(string segment) {
+        if (segment.length == 0) {
+            return "";
+        }
+        
+        // Use GLib's URI escaping with escape_reserved=true
+        // This encodes all reserved characters per RFC 3986
+        // Space is encoded as %20, not +
+        return Uri.escape_string(segment, "", true);
+    }
+    
+    /**
+     * Decodes a URL-encoded path segment.
+     * 
+     * @param encoded The encoded segment
+     * @return The decoded segment
+     * @throws EntityError.INVALID_PATH if the segment contains invalid percent-encoding
+     */
+    public static string decode_segment(string encoded) throws EntityError {
+        string? decoded = Uri.unescape_string(encoded);
+        if (decoded == null) {
+            throw new EntityError.INVALID_PATH(
+                "Invalid percent-encoding in path segment: %s".printf(encoded)
+            );
+        }
+        return decoded;
+    }
+}
+
+} // namespace Implexus.Core

+ 621 - 0
src/Engine/ConnectionString.vala

@@ -0,0 +1,621 @@
+/**
+ * ConnectionString - URI-style connection string parser for engine configuration
+ * 
+ * Provides a unified connection string format for configuring Implexus engines.
+ * Supports both embedded and remote modes with various backend options.
+ * 
+ * Connection String Formats:
+ * 
+ * Embedded Mode (short form):
+ *   lmdb:///var/lib/myapp/db
+ *   gdbm:///var/lib/myapp/db
+ *   filesystem:///var/lib/myapp/db
+ * 
+ * Embedded Mode (full form):
+ *   implexus://embedded?backend=lmdb&path=/var/lib/myapp/db
+ *   implexus://embedded?backend=gdbm&path=/var/lib/myapp/db&map_size=1024
+ * 
+ * Remote Mode:
+ *   implexus://server.example.com:9876
+ *   implexus://192.168.1.100:9876?timeout=30
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Error domain for connection string parsing errors.
+ * Raised by the ConnectionString constructor and its internal parse
+ * helpers; use ConnectionString.try_parse() to receive null instead.
+ */
+public errordomain ConnectionStringError {
+    /**
+     * The connection string format is invalid or malformed
+     * (e.g. empty input, unrecognized scheme, bad IPv6 bracket syntax).
+     */
+    INVALID_FORMAT,
+    
+    /**
+     * An unknown backend type was specified
+     * (valid backends: lmdb, gdbm, filesystem).
+     */
+    UNKNOWN_BACKEND,
+    
+    /**
+     * A required parameter is missing from the connection string
+     * (e.g. no path in embedded mode, no host in remote mode).
+     */
+    MISSING_PARAMETER,
+    
+    /**
+     * A parameter value is invalid (e.g. non-numeric port).
+     */
+    INVALID_PARAMETER
+}
+
+/**
+ * Parses and represents a connection string for engine configuration.
+ * 
+ * ConnectionString provides a convenient way to configure engines from
+ * configuration files or command-line arguments using a URI-style format.
+ * 
+ * Example usage:
+ * {{{
+ * // Parse a connection string
+ * var cs = new ConnectionString("lmdb:///var/lib/myapp/db");
+ * 
+ * // Create an engine directly
+ * var engine = cs.create_engine();
+ * 
+ * // Or use try_parse for safe parsing
+ * var cs = ConnectionString.try_parse(user_input);
+ * if (cs != null) {
+ *     var engine = cs.create_engine();
+ * }
+ * 
+ * // Serialize back to string
+ * string conn_str = cs.to_string();
+ * }}}
+ */
+public class ConnectionString : Object {
+    
+    // === Mode Detection ===
+    
+    /**
+     * Whether this connection string specifies remote mode.
+     * 
+     * If false, the connection is for embedded mode.
+     */
+    public bool is_remote { get; private set; }
+    
+    // === Remote Mode Settings ===
+    
+    /**
+     * Remote server hostname.
+     * 
+     * Only set when is_remote is true.
+     */
+    public string? host { get; private set; }
+    
+    /**
+     * Remote server port.
+     * 
+     * Only set when is_remote is true and port was specified.
+     * If null, the default port (9876) should be used.
+     */
+    public int? port { get; private set; }
+    
+    /**
+     * Connection timeout in seconds.
+     * 
+     * Only set when is_remote is true and timeout was specified.
+     * If null, the default timeout (30 seconds) should be used.
+     */
+    public int? timeout { get; private set; }
+    
+    // === Embedded Mode Settings ===
+    
+    /**
+     * Backend type for embedded mode.
+     * 
+     * Valid values: "lmdb", "gdbm", "filesystem"
+     * Only set when is_remote is false.
+     */
+    public string? backend { get; private set; }
+    
+    /**
+     * Filesystem path for database storage.
+     * 
+     * Only set when is_remote is false.
+     */
+    public string? path { get; private set; }
+    
+    // === Backend-Specific Options (LMDB) ===
+    
+    /**
+     * LMDB map size in megabytes.
+     * 
+     * Only applicable when backend is "lmdb".
+     * If null, the default (1024 MB = 1GB) should be used.
+     */
+    public int? map_size { get; private set; }
+    
+    // === Backend-Specific Options (General) ===
+    
+    /**
+     * Whether to enable caching.
+     * 
+     * Only applicable in embedded mode.
+     */
+    public bool? enable_cache { get; private set; }
+    
+    /**
+     * Cache size (number of entities).
+     * 
+     * Only applicable in embedded mode when caching is enabled.
+     */
+    public int? cache_size { get; private set; }
+    
+    // === Valid Backends ===
+    
+    private const string[] VALID_BACKENDS = { "lmdb", "gdbm", "filesystem" };
+    
+    /**
+     * Creates a new ConnectionString by parsing the given string.
+     * Accepts both the short embedded form (backend:///path) and the
+     * full implexus:// form for embedded or remote mode.
+     * 
+     * @param connection_string The connection string to parse
+     * @throws ConnectionStringError if the string is invalid
+     */
+    public ConnectionString(string connection_string) throws ConnectionStringError {
+        parse(connection_string);
+    }
+    
+    /**
+     * Parses a connection string, returning null instead of throwing.
+     * Useful for user-supplied input where invalid strings are expected.
+     * 
+     * @param s The connection string to parse
+     * @return A ConnectionString if parsing succeeds, or null if it fails
+     */
+    public static ConnectionString? try_parse(string s) {
+        ConnectionString? result = null;
+        try {
+            result = new ConnectionString(s);
+        } catch (ConnectionStringError e) {
+            // Parsing is the only failure mode; swallow and signal via null.
+            result = null;
+        }
+        return result;
+    }
+    
+    /**
+     * Parses the connection string and populates the properties.
+     * Tries the short embedded form first, then the full implexus:// form.
+     */
+    private void parse(string s) throws ConnectionStringError {
+        if (s == null || s.strip() == "") {
+            throw new ConnectionStringError.INVALID_FORMAT("Connection string cannot be empty");
+        }
+        
+        var trimmed = s.strip();
+        
+        // The two forms are mutually exclusive; the first parser that
+        // recognizes the scheme claims the input.
+        if (!parse_short_form(trimmed) && !parse_full_form(trimmed)) {
+            throw new ConnectionStringError.INVALID_FORMAT(
+                "Invalid connection string format: %s".printf(trimmed)
+            );
+        }
+    }
+    
+    /**
+     * Parses short-form connection strings such as lmdb:///path/to/db.
+     * Locals are named so they do not shadow the backend/path properties.
+     * 
+     * @return true when the input used a recognized backend scheme
+     */
+    private bool parse_short_form(string input) throws ConnectionStringError {
+        foreach (string candidate in VALID_BACKENDS) {
+            string scheme = candidate + "://";
+            if (!input.has_prefix(scheme)) {
+                continue;
+            }
+            
+            string db_path = input.substring(scheme.length);
+            if (db_path.length == 0) {
+                throw new ConnectionStringError.MISSING_PARAMETER(
+                    "Path is required for embedded mode: %s".printf(input)
+                );
+            }
+            
+            // Absolute paths are accepted outright; anything else must pass
+            // relative-path validation.
+            if (!db_path.has_prefix("/") && !is_valid_relative_path(db_path)) {
+                throw new ConnectionStringError.INVALID_PARAMETER(
+                    "Invalid path: %s".printf(db_path)
+                );
+            }
+            
+            this.is_remote = false;
+            this.backend = candidate;
+            this.path = db_path;
+            return true;
+        }
+        
+        return false;
+    }
+    
+    /**
+     * Parses full form connection strings like: implexus://host:port?params
+     *
+     * Supports bracketed IPv6 hosts ("[::1]:9876"), bare IPv6 hosts
+     * without a port, and the special "embedded" host combined with a
+     * "backend" query parameter for embedded mode.
+     *
+     * @param input the raw connection string
+     * @return true if the input matched the full form; false if the prefix
+     *         did not match (so other forms can be tried)
+     * @throws ConnectionStringError if the prefix matches but the rest is
+     *         malformed or missing required parameters
+     */
+    private bool parse_full_form(string input) throws ConnectionStringError {
+        string prefix = "implexus://";
+        if (!input.has_prefix(prefix)) {
+            return false;
+        }
+        
+        string remainder = input.substring(prefix.length);
+        
+        // Split host[:port] from query string
+        string host_part;
+        string? query = null;
+        
+        int query_pos = remainder.index_of("?");
+        if (query_pos >= 0) {
+            host_part = remainder.substring(0, query_pos);
+            query = remainder.substring(query_pos + 1);
+        } else {
+            host_part = remainder;
+        }
+        
+        // Parse host and port
+        string host;
+        int? port = null;
+        
+        // Handle IPv6 addresses [::1]:port
+        if (host_part.has_prefix("[")) {
+            int bracket_pos = host_part.index_of("]");
+            if (bracket_pos < 0) {
+                throw new ConnectionStringError.INVALID_FORMAT(
+                    "Invalid IPv6 address format: %s".printf(host_part)
+                );
+            }
+            host = host_part.substring(1, bracket_pos - 1);
+            if (bracket_pos + 1 < host_part.length) {
+                // Anything after "]" must be a ":port" suffix; reject
+                // trailing garbage instead of silently ignoring it
+                if (host_part[bracket_pos + 1] != ':') {
+                    throw new ConnectionStringError.INVALID_FORMAT(
+                        "Unexpected characters after IPv6 address: %s".printf(host_part)
+                    );
+                }
+                string port_str = host_part.substring(bracket_pos + 2);
+                port = parse_port(port_str, input);
+            }
+        } else {
+            // Handle host:port or just host
+            int colon_pos = host_part.last_index_of(":");
+            if (colon_pos > 0) {
+                // A single colon means host:port; more than one means an
+                // unbracketed IPv6 address (which cannot carry a port)
+                if (host_part.index_of(":") == colon_pos) {
+                    // host:port format
+                    host = host_part.substring(0, colon_pos);
+                    string port_str = host_part.substring(colon_pos + 1);
+                    port = parse_port(port_str, input);
+                } else {
+                    // IPv6 address without port
+                    host = host_part;
+                }
+            } else {
+                host = host_part;
+            }
+        }
+        
+        if (host.length == 0) {
+            throw new ConnectionStringError.MISSING_PARAMETER(
+                "Host is required: %s".printf(input)
+            );
+        }
+        
+        // Parse query parameters
+        var params = parse_query_string(query);
+        
+        // Determine if this is remote or embedded mode
+        // "embedded" as host with backend parameter = embedded mode
+        // anything else = remote mode
+        bool is_embedded_host = host == "embedded";
+        string? backend_param = params.get("backend");
+        
+        if (is_embedded_host && backend_param != null) {
+            // Embedded mode via full form
+            string backend_value = (!) backend_param;
+            
+            // Validate backend
+            if (!is_valid_backend(backend_value)) {
+                throw new ConnectionStringError.UNKNOWN_BACKEND(
+                    "Unknown backend: %s. Valid backends: lmdb, gdbm, filesystem".printf(backend_value)
+                );
+            }
+            
+            string? path_param = params.get("path");
+            if (path_param == null) {
+                throw new ConnectionStringError.MISSING_PARAMETER(
+                    "Path is required for embedded mode: %s".printf(input)
+                );
+            }
+            
+            this.is_remote = false;
+            this.backend = backend_value;
+            this.path = (!) path_param;
+            
+            // Parse backend-specific options
+            parse_backend_options(params);
+        } else {
+            // Remote mode
+            this.is_remote = true;
+            this.host = host;
+            this.port = port;
+            
+            // Parse remote-specific options
+            string? timeout_param = params.get("timeout");
+            if (timeout_param != null) {
+                this.timeout = parse_int_param("timeout", (!) timeout_param, input);
+            }
+        }
+        
+        return true;
+    }
+    
+    /**
+     * Parses a port string and validates that it is a usable TCP port.
+     *
+     * @param port_str the textual port number
+     * @param input the original connection string, passed through for
+     *        error context
+     * @return the port as an int in the range 1..65535
+     * @throws ConnectionStringError if the text is not a number or the
+     *         number is outside the valid port range
+     */
+    private int parse_port(string port_str, string input) throws ConnectionStringError {
+        var parsed = parse_int_param("port", port_str, input);
+        bool in_range = parsed >= 1 && parsed <= 65535;
+        if (!in_range) {
+            throw new ConnectionStringError.INVALID_PARAMETER(
+                "Port must be between 1 and 65535: %d".printf(parsed)
+            );
+        }
+        return parsed;
+    }
+    
+    /**
+     * Parses an integer parameter with validation.
+     *
+     * @param name the parameter name, used in error messages
+     * @param value the raw string value to parse
+     * @param input the original connection string (reserved for error
+     *        context; currently unused)
+     * @return the parsed integer
+     * @throws ConnectionStringError.INVALID_PARAMETER if the value is not
+     *         a valid integer or does not fit in an int
+     */
+    private int parse_int_param(string name, string value, string input) throws ConnectionStringError {
+        int64 result;
+        if (!int64.try_parse(value, out result)) {
+            throw new ConnectionStringError.INVALID_PARAMETER(
+                "Invalid %s value: %s".printf(name, value)
+            );
+        }
+        // Guard against silent truncation when narrowing int64 -> int
+        if (result < int.MIN || result > int.MAX) {
+            throw new ConnectionStringError.INVALID_PARAMETER(
+                "%s value out of range: %s".printf(name, value)
+            );
+        }
+        return (int) result;
+    }
+    
+    /**
+     * Parses backend-specific options from query parameters.
+     *
+     * Recognized keys: map_size (LMDB, positive integer), enable_cache
+     * (boolean) and cache_size (positive integer). Unknown keys are
+     * silently ignored.
+     *
+     * @param params the parsed query parameters
+     * @throws ConnectionStringError if a recognized option has an invalid
+     *         or non-positive value
+     */
+    private void parse_backend_options(HashTable<string, string> params) throws ConnectionStringError {
+        // LMDB map size (megabytes)
+        unowned string? map_size_value = params.get("map_size");
+        if (map_size_value != null) {
+            this.map_size = parse_int_param("map_size", (!) map_size_value, "");
+            if (this.map_size <= 0) {
+                throw new ConnectionStringError.INVALID_PARAMETER(
+                    "map_size must be positive: %d".printf(this.map_size)
+                );
+            }
+        }
+        
+        // Whether the cache is enabled
+        unowned string? cache_flag = params.get("enable_cache");
+        if (cache_flag != null) {
+            this.enable_cache = parse_bool_param("enable_cache", (!) cache_flag);
+        }
+        
+        // Cache capacity
+        unowned string? cache_size_value = params.get("cache_size");
+        if (cache_size_value != null) {
+            this.cache_size = parse_int_param("cache_size", (!) cache_size_value, "");
+            if (this.cache_size <= 0) {
+                throw new ConnectionStringError.INVALID_PARAMETER(
+                    "cache_size must be positive: %d".printf(this.cache_size)
+                );
+            }
+        }
+    }
+    
+    /**
+     * Parses a boolean parameter.
+     *
+     * Accepts, case-insensitively: true/1/yes/on and false/0/no/off.
+     *
+     * @param name the parameter name, used in error messages
+     * @param value the raw value text
+     * @return the parsed boolean
+     * @throws ConnectionStringError.INVALID_PARAMETER for any other value
+     */
+    private bool parse_bool_param(string name, string value) throws ConnectionStringError {
+        switch (value.down()) {
+            case "true":
+            case "1":
+            case "yes":
+            case "on":
+                return true;
+            case "false":
+            case "0":
+            case "no":
+            case "off":
+                return false;
+            default:
+                throw new ConnectionStringError.INVALID_PARAMETER(
+                    "Invalid %s value: %s (expected true/false)".printf(name, value)
+                );
+        }
+    }
+    
+    /**
+     * Parses a query string into a hash table.
+     *
+     * Both keys and values are URI-unescaped; if unescaping fails the raw
+     * text is kept as-is. A key without "=value" becomes a boolean flag
+     * whose value is "true".
+     *
+     * @param query the raw query string (the text after "?"), or null
+     * @return a table of parameter names to values (never null)
+     */
+    private HashTable<string, string> parse_query_string(string? query) {
+        var params = new HashTable<string, string>(str_hash, str_equal);
+        
+        if (query == null || query.length == 0) {
+            return params;
+        }
+        
+        // Split by &
+        string[] pairs = query.split("&");
+        foreach (string pair in pairs) {
+            string trimmed = pair.strip();
+            if (trimmed.length == 0) continue;
+            
+            int eq_pos = trimmed.index_of("=");
+            if (eq_pos > 0) {
+                string raw_key = trimmed.substring(0, eq_pos);
+                string raw_value = trimmed.substring(eq_pos + 1);
+                // Unescape both sides (keys may be percent-encoded too);
+                // fall back to the raw text when unescaping fails
+                string key = Uri.unescape_string(raw_key) ?? raw_key;
+                string value = Uri.unescape_string(raw_value) ?? raw_value;
+                params.insert(key, value);
+            } else {
+                // Flag parameter without value
+                params.insert(trimmed, "true");
+            }
+        }
+        
+        return params;
+    }
+    
+    /**
+     * Checks if a backend name is valid.
+     *
+     * The comparison is case-insensitive against VALID_BACKENDS.
+     *
+     * @param backend the backend name to check
+     * @return true if the name matches a known backend
+     */
+    private bool is_valid_backend(string backend) {
+        string normalized = backend.down();
+        foreach (unowned string candidate in VALID_BACKENDS) {
+            if (candidate == normalized) {
+                return true;
+            }
+        }
+        return false;
+    }
+    
+    /**
+     * Checks if a path is a valid relative path.
+     *
+     * Rejects empty paths and any path containing "..", and requires the
+     * path to start with an alphanumeric character or the "./" prefix.
+     *
+     * @param path the path to validate
+     * @return true if the path looks like a safe relative path
+     */
+    private bool is_valid_relative_path(string path) {
+        if (path.length == 0 || path.contains("..")) {
+            return false;
+        }
+        if (path.has_prefix("./")) {
+            return true;
+        }
+        return path[0].isalnum();
+    }
+    
+    /**
+     * Creates an engine from this connection string.
+     * 
+     * This is a convenience method that creates the appropriate engine type
+     * based on the connection string parameters.
+     * 
+     * @return A new Engine instance
+     * @throws Core.EngineError if engine creation fails
+     */
+    public Core.Engine create_engine() throws Core.EngineError {
+        if (!is_remote) {
+            // Embedded mode: build a configuration and hand it to the factory
+            var config = new EngineConfiguration();
+            config.mode = EngineMode.EMBEDDED;
+            config.storage_path = this.path ?? "./data";
+            
+            // Pass the backend selection to the configuration
+            config.storage_type = this.backend ?? "filesystem";
+            
+            // LMDB map size is given in MB on the connection string but the
+            // configuration expects bytes
+            if (map_size != null) {
+                config.lmdb_map_size = (int64) ((!) map_size) * 1024 * 1024;
+            }
+            
+            // Apply cache options
+            if (enable_cache != null) {
+                config.enable_cache = (!) enable_cache;
+            }
+            if (cache_size != null) {
+                config.cache_size = (!) cache_size;
+            }
+            
+            return EngineFactory.create(config);
+        }
+        
+        // Remote mode: connect to host:port (defaults: localhost:9876)
+        string remote_host = this.host ?? "localhost";
+        uint16 remote_port = (uint16) (this.port ?? 9876);
+        return EngineFactory.create_remote(remote_host, remote_port);
+    }
+    
+    /**
+     * Creates an EngineConfiguration from this connection string.
+     * 
+     * This is useful when you need to modify the configuration before
+     * creating the engine. The result carries the same settings that
+     * create_engine() would apply, including the backend selection and
+     * LMDB map size for embedded mode.
+     * 
+     * @return An EngineConfiguration based on this connection string
+     */
+    public EngineConfiguration to_configuration() {
+        var config = new EngineConfiguration();
+        
+        if (is_remote) {
+            config.mode = EngineMode.REMOTE;
+            config.host = this.host ?? "localhost";
+            config.port = (uint16) (this.port ?? 9876);
+            if (timeout != null) {
+                config.timeout_ms = (uint) (timeout * 1000);
+            }
+        } else {
+            config.mode = EngineMode.EMBEDDED;
+            config.storage_path = this.path ?? "./data";
+            
+            // Keep in sync with create_engine(): propagate the backend
+            // selection and LMDB map size so a configuration built here
+            // behaves the same as one built by create_engine()
+            config.storage_type = this.backend ?? "filesystem";
+            if (map_size != null) {
+                config.lmdb_map_size = (int64) ((!) map_size) * 1024 * 1024; // MB -> bytes
+            }
+            
+            if (enable_cache != null) {
+                config.enable_cache = (!) enable_cache;
+            }
+            if (cache_size != null) {
+                config.cache_size = (!) cache_size;
+            }
+        }
+        
+        return config;
+    }
+    
+    /**
+     * Serializes this connection string back to string format.
+     * 
+     * IPv6 hosts (any host containing ':') are wrapped in brackets so the
+     * output parses back unambiguously. Embedded mode uses the short
+     * backend://path form, which does not carry backend options such as
+     * map_size or cache settings (the round-trip is lossy for those).
+     * 
+     * @return A connection string that can be parsed back
+     */
+    public string to_connection_string() {
+        if (is_remote) {
+            var sb = new StringBuilder("implexus://");
+            string h = host ?? "localhost";
+            if (h.contains(":")) {
+                // Bracket IPv6 literals; otherwise "::1:9876" would be
+                // re-parsed as a port-less IPv6 address by parse_full_form
+                sb.append("[");
+                sb.append(h);
+                sb.append("]");
+            } else {
+                sb.append(h);
+            }
+            
+            if (port != null) {
+                sb.append(":");
+                sb.append(port.to_string());
+            }
+            
+            bool has_params = timeout != null;
+            if (has_params) {
+                sb.append("?");
+                if (timeout != null) {
+                    sb.append("timeout=");
+                    sb.append(timeout.to_string());
+                }
+            }
+            
+            return sb.str;
+        } else {
+            // Use short form for embedded mode
+            var sb = new StringBuilder(backend ?? "filesystem");
+            sb.append("://");
+            sb.append(path ?? "./data");
+            
+            return sb.str;
+        }
+    }
+    
+    /**
+     * Returns a human-readable description of this connection string.
+     * 
+     * @return A description string
+     */
+    public string describe() {
+        if (!is_remote) {
+            return "ConnectionString(EMBEDDED, backend=%s, path=%s)".printf(
+                backend ?? "filesystem",
+                path ?? "./data"
+            );
+        }
+        return "ConnectionString(REMOTE, host=%s, port=%d)".printf(
+            host ?? "localhost",
+            port ?? 9876
+        );
+    }
+}
+
+} // namespace Implexus.Engine

+ 1323 - 0
src/Engine/EmbeddedEngine.vala

@@ -0,0 +1,1323 @@
+/**
+ * EmbeddedEngine - Embedded database engine implementation
+ * 
+ * Provides direct in-process database operations without requiring
+ * a separate server process. All I/O operations are async and go
+ * through the AsyncDbmQueue for serialized access.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Embedded database engine implementation.
+ * 
+ * The EmbeddedEngine provides direct in-process access to the database.
+ * It manages storage, entity creation, queries, and transactions.
+ * All I/O operations are async and use the AsyncDbmQueue for proper
+ * thread safety and priority-based scheduling.
+ * 
+ * Example usage:
+ * {{{
+ * var storage = new Storage.BasicStorage.with_directory("/path/to/data");
+ * var config = new Core.StorageConfiguration(storage);
+ * var engine = new EmbeddedEngine(config);
+ * 
+ * // All I/O is async
+ * var root = yield engine.get_root_async();
+ * var users = yield root.create_container_async("users");
+ * var john = yield users.create_document_async("john", "User");
+ * yield john.set_entity_property_async("email", new Invercargill.NativeElement<string>("john@example.com"));
+ * 
+ * // Query users
+ * foreach (var user in yield engine.query_by_type_async("User")) {
+ *     print("%s\n", user.name);
+ * }
+ * }}}
+ */
+public class EmbeddedEngine : Object, Core.Engine {
+    
+    // === Private Fields ===
+    
+    /**
+     * The underlying Dbm storage backend.
+     *
+     * NOTE(review): only assigned in initialize_managers() when the
+     * configured storage is a BasicStorage — confirm other storage types
+     * are unsupported in embedded mode, since later code dereferences this.
+     */
+    private Storage.Dbm _dbm;
+    
+    /**
+     * The async queue for DBM operations (serialized access to _dbm).
+     */
+    private Storage.AsyncDbmQueue _queue;
+    
+    /**
+     * The storage backend (deprecated, kept for backward compatibility).
+     */
+    private Storage.Storage _storage;
+    
+    /**
+     * The root container entity, created lazily by get_root_async().
+     */
+    private Entities.Container? _root = null;
+    
+    /**
+     * The storage configuration.
+     */
+    private Core.StorageConfiguration _configuration;
+    
+    /**
+     * The hook manager for entity change notifications.
+     */
+    private HookManager _hook_manager;
+    
+    /**
+     * Whether a transaction is currently active.
+     */
+    private bool _in_transaction = false;
+    
+    /**
+     * The current active transaction.
+     *
+     * Weak — presumably to break the reference cycle with the
+     * transaction's back-reference to this engine; confirm against
+     * EmbeddedTransaction's ownership.
+     */
+    private weak EmbeddedTransaction? _current_transaction = null;
+    
+    /**
+     * Internal accessor for the current transaction.
+     * Used by entities to queue operations when in a transaction.
+     */
+    internal EmbeddedTransaction? current_transaction { get { return _current_transaction; } }
+    
+    // === New High-Level Stores ===
+    //
+    // NOTE(review): all stores are only created in initialize_stores()
+    // when a Dbm handle is available; otherwise they remain null.
+    
+    /**
+     * Entity metadata and type index store.
+     */
+    private Storage.HighLevel.EntityStore _entity_store;
+    
+    /**
+     * Document properties store.
+     */
+    private Storage.HighLevel.DocumentStore _document_store;
+    
+    /**
+     * Container children store.
+     */
+    private Storage.HighLevel.ContainerStore _container_store;
+    
+    /**
+     * Category configuration and index store.
+     */
+    private Storage.HighLevel.CategoryStore _category_store;
+    
+    /**
+     * Catalogue configuration and index store.
+     */
+    private Storage.HighLevel.CatalogueStore _catalogue_store;
+    
+    /**
+     * Text index store.
+     */
+    private Storage.HighLevel.IndexStore _index_store;
+    
+    // === Public Store Accessors ===
+    
+    /**
+     * Access to the entity metadata and type index store.
+     */
+    public Storage.HighLevel.EntityStore entity_store { get { return _entity_store; } }
+    
+    /**
+     * Access to the document properties store.
+     */
+    public Storage.HighLevel.DocumentStore document_store { get { return _document_store; } }
+    
+    /**
+     * Access to the container children store.
+     */
+    public Storage.HighLevel.ContainerStore container_store { get { return _container_store; } }
+    
+    /**
+     * Access to the category configuration and index store.
+     */
+    public Storage.HighLevel.CategoryStore category_store { get { return _category_store; } }
+    
+    /**
+     * Access to the catalogue configuration and index store.
+     */
+    public Storage.HighLevel.CatalogueStore catalogue_store { get { return _catalogue_store; } }
+    
+    /**
+     * Access to the text index store.
+     */
+    public Storage.HighLevel.IndexStore index_store { get { return _index_store; } }
+    
+    /**
+     * Access to the hook manager.
+     */
+    public HookManager hook_manager { get { return _hook_manager; } }
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new EmbeddedEngine with the given configuration.
+     *
+     * NOTE(review): real initialization work happens in the constructor
+     * rather than a GObject-style construct { } block — acceptable only
+     * if the class is never constructed via Object.new (); confirm.
+     *
+     * @param config The storage configuration
+     */
+    public EmbeddedEngine(Core.StorageConfiguration config) {
+        _configuration = config;
+        _storage = config.storage;
+        initialize_managers();
+    }
+    
+    /**
+     * Creates a new EmbeddedEngine with a file-based storage.
+     *
+     * This is a convenience constructor that creates a BasicStorage
+     * with the given directory path and wraps it in a default
+     * StorageConfiguration.
+     *
+     * @param storage_path The directory path for storage
+     */
+    public EmbeddedEngine.with_path(string storage_path) {
+        _storage = new Storage.BasicStorage.with_directory(storage_path);
+        _configuration = new Core.StorageConfiguration(_storage);
+        _configuration.storage_path = storage_path;
+        initialize_managers();
+    }
+    
+    /**
+     * Initializes the HookManager, AsyncDbmQueue, and HighLevel stores.
+     *
+     * The async queue and high-level stores are only available when the
+     * configured storage is a BasicStorage (the only backend exposing a
+     * Dbm handle); a warning is logged otherwise so later failures in
+     * the async paths are diagnosable.
+     */
+    private void initialize_managers() {
+        // Get Dbm from storage for stores
+        var basic_storage = (_storage as Storage.BasicStorage);
+        if (basic_storage != null) {
+            _dbm = basic_storage.dbm;
+            
+            // Create and start the async queue
+            _queue = new Storage.AsyncDbmQueue(_dbm);
+            _queue.start();
+        } else {
+            // Without a Dbm the queue and stores stay null and most async
+            // operations will fail; surface that early instead of silently
+            warning("EmbeddedEngine: storage is not a BasicStorage; async queue and high-level stores are unavailable");
+        }
+        
+        // Initialize hook manager
+        _hook_manager = new HookManager();
+        _hook_manager.engine = this;  // Set engine reference for batch execution
+        _configuration.hook_manager = _hook_manager;
+        
+        // Initialize new high-level stores
+        initialize_stores();
+        
+        // Connect engine signals to hook manager
+        this.entity_created.connect(on_entity_created);
+        this.entity_modified.connect(on_entity_modified);
+        this.entity_deleted.connect(on_entity_deleted);
+    }
+    
+    /**
+     * Initializes the new high-level stores.
+     *
+     * Does nothing when no Dbm handle is available.
+     */
+    private void initialize_stores() {
+        if (_dbm == null) {
+            return;
+        }
+        _entity_store = new Storage.HighLevel.EntityStore(_dbm);
+        _document_store = new Storage.HighLevel.DocumentStore(_dbm);
+        _container_store = new Storage.HighLevel.ContainerStore(_dbm);
+        _category_store = new Storage.HighLevel.CategoryStore(_dbm);
+        _catalogue_store = new Storage.HighLevel.CatalogueStore(_dbm);
+        _index_store = new Storage.HighLevel.IndexStore(_dbm);
+    }
+    
+    /**
+     * Handles entity creation events and notifies hooks.
+     * Connected to the entity_created signal in initialize_managers().
+     */
+    private void on_entity_created(Core.Entity entity) {
+        _hook_manager.notify_entity_change(entity, EntityChangeType.CREATED);
+    }
+    
+    /**
+     * Handles entity modification events and notifies hooks.
+     * Connected to the entity_modified signal in initialize_managers().
+     */
+    private void on_entity_modified(Core.Entity entity) {
+        _hook_manager.notify_entity_change(entity, EntityChangeType.MODIFIED);
+    }
+    
+    /**
+     * Handles entity deletion events.
+     *
+     * Intentionally empty: the type_label needed for hook dispatch is no
+     * longer available once the entity is deleted, so hook handlers must
+     * detect deletions by checking their own indexes instead.
+     */
+    private void on_entity_deleted(Core.EntityPath path) {
+        // Note: We need the type_label for hooks but we don't have it here
+        // The hook handlers will need to check if the path is in their index
+        // This is a limitation of the current design
+    }
+    
+    // === Engine Interface - Root Access ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Gets the root container, creating it if necessary. The instance is
+     * cached after the first call.
+     *
+     * NOTE(review): two coroutines entering here concurrently before the
+     * cache is populated could both attempt to create the root metadata —
+     * confirm store_entity_metadata is idempotent for the root path.
+     */
+    public async Core.Entity get_root_async() throws Core.EngineError {
+        if (_root != null) {
+            return (!) _root;
+        }
+        
+        var root_path = new Core.EntityPath.root();
+        
+        // Check if root exists in storage via queue
+        bool exists = yield _entity_exists_async_internal(root_path);
+        if (!exists) {
+            try {
+                yield _store_entity_metadata_async_internal(root_path, Core.EntityType.CONTAINER, null);
+            } catch (Storage.StorageError e) {
+                throw new Core.EngineError.STORAGE_ERROR("Failed to create root: %s".printf(e.message));
+            }
+        }
+        
+        _root = new Entities.Container(this, root_path);
+        return (!) _root;
+    }
+    
+    // === Engine Interface - Path-Based Access ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Resolution order: persistent storage first, then virtual child
+     * resolution. Despite the nullable return type (inherited from the
+     * interface), this implementation never returns null — a miss raises
+     * ENTITY_NOT_FOUND instead.
+     */
+    public async Core.Entity? get_entity_async(Core.EntityPath path) throws Core.EngineError {
+        // First: Try direct entity lookup
+        bool exists = yield _entity_exists_async_internal(path);
+        if (exists) {
+            return yield _create_entity_from_storage_async(path);
+        }
+        
+        // Second: Try virtual child resolution
+        var virtual_entity = yield _try_resolve_virtual_child_async(path);
+        if (virtual_entity != null) {
+            return (!) virtual_entity;
+        }
+        
+        // Not found anywhere
+        throw new Core.EngineError.ENTITY_NOT_FOUND(
+            "Entity not found: %s".printf(path.to_string())
+        );
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns null when the entity does not exist; genuine storage or
+     * transaction failures are propagated rather than masked as "absent".
+     */
+    public async Core.Entity? get_entity_or_null_async(Core.EntityPath path) throws Core.EngineError {
+        try {
+            return yield get_entity_async(path);
+        } catch (Core.EngineError e) {
+            // Only "not found" maps to null; rethrow real failures so
+            // callers can distinguish a missing entity from a broken store
+            if (e is Core.EngineError.ENTITY_NOT_FOUND) {
+                return null;
+            }
+            throw e;
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * An entity "exists" if it is in persistent storage or can be
+     * resolved as a virtual child.
+     */
+    public async bool entity_exists_async(Core.EntityPath path) throws Core.EngineError {
+        // Persistent storage wins; fall back to virtual resolution
+        if (yield _entity_exists_async_internal(path)) {
+            return true;
+        }
+        return (yield _try_resolve_virtual_child_async(path)) != null;
+    }
+    
+    // === Engine Interface - Query Operations ===
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns all documents with the specified type_label, found by
+     * recursively walking the tree from the root.
+     */
+    public async Core.Entity[] query_by_type_async(string type_label) throws Core.EngineError {
+        var matches = new Invercargill.DataStructures.Vector<Core.Entity>();
+        
+        // Walk the whole tree from the root, collecting matching documents
+        var root = yield get_root_async();
+        yield _search_by_type_async(root, type_label, matches);
+        
+        // Copy into a plain array for the interface return type
+        var entities = new Core.Entity[matches.length];
+        int index = 0;
+        foreach (var match in matches) {
+            entities[index] = match;
+            index++;
+        }
+        
+        return entities;
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns all documents with the specified type_label that match the
+     * expression. Implemented as a full scan: every document of the type
+     * is loaded and the expression is evaluated against each one.
+     * Documents whose evaluation raises an EngineError are silently
+     * skipped (treated as non-matching).
+     */
+    public async Core.Entity[] query_by_expression_async(
+        string type_label, 
+        string expression
+    ) throws Core.EngineError {
+        var results = new Invercargill.DataStructures.Vector<Core.Entity>();
+        
+        // Get all documents of the type
+        var all_of_type = yield query_by_type_async(type_label);
+        
+        foreach (var entity in all_of_type) {
+            try {
+                // Evaluate the expression on the document
+                if (yield _evaluate_expression_async(entity, expression)) {
+                    results.add(entity);
+                }
+            } catch (Core.EngineError e) {
+                // Skip entities that fail expression evaluation
+            }
+        }
+        
+        // Convert to array
+        var array = new Core.Entity[results.length];
+        int i = 0;
+        foreach (var entity in results) {
+            array[i++] = entity;
+        }
+        
+        return array;
+    }
+    
+    // === Engine Interface - Transactions ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Begins a storage-level transaction. Only one transaction may be
+     * active at a time. The begin call runs on a worker thread via a
+     * queued write slot; this coroutine resumes from the main loop
+     * once the worker finishes.
+     */
+    public async Core.Transaction begin_transaction_async() throws Core.EngineError {
+        if (_in_transaction) {
+            throw new Core.EngineError.TRANSACTION_ERROR(
+                "A transaction is already active"
+            );
+        }
+        
+        // Begin Dbm transaction via queue
+        var op = _queue.queue_write();
+        op.callback = begin_transaction_async.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    _dbm.begin_transaction();
+                    op.error = null;
+                } catch (Storage.StorageError e) {
+                    op.error = e;
+                }
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            // NOTE(review): the queued write slot is abandoned here —
+            // confirm AsyncDbmQueue tolerates an op that never completes
+            throw new Core.EngineError.TRANSACTION_ERROR(
+                "Failed to create transaction thread: %s".printf(e.message)
+            );
+        }
+        
+        yield;
+        
+        if (op.error != null) {
+            throw new Core.EngineError.TRANSACTION_ERROR(
+                "Failed to begin transaction: %s".printf(op.error.message)
+            );
+        }
+        
+        _in_transaction = true;
+        
+        var tx = new EmbeddedTransaction(this);
+        _current_transaction = tx;
+        return tx;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Commits the active transaction via a queued write slot on a worker
+     * thread. On failure the engine still considers the transaction
+     * active — presumably so the caller can attempt rollback_async();
+     * confirm this is intended.
+     */
+    public async void commit_async() throws Core.EngineError {
+        if (!_in_transaction) {
+            throw new Core.EngineError.TRANSACTION_ERROR("No active transaction to commit");
+        }
+        
+        // Commit Dbm transaction via queue
+        var op = _queue.queue_write();
+        op.callback = commit_async.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    _dbm.commit_transaction();
+                    op.error = null;
+                } catch (Storage.StorageError e) {
+                    op.error = e;
+                }
+                // Resume the coroutine from the main loop
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Core.EngineError.TRANSACTION_ERROR(
+                "Failed to create commit thread: %s".printf(e.message)
+            );
+        }
+        
+        yield;
+        
+        if (op.error != null) {
+            throw new Core.EngineError.TRANSACTION_ERROR(
+                "Failed to commit transaction: %s".printf(op.error.message)
+            );
+        }
+        
+        _in_transaction = false;
+        _current_transaction = null;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Rolls back the active transaction. Never throws: failures are
+     * absorbed and the engine's transaction state is always reset.
+     */
+    public async void rollback_async() {
+        if (!_in_transaction) {
+            return;
+        }
+        
+        // Rollback Dbm transaction via queue
+        var op = _queue.queue_write();
+        op.callback = rollback_async.callback;
+        
+        bool thread_started = true;
+        try {
+            new Thread<void>.try(null, () => {
+                _dbm.rollback_transaction();
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            // Thread creation failed: nothing will ever invoke op.callback,
+            // so resume this coroutine ourselves to avoid hanging forever
+            thread_started = false;
+            Idle.add(() => {
+                op.callback();
+                return Source.REMOVE;
+            });
+        }
+        
+        yield;
+        
+        if (!thread_started) {
+            warning("rollback_async: failed to create rollback thread; storage may still hold the transaction");
+        }
+        
+        _in_transaction = false;
+        _current_transaction = null;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * True while a storage-level transaction is open on this engine.
+     */
+    public bool in_transaction { 
+        get { return _in_transaction; } 
+    }
+    
+    // === Engine Interface - Configuration ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * The configuration this engine was constructed with ('owned get'
+     * because the getter hands out a strong reference).
+     */
+    public Core.StorageConfiguration configuration { 
+        owned get { return _configuration; } 
+    }
+    
+    // === Internal Sync Methods for Hook Use ===
+    //
+    // These methods are used by HookManager and run in the DBM thread.
+    // They bypass the async queue and access storage directly.
+    
+    /**
+     * Gets an entity by path synchronously (for hook use).
+     *
+     * This method bypasses the async queue and should only be called
+     * from within a hook callback, which runs in the DBM thread.
+     *
+     * @param path The entity path
+     * @return The entity, or null if not found
+     */
+    internal Core.Entity? get_entity_or_null_sync(Core.EntityPath path) {
+        // Direct lookup first, then fall back to virtual child resolution
+        return _get_entity_or_null_sync_internal(path) ?? _try_resolve_virtual_child_sync(path);
+    }
+    
+    /**
+     * Checks if an entity exists synchronously (for hook use).
+     *
+     * Bypasses the async queue; call only from the DBM thread. Unlike
+     * entity_exists_async, this checks persistent storage only — no
+     * virtual child resolution.
+     *
+     * @param path The entity path
+     * @return true if the entity exists
+     */
+    internal bool entity_exists_sync(Core.EntityPath path) {
+        return _storage.entity_exists(path);
+    }
+    
+    /**
+     * Gets all child names synchronously (for hook use).
+     *
+     * Bypasses the async queue; call only from the DBM thread.
+     *
+     * @param parent_path The parent container path
+     * @return Enumerable of child names
+     */
+    internal Invercargill.Enumerable<string> get_child_names_sync(Core.EntityPath parent_path) {
+        return _container_store.get_children(parent_path);
+    }
+    
+    /**
+     * Gets the type index for a category synchronously (for hook use).
+     *
+     * Bypasses the async queue; call only from the DBM thread.
+     *
+     * @param category_path The category path
+     * @return Enumerable of entity paths
+     */
+    internal Invercargill.Enumerable<string> get_type_index_sync(Core.EntityPath category_path) {
+        return _category_store.get_members(category_path);
+    }
+    
+    // === Private Async Implementation Methods ===
+    
+    /**
+     * Internal async method to check if entity exists.
+     *
+     * Reserves a read slot on the async queue, performs the storage
+     * lookup on a worker thread, and resumes this coroutine from the
+     * main loop once the result is available.
+     *
+     * @param path the entity path to probe
+     * @return true if the entity exists in persistent storage
+     * @throws Core.EngineError.STORAGE_ERROR if the worker thread
+     *         cannot be created
+     */
+    private async bool _entity_exists_async_internal(Core.EntityPath path) throws Core.EngineError {
+        bool result = false;
+        
+        var op = _queue.queue_read();
+        op.callback = _entity_exists_async_internal.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                // entity_exists does not throw, so no error capture is needed
+                result = _storage.entity_exists(path);
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Core.EngineError.STORAGE_ERROR("Failed to create read thread: %s".printf(e.message));
+        }
+        
+        yield;
+        
+        return result;
+    }
+    
+    /**
+     * Internal async method to store entity metadata.
+     *
+     * Reserves a write slot on the async queue, performs the write on a
+     * worker thread, and resumes this coroutine from the main loop; a
+     * StorageError raised on the worker is captured in op.error and
+     * re-thrown here after the yield.
+     *
+     * @param path the entity path
+     * @param type the entity type to record
+     * @param type_label optional type label for documents
+     * @throws Storage.StorageError if the write fails or the worker
+     *         thread cannot be created
+     */
+    private async void _store_entity_metadata_async_internal(
+        Core.EntityPath path, 
+        Core.EntityType type, 
+        string? type_label
+    ) throws Storage.StorageError {
+        var op = _queue.queue_write();
+        op.callback = _store_entity_metadata_async_internal.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    _storage.store_entity_metadata(path, type, type_label);
+                    op.error = null;
+                } catch (Storage.StorageError e) {
+                    op.error = e;
+                }
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Storage.StorageError.IO_ERROR("Failed to create write thread: %s".printf(e.message));
+        }
+        
+        yield;
+        
+        if (op.error != null) {
+            throw (Storage.StorageError) op.error;
+        }
+    }
+    
+    /**
+     * Internal async method to get entity type.
+     *
+     * The blocking lookup runs on a worker thread; this coroutine is parked
+     * on the read queue and resumed from the main loop via Idle.add. Errors
+     * raised on the worker are captured locally and re-thrown on resumption.
+     *
+     * @param path The entity path
+     * @return The stored entity type, or null if none is recorded
+     * @throws Storage.StorageError if thread creation or the lookup fails
+     */
+    private async Core.EntityType? _get_entity_type_async_internal(Core.EntityPath path) throws Storage.StorageError {
+        Core.EntityType? result = null;
+        Error? error = null;
+        
+        var op = _queue.queue_read();
+        op.callback = _get_entity_type_async_internal.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    result = _storage.get_entity_type(path);
+                } catch (Storage.StorageError e) {
+                    // Cannot throw across threads; stash for the resumed coroutine.
+                    error = e;
+                }
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Storage.StorageError.IO_ERROR("Failed to create read thread: %s".printf(e.message));
+        }
+        
+        yield;
+        
+        if (error != null) {
+            throw (Storage.StorageError) error;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Internal async method to get entity type label.
+     *
+     * Same thread-offload pattern as the other read internals: the lookup
+     * runs on a worker thread, the coroutine is resumed via Idle.add, and
+     * any worker-side StorageError is re-thrown after resumption.
+     *
+     * @param path The entity path
+     * @return The stored type label, or null if none is recorded
+     * @throws Storage.StorageError if thread creation or the lookup fails
+     */
+    private async string? _get_entity_type_label_async_internal(Core.EntityPath path) throws Storage.StorageError {
+        string? result = null;
+        Error? error = null;
+        
+        var op = _queue.queue_read();
+        op.callback = _get_entity_type_label_async_internal.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    result = _storage.get_entity_type_label(path);
+                } catch (Storage.StorageError e) {
+                    // Cannot throw across threads; stash for the resumed coroutine.
+                    error = e;
+                }
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Storage.StorageError.IO_ERROR("Failed to create read thread: %s".printf(e.message));
+        }
+        
+        yield;
+        
+        if (error != null) {
+            throw (Storage.StorageError) error;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Internal async method to get category config.
+     *
+     * Same thread-offload pattern as the other read internals: the lookup
+     * runs on a worker thread, the coroutine is resumed via Idle.add, and
+     * any worker-side StorageError is re-thrown after resumption.
+     *
+     * @param path The category path
+     * @return The stored category config, or null if none is recorded
+     * @throws Storage.StorageError if thread creation or the lookup fails
+     */
+    private async Storage.CategoryConfig? _get_category_config_async_internal(Core.EntityPath path) throws Storage.StorageError {
+        Storage.CategoryConfig? result = null;
+        Error? error = null;
+        
+        var op = _queue.queue_read();
+        op.callback = _get_category_config_async_internal.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    result = _storage.get_category_config(path);
+                } catch (Storage.StorageError e) {
+                    // Cannot throw across threads; stash for the resumed coroutine.
+                    error = e;
+                }
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Storage.StorageError.IO_ERROR("Failed to create read thread: %s".printf(e.message));
+        }
+        
+        yield;
+        
+        if (error != null) {
+            throw (Storage.StorageError) error;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Internal async method to get catalogue config.
+     *
+     * Same thread-offload pattern as the other read internals: the lookup
+     * runs on a worker thread, the coroutine is resumed via Idle.add, and
+     * any worker-side StorageError is re-thrown after resumption.
+     *
+     * @param path The catalogue path
+     * @return The stored catalogue config, or null if none is recorded
+     * @throws Storage.StorageError if thread creation or the lookup fails
+     */
+    private async Storage.CatalogueConfig? _get_catalogue_config_async_internal(Core.EntityPath path) throws Storage.StorageError {
+        Storage.CatalogueConfig? result = null;
+        Error? error = null;
+        
+        var op = _queue.queue_read();
+        op.callback = _get_catalogue_config_async_internal.callback;
+        
+        try {
+            new Thread<void>.try(null, () => {
+                try {
+                    result = _storage.get_catalogue_config(path);
+                } catch (Storage.StorageError e) {
+                    // Cannot throw across threads; stash for the resumed coroutine.
+                    error = e;
+                }
+                Idle.add(() => {
+                    op.callback();
+                    return Source.REMOVE;
+                });
+            });
+        } catch (ThreadError e) {
+            throw new Storage.StorageError.IO_ERROR("Failed to create read thread: %s".printf(e.message));
+        }
+        
+        yield;
+        
+        if (error != null) {
+            throw (Storage.StorageError) error;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Creates an entity instance from storage metadata (async version).
+     *
+     * Reads the stored type and label, then instantiates the matching
+     * entity class. Categories and catalogues are additionally registered
+     * with the hook manager for change notifications.
+     *
+     * @param path The entity path
+     * @return The entity instance, or null if not found
+     * @throws Core.EngineError if metadata cannot be read or the type is unknown
+     */
+    private async Core.Entity? _create_entity_from_storage_async(Core.EntityPath path) throws Core.EngineError {
+        try {
+            var stored_type = yield _get_entity_type_async_internal(path);
+            if (stored_type == null) {
+                return null;
+            }
+            
+            var label = yield _get_entity_type_label_async_internal(path);
+            var resolved = (!) stored_type;
+            
+            if (resolved == Core.EntityType.CONTAINER) {
+                return new Entities.Container(this, path);
+            }
+            
+            if (resolved == Core.EntityType.DOCUMENT) {
+                return new Entities.Document(this, path, label ?? "");
+            }
+            
+            if (resolved == Core.EntityType.CATEGORY) {
+                var cfg = yield _get_category_config_async_internal(path);
+                var category = cfg != null
+                    ? new Entities.Category(this, path, ((!) cfg).type_label, ((!) cfg).expression)
+                    : new Entities.Category(this, path, "", "");
+                // Register with hook manager for change notifications
+                category.register_hooks();
+                return category;
+            }
+            
+            if (resolved == Core.EntityType.CATALOGUE) {
+                var cfg = yield _get_catalogue_config_async_internal(path);
+                var catalogue = cfg != null
+                    ? new Entities.Catalogue(this, path, ((!) cfg).type_label, ((!) cfg).expression)
+                    : new Entities.Catalogue(this, path, "", "");
+                // Register with hook manager for change notifications
+                catalogue.register_hooks();
+                return catalogue;
+            }
+            
+            if (resolved == Core.EntityType.INDEX) {
+                // Index shares the category config record; no hooks needed.
+                var cfg = yield _get_category_config_async_internal(path);
+                return cfg != null
+                    ? new Entities.Index(this, path, ((!) cfg).type_label, ((!) cfg).expression)
+                    : new Entities.Index(this, path, "", "");
+            }
+            
+            throw new Core.EngineError.STORAGE_ERROR(
+                "Unknown entity type: %s".printf(resolved.to_string())
+            );
+        } catch (Storage.StorageError e) {
+            throw new Core.EngineError.STORAGE_ERROR(
+                "Failed to load entity: %s".printf(e.message)
+            );
+        }
+    }
+    
+    // === Private Sync Implementation Methods (for hooks) ===
+    
+    /**
+     * Synchronous entity retrieval for hook use.
+     *
+     * @param path The entity path
+     * @return The entity, or null when it does not exist in storage
+     */
+    private Core.Entity? _get_entity_or_null_sync_internal(Core.EntityPath path) {
+        return _storage.entity_exists(path)
+            ? _create_entity_from_storage_sync(path)
+            : null;
+    }
+    
+    /**
+     * Creates an entity instance from storage metadata (sync version for hooks).
+     *
+     * Unlike the async variant, every failure mode (missing metadata,
+     * storage errors, unknown types) is reported as null, because hook
+     * callbacks cannot propagate exceptions.
+     *
+     * @param path The entity path
+     * @return The entity instance, or null if it cannot be materialised
+     */
+    private Core.Entity? _create_entity_from_storage_sync(Core.EntityPath path) {
+        try {
+            var stored_type = _storage.get_entity_type(path);
+            if (stored_type == null) {
+                return null;
+            }
+            
+            var label = _storage.get_entity_type_label(path);
+            var resolved = (!) stored_type;
+            
+            if (resolved == Core.EntityType.CONTAINER) {
+                return new Entities.Container(this, path);
+            }
+            
+            if (resolved == Core.EntityType.DOCUMENT) {
+                return new Entities.Document(this, path, label ?? "");
+            }
+            
+            if (resolved == Core.EntityType.CATEGORY) {
+                var cfg = _storage.get_category_config(path);
+                var category = cfg != null
+                    ? new Entities.Category(this, path, ((!) cfg).type_label, ((!) cfg).expression)
+                    : new Entities.Category(this, path, "", "");
+                category.register_hooks();
+                return category;
+            }
+            
+            if (resolved == Core.EntityType.CATALOGUE) {
+                var cfg = _storage.get_catalogue_config(path);
+                var catalogue = cfg != null
+                    ? new Entities.Catalogue(this, path, ((!) cfg).type_label, ((!) cfg).expression)
+                    : new Entities.Catalogue(this, path, "", "");
+                catalogue.register_hooks();
+                return catalogue;
+            }
+            
+            if (resolved == Core.EntityType.INDEX) {
+                var cfg = _storage.get_category_config(path);
+                return cfg != null
+                    ? new Entities.Index(this, path, ((!) cfg).type_label, ((!) cfg).expression)
+                    : new Entities.Index(this, path, "", "");
+            }
+            
+            // Unknown type: hooks cannot recover, so treat as absent.
+            return null;
+        } catch (Storage.StorageError e) {
+            return null;
+        }
+    }
+    
+    // === Private Query Methods ===
+    
+    /**
+     * Recursively searches for documents with the given type_label (async version).
+     *
+     * Performs a depth-first walk: matching documents are appended to
+     * //results// and containers are descended into.
+     *
+     * @param parent The entity whose subtree is searched
+     * @param type_label The document type label to match
+     * @param results Accumulator the matches are appended to
+     */
+    private async void _search_by_type_async(
+        Core.Entity parent, 
+        string type_label, 
+        Invercargill.DataStructures.Vector<Core.Entity> results
+    ) throws Core.EngineError {
+        var offspring = yield parent.get_children_async();
+        
+        foreach (var candidate in offspring) {
+            bool matching_doc = candidate.entity_type == Core.EntityType.DOCUMENT
+                && candidate.type_label == type_label;
+            if (matching_doc) {
+                results.add(candidate);
+            } else if (candidate.entity_type == Core.EntityType.CONTAINER) {
+                // An entity is never both a document and a container,
+                // so else-if is equivalent to two independent checks.
+                yield _search_by_type_async(candidate, type_label, results);
+            }
+        }
+    }
+    
+    /**
+     * Evaluates a simple expression on an entity.
+     *
+     * Supports:
+     * - Property name: checks if property exists and is truthy
+     * - property==value: checks if property equals value
+     * - property!=value: checks if property does not equal value
+     *
+     * Equality is tested before inequality, so "==" wins when an
+     * expression happens to contain both operators.
+     *
+     * @param entity The entity to evaluate on
+     * @param expression The expression to evaluate
+     * @return true if the expression matches
+     */
+    private async bool _evaluate_expression_async(Core.Entity entity, string expression) throws Core.EngineError {
+        // property==value : equal when the property exists and stringifies
+        // to the expected text.
+        if ("==" in expression) {
+            var sides = expression.split("==", 2);
+            if (sides.length == 2) {
+                var actual = yield entity.get_entity_property_async(sides[0].strip());
+                var expected = sides[1].strip();
+                return actual != null && ((!) actual).to_string() == expected;
+            }
+        }
+        
+        // property!=value : a missing property counts as "not equal".
+        if ("!=" in expression) {
+            var sides = expression.split("!=", 2);
+            if (sides.length == 2) {
+                var actual = yield entity.get_entity_property_async(sides[0].strip());
+                var expected = sides[1].strip();
+                return actual == null || ((!) actual).to_string() != expected;
+            }
+        }
+        
+        // Bare property name: present and non-null.
+        var present = yield entity.get_entity_property_async(expression.strip());
+        return present != null && !((!) present).is_null();
+    }
+    
+    // === Virtual Entity Resolution (Async) ===
+    
+    /**
+     * Attempts to resolve a path as a virtual child of an indexed entity.
+     *
+     * Called when direct entity lookup fails. If the parent is a Category,
+     * Catalogue, or Index, the child name is resolved through that entity's
+     * index; any other parent type yields null because container children
+     * must be persisted entities.
+     *
+     * @param path The path to resolve
+     * @return The entity, or null if not a virtual child
+     */
+    private async Core.Entity? _try_resolve_virtual_child_async(Core.EntityPath path) throws Core.EngineError {
+        // The root can never be somebody's virtual child.
+        if (path.is_root) {
+            return null;
+        }
+        
+        var container_path = path.parent;
+        var leaf_name = path.name;
+        
+        // A virtual child can only hang off an existing parent.
+        var parent_present = yield _entity_exists_async_internal(container_path);
+        if (!parent_present) {
+            return null;
+        }
+        
+        // A storage failure here simply means "cannot resolve".
+        Core.EntityType? container_type = null;
+        try {
+            container_type = yield _get_entity_type_async_internal(container_path);
+        } catch (Storage.StorageError e) {
+            return null;
+        }
+        
+        if (container_type == null) {
+            return null;
+        }
+        
+        var resolved = (!) container_type;
+        if (resolved == Core.EntityType.CATEGORY) {
+            return yield _resolve_category_child_async(container_path, leaf_name);
+        }
+        if (resolved == Core.EntityType.CATALOGUE) {
+            return yield _resolve_catalogue_child_async(container_path, leaf_name);
+        }
+        if (resolved == Core.EntityType.INDEX) {
+            return yield _resolve_index_child_async(container_path, leaf_name);
+        }
+        
+        // Container children must be persisted entities.
+        return null;
+    }
+    
+    /**
+     * Resolves a child of a Category by checking the member index.
+     *
+     * Member index entries are full document paths; the lookup compares
+     * only their leaf names against the requested child name.
+     *
+     * @param parent_path The category path
+     * @param child_name The child name to look up
+     * @return The document entity, or null if not found
+     */
+    private async Core.Entity? _resolve_category_child_async(
+        Core.EntityPath parent_path,
+        string child_name
+    ) throws Core.EngineError {
+        foreach (var member in _category_store.get_members(parent_path)) {
+            var member_path = Core.EntityPath.parse(member);
+            if (member_path.name != child_name) {
+                continue;
+            }
+            // Match found - materialise the real document behind the path.
+            return yield _create_entity_from_storage_async(member_path);
+        }
+        
+        return null;
+    }
+    
+    /**
+     * Resolves a child of a Catalogue.
+     *
+     * Resolution is two-phase: the name is first matched against the
+     * catalogue's group keys (yielding a CatalogueGroup), and only then
+     * against the leaf names of documents inside any group.
+     *
+     * @param parent_path The catalogue path
+     * @param child_name The child name to look up
+     * @return The entity (CatalogueGroup or Document), or null if not found
+     */
+    private async Core.Entity? _resolve_catalogue_child_async(
+        Core.EntityPath parent_path,
+        string child_name
+    ) throws Core.EngineError {
+        // Phase 1: the name may denote a whole group within the catalogue.
+        foreach (var group_key in _catalogue_store.get_group_keys(parent_path)) {
+            if (group_key != child_name) {
+                continue;
+            }
+            var owner = yield _create_entity_from_storage_async(parent_path);
+            var catalogue = owner as Entities.Catalogue;
+            if (catalogue != null) {
+                return new Entities.CatalogueGroup(this, (!) catalogue, child_name);
+            }
+        }
+        
+        // Phase 2: otherwise look for a document of that name in any group.
+        foreach (var group_key in _catalogue_store.get_group_keys(parent_path)) {
+            foreach (var member in _catalogue_store.get_group_members(parent_path, group_key)) {
+                var member_path = Core.EntityPath.parse(member);
+                if (member_path.name == child_name) {
+                    return yield _create_entity_from_storage_async(member_path);
+                }
+            }
+        }
+        
+        return null;
+    }
+    
+    /**
+     * Resolves a child of an Index by executing a search.
+     *
+     * The child_name is treated as a search pattern (e.g. "*term*").
+     *
+     * @param parent_path The index path
+     * @param child_name The search pattern
+     * @return The IndexResult entity, or null if no matches
+     */
+    private async Core.Entity? _resolve_index_child_async(
+        Core.EntityPath parent_path,
+        string child_name
+    ) throws Core.EngineError {
+        var loaded = yield _create_entity_from_storage_async(parent_path);
+        var index = loaded as Entities.Index;
+        if (index == null) {
+            return null;
+        }
+        
+        // search returns an IndexResult, or null when nothing matches.
+        return ((!) index).search(child_name);
+    }
+    
+    // === Virtual Entity Resolution (Sync for hooks) ===
+    
+    /**
+     * Synchronous virtual child resolution for hook context.
+     *
+     * Mirrors the async variant but talks to storage directly; failures
+     * are reported as null because hooks cannot propagate errors.
+     *
+     * @param path The path to resolve
+     * @return The entity, or null if not a virtual child
+     */
+    private Core.Entity? _try_resolve_virtual_child_sync(Core.EntityPath path) {
+        // The root can never be somebody's virtual child.
+        if (path.is_root) {
+            return null;
+        }
+        
+        var container_path = path.parent;
+        var leaf_name = path.name;
+        
+        // A virtual child can only hang off an existing parent.
+        if (!_storage.entity_exists(container_path)) {
+            return null;
+        }
+        
+        // A storage failure here simply means "cannot resolve".
+        Core.EntityType? container_type = null;
+        try {
+            container_type = _storage.get_entity_type(container_path);
+        } catch (Storage.StorageError e) {
+            return null;
+        }
+        
+        if (container_type == null) {
+            return null;
+        }
+        
+        var resolved = (!) container_type;
+        if (resolved == Core.EntityType.CATEGORY) {
+            return _resolve_category_child_sync(container_path, leaf_name);
+        }
+        if (resolved == Core.EntityType.CATALOGUE) {
+            return _resolve_catalogue_child_sync(container_path, leaf_name);
+        }
+        if (resolved == Core.EntityType.INDEX) {
+            return _resolve_index_child_sync(container_path, leaf_name);
+        }
+        
+        // Container children must be persisted entities.
+        return null;
+    }
+    
+    /**
+     * Synchronous category child resolution for hook context.
+     *
+     * Member index entries are full document paths; the lookup compares
+     * only their leaf names against the requested child name.
+     *
+     * @param parent_path The category path
+     * @param child_name The child name to look up
+     * @return The document entity, or null if not found
+     */
+    private Core.Entity? _resolve_category_child_sync(
+        Core.EntityPath parent_path,
+        string child_name
+    ) {
+        foreach (var member in _category_store.get_members(parent_path)) {
+            var member_path = Core.EntityPath.parse(member);
+            if (member_path.name != child_name) {
+                continue;
+            }
+            return _create_entity_from_storage_sync(member_path);
+        }
+        
+        return null;
+    }
+    
+    /**
+     * Synchronous catalogue child resolution for hook context.
+     *
+     * Two-phase, like the async variant: group keys first (yielding a
+     * CatalogueGroup), then document leaf names within any group.
+     *
+     * @param parent_path The catalogue path
+     * @param child_name The child name to look up
+     * @return The entity (CatalogueGroup or Document), or null if not found
+     */
+    private Core.Entity? _resolve_catalogue_child_sync(
+        Core.EntityPath parent_path,
+        string child_name
+    ) {
+        // Phase 1: the name may denote a whole group within the catalogue.
+        foreach (var group_key in _catalogue_store.get_group_keys(parent_path)) {
+            if (group_key != child_name) {
+                continue;
+            }
+            var catalogue = _create_entity_from_storage_sync(parent_path) as Entities.Catalogue;
+            if (catalogue != null) {
+                return new Entities.CatalogueGroup(this, (!) catalogue, child_name);
+            }
+        }
+        
+        // Phase 2: otherwise look for a document of that name in any group.
+        foreach (var group_key in _catalogue_store.get_group_keys(parent_path)) {
+            foreach (var member in _catalogue_store.get_group_members(parent_path, group_key)) {
+                var member_path = Core.EntityPath.parse(member);
+                if (member_path.name == child_name) {
+                    return _create_entity_from_storage_sync(member_path);
+                }
+            }
+        }
+        
+        return null;
+    }
+    
+    /**
+     * Synchronous index child resolution for hook context.
+     *
+     * @param parent_path The index path
+     * @param child_name The search pattern
+     * @return The IndexResult entity, or null if no matches
+     */
+    private Core.Entity? _resolve_index_child_sync(
+        Core.EntityPath parent_path,
+        string child_name
+    ) {
+        var loaded = _create_entity_from_storage_sync(parent_path);
+        var index = loaded as Entities.Index;
+        // A non-index parent (or load failure) cannot have search children.
+        return index == null ? null : ((!) index).search(child_name);
+    }
+    
+    // === Transaction Internal Methods ===
+    
+    /**
+     * Commits the current transaction (internal, called by EmbeddedTransaction).
+     *
+     * Thin forwarding wrapper around commit_async so the transaction object
+     * does not need to reach into the engine's public surface.
+     *
+     * @throws Core.EngineError if the commit fails
+     */
+    internal async void commit_transaction_internal() throws Core.EngineError {
+        yield commit_async();
+    }
+    
+    /**
+     * Rolls back the current transaction (internal, called by EmbeddedTransaction).
+     *
+     * Thin forwarding wrapper around rollback_async; deliberately does not
+     * throw, matching the non-throwing rollback contract.
+     */
+    internal async void rollback_transaction_internal() {
+        yield rollback_async();
+    }
+    
+    // === Cleanup ===
+    
+    /**
+     * Shuts down the engine and releases resources.
+     *
+     * Currently this only stops the I/O operation queue; the null check
+     * makes it safe to call even when the queue was never created.
+     */
+    public void shutdown() {
+        if (_queue != null) {
+            _queue.shutdown();
+        }
+    }
+}
+
+} // namespace Implexus.Engine

+ 403 - 0
src/Engine/EmbeddedTransaction.vala

@@ -0,0 +1,403 @@
+/**
+ * EmbeddedTransaction - Transaction implementation for EmbeddedEngine
+ * 
+ * Provides atomic operations for batch changes to the database.
+ * Integrates with the hook batching system for improved performance.
+ * 
+ * @version 0.2
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Transaction implementation for the embedded engine.
+ * 
+ * Transactions batch operations for atomic commit. Either all changes
+ * are committed, or none are (rollback).
+ * 
+ * Hook events are accumulated during the transaction and executed
+ * in batch at commit time for improved performance.
+ * 
+ * Example usage:
+ * {{{
+ * var tx = engine.begin_transaction();
+ * try {
+ *     var doc = engine.get_root().create_category("batch")
+ *         .create_document("item1", "Item");
+ *     doc.set_entity_property("value", new Invercargill.NativeElement<int>(42));
+ *     tx.commit();
+ * } catch (Error e) {
+ *     tx.rollback();
+ * }
+ * }}}
+ */
+public class EmbeddedTransaction : Object, Core.Transaction {
+    
+    // === Private Fields ===
+    
+    /**
+     * The engine that created this transaction.
+     */
+    private weak EmbeddedEngine _engine;
+    
+    /**
+     * Whether this transaction is still active.
+     */
+    private bool _active = true;
+    
+    /**
+     * Operations pending commit.
+     */
+    private Invercargill.DataStructures.Vector<PendingOperation> _operations;
+    
+    /**
+     * Snapshot of entity states for rollback support.
+     */
+    private Invercargill.DataStructures.Dictionary<string, Invercargill.Element?> _snapshots;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new EmbeddedTransaction.
+     * 
+     * This constructor begins batch mode on the hook manager, which
+     * accumulates hook events until commit or rollback. The underlying
+     * DBM transaction is NOT started here - it is already open by the
+     * time this runs (see note below).
+     * 
+     * @param engine The engine that created this transaction
+     * @throws Core.EngineError declared for forward compatibility; the
+     *         current body does not throw
+     */
+    public EmbeddedTransaction(EmbeddedEngine engine) throws Core.EngineError {
+        _engine = engine;
+        _operations = new Invercargill.DataStructures.Vector<PendingOperation>();
+        _snapshots = new Invercargill.DataStructures.Dictionary<string, Invercargill.Element?>();
+        
+        // Begin hook batching for improved performance
+        var hook_manager = engine.configuration.hook_manager;
+        if (hook_manager != null) {
+            ((!) hook_manager).begin_batch();
+        }
+        
+        // Note: DBM transaction is already started by EmbeddedEngine.begin_transaction_async()
+    }
+    
+    // === Transaction Interface ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Becomes false permanently after the first commit_async() or
+     * rollback_async() completes; a transaction cannot be reused.
+     */
+    public bool active { 
+        get { return _active; } 
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Commits all pending operations to storage and executes
+     * accumulated hook events in batch.
+     *
+     * The ordering is deliberate: operations are applied, the DBM
+     * transaction is committed, and only then is the hook batch flushed,
+     * so hooks always observe fully committed data. Any EngineError
+     * triggers a full rollback before being re-thrown.
+     *
+     * @throws Core.EngineError if the transaction is no longer active,
+     *         or if applying an operation or the DBM commit fails
+     */
+    public async void commit_async() throws Core.EngineError {
+        if (!_active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("Transaction is not active");
+        }
+        
+        try {
+            // Apply all pending operations
+            foreach (var op in _operations) {
+                apply_operation(op);
+            }
+            
+            // Commit the DBM transaction FIRST - this ensures all data is persisted
+            // before hooks run, so hooks can safely read the committed state
+            yield _engine.commit_transaction_internal();
+            
+            // Commit hook batch - executes all accumulated hooks
+            // Hooks run AFTER the DBM commit so they can read committed data
+            // without needing to start a new transaction
+            var hook_manager = _engine.configuration.hook_manager;
+            if (hook_manager != null) {
+                ((!) hook_manager).commit_batch();
+            }
+            
+            // Clear operations and mark as inactive
+            _operations.clear();
+            _snapshots.clear();
+            _active = false;
+        } catch (Core.EngineError e) {
+            // Rollback on failure
+            yield rollback_async();
+            throw e;
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Discards all pending operations and accumulated hook events.
+     * Idempotent: calling it on an inactive transaction is a no-op.
+     * The DBM transaction is rolled back last, after local state is
+     * cleared.
+     */
+    public async void rollback_async() {
+        if (!_active) {
+            return;
+        }
+        
+        // Rollback hook batch - discards all accumulated hooks
+        var hook_manager = _engine.configuration.hook_manager;
+        if (hook_manager != null) {
+            ((!) hook_manager).rollback_batch();
+        }
+        
+        // Restore snapshots
+        // NOTE(review): this loop is currently a no-op placeholder - the
+        // body never restores anything. Snapshot restore would need
+        // storage-layer support; confirm whether _snapshots is still needed.
+        foreach (var key in _snapshots.keys) {
+            var value = _snapshots.get(key);
+            if (value != null && !((!) value).is_null()) {
+                // Would restore the snapshot - storage layer would need this
+            }
+        }
+        
+        // Clear operations and mark as inactive
+        _operations.clear();
+        _snapshots.clear();
+        _active = false;
+        
+        // Rollback the DBM transaction
+        yield _engine.rollback_transaction_internal();
+    }
+    
+    // === Internal Methods for Recording Operations ===
+    
+    /**
+     * Records a property set operation.
+     * 
+     * The operation is only queued here; it is applied when the
+     * transaction commits.
+     * 
+     * @param path The entity path
+     * @param property_name The property name
+     * @param value The new value
+     */
+    internal void record_set_property(
+        Core.EntityPath path, 
+        string property_name, 
+        Invercargill.Element value
+    ) {
+        _operations.add(new PendingOperation() {
+            type = OperationType.SET_PROPERTY,
+            path = path,
+            property_name = property_name,
+            value = value
+        });
+    }
+    
+    /**
+     * Records an entity creation operation.
+     * 
+     * The operation is only queued here; it is applied when the
+     * transaction commits.
+     * 
+     * @param path The entity path
+     * @param entity_type The entity type
+     * @param type_label The type label (for documents)
+     */
+    internal void record_create_entity(
+        Core.EntityPath path, 
+        Core.EntityType entity_type, 
+        string? type_label
+    ) {
+        _operations.add(new PendingOperation() {
+            type = OperationType.CREATE_ENTITY,
+            path = path,
+            entity_type = entity_type,
+            type_label = type_label
+        });
+    }
+    
+    /**
+     * Records an entity deletion operation.
+     * 
+     * The operation is only queued here; it is applied when the
+     * transaction commits.
+     * 
+     * @param path The entity path
+     */
+    internal void record_delete_entity(Core.EntityPath path) {
+        _operations.add(new PendingOperation() {
+            type = OperationType.DELETE_ENTITY,
+            path = path
+        });
+    }
+    
+    /**
+     * Records adding a child to a category.
+     * 
+     * The operation is only queued here; it is applied when the
+     * transaction commits.
+     * 
+     * @param parent The parent path
+     * @param child_name The child name
+     */
+    internal void record_add_child(Core.EntityPath parent, string child_name) {
+        _operations.add(new PendingOperation() {
+            type = OperationType.ADD_CHILD,
+            path = parent,
+            child_name = child_name
+        });
+    }
+    
+    /**
+     * Records removing a child from a category.
+     *
+     * The operation is only queued here; it is applied when the
+     * transaction commits.
+     *
+     * @param parent The parent path
+     * @param child_name The child name
+     */
+    internal void record_remove_child(Core.EntityPath parent, string child_name) {
+        _operations.add(new PendingOperation() {
+            type = OperationType.REMOVE_CHILD,
+            path = parent,
+            child_name = child_name
+        });
+    }
+    
+    /**
+     * Queues a SAVE_PROPERTIES operation on this transaction.
+     *
+     * A SAVE_PROPERTIES operation stores the complete property set of an
+     * entity, so earlier SAVE_PROPERTIES operations queued for the same
+     * path are dropped first: only the most recent snapshot is written
+     * at commit time.
+     *
+     * @param path The entity path
+     * @param properties The properties to save
+     */
+    internal void record_save_properties(
+        Core.EntityPath path,
+        Invercargill.Properties properties
+    ) {
+        // Walk backwards so removals do not shift indices we have yet
+        // to visit; this drops every stale snapshot for this path.
+        int i = (int)_operations.length;
+        while (i > 0) {
+            i--;
+            var existing = _operations.get(i);
+            if (existing.type == OperationType.SAVE_PROPERTIES && existing.path.equals(path)) {
+                _operations.remove_at(i);
+            }
+        }
+        
+        var pending = new PendingOperation();
+        pending.path = path;
+        pending.properties = properties;
+        pending.type = OperationType.SAVE_PROPERTIES;
+        _operations.add(pending);
+    }
+    
+    /**
+     * Queues a SAVE_CATEGORY_CONFIG operation on this transaction.
+     *
+     * @param path The category path
+     * @param type_label The type label for the category
+     * @param expression The expression for the category
+     */
+    internal void record_save_category_config(
+        Core.EntityPath path,
+        string type_label,
+        string expression
+    ) {
+        var pending = new PendingOperation();
+        pending.path = path;
+        pending.expression = expression;
+        pending.type_label = type_label;
+        pending.type = OperationType.SAVE_CATEGORY_CONFIG;
+        _operations.add(pending);
+    }
+    
+    /**
+     * Queues a SAVE_CATALOGUE_CONFIG operation on this transaction.
+     *
+     * @param path The catalogue path
+     * @param type_label The type label for the catalogue
+     * @param expression The expression for the catalogue
+     */
+    internal void record_save_catalogue_config(
+        Core.EntityPath path,
+        string type_label,
+        string expression
+    ) {
+        var pending = new PendingOperation();
+        pending.path = path;
+        pending.expression = expression;
+        pending.type_label = type_label;
+        pending.type = OperationType.SAVE_CATALOGUE_CONFIG;
+        _operations.add(pending);
+    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Applies a single recorded operation to the underlying storage.
+     *
+     * Called at commit time, once per pending operation, in recording
+     * order. Any Storage.StorageError raised by the backend is wrapped
+     * in a Core.EngineError.STORAGE_ERROR so callers only ever see
+     * engine-level errors.
+     *
+     * @param op The pending operation to apply
+     * @throws Core.EngineError if the storage backend reports a failure
+     */
+    private void apply_operation(PendingOperation op) throws Core.EngineError {
+        var storage = _engine.configuration.storage;
+        
+        try {
+            switch (op.type) {
+                case OperationType.SET_PROPERTY:
+                    // Properties are now handled via SAVE_PROPERTIES
+                    // This case is kept for backward compatibility but does nothing
+                    break;
+                    
+                case OperationType.CREATE_ENTITY:
+                    storage.store_entity_metadata(op.path, op.entity_type, op.type_label);
+                    if (op.entity_type == Core.EntityType.DOCUMENT) {
+                        // New documents start with an empty property set.
+                        storage.store_properties(op.path, new Invercargill.DataStructures.PropertyDictionary());
+                        // Register document in type index for Index.populate_index()
+                        if (op.type_label != null) {
+                            _engine.entity_store.register_document_type((!) op.type_label, op.path.to_string());
+                        }
+                    }
+                    break;
+                    
+                case OperationType.DELETE_ENTITY:
+                    storage.delete_entity(op.path);
+                    break;
+                    
+                case OperationType.ADD_CHILD:
+                    storage.add_child(op.path, op.child_name);
+                    break;
+                    
+                case OperationType.REMOVE_CHILD:
+                    storage.remove_child(op.path, op.child_name);
+                    break;
+                    
+                case OperationType.SAVE_PROPERTIES:
+                    // op.properties should always be set by record_save_properties;
+                    // the null check is defensive.
+                    if (op.properties != null) {
+                        storage.store_properties(op.path, (!) op.properties);
+                    }
+                    break;
+                    
+                case OperationType.SAVE_CATEGORY_CONFIG:
+                    // type_label/expression are set by the recorder; fall back
+                    // to "" defensively rather than pass null to storage.
+                    storage.store_category_config(op.path, op.type_label ?? "", op.expression ?? "");
+                    break;
+                    
+                case OperationType.SAVE_CATALOGUE_CONFIG:
+                    storage.store_catalogue_config(op.path, op.type_label ?? "", op.expression ?? "");
+                    break;
+            }
+        } catch (Storage.StorageError e) {
+            throw new Core.EngineError.STORAGE_ERROR("Transaction commit failed: %s".printf(e.message));
+        }
+    }
+}
+
+/**
+ * Types of pending operations recorded by a transaction.
+ *
+ * Each value corresponds to one case handled in apply_operation().
+ */
+internal enum OperationType {
+    /** Legacy single-property write; now a no-op (see SAVE_PROPERTIES). */
+    SET_PROPERTY,
+    /** Create an entity (metadata plus, for documents, an empty property set). */
+    CREATE_ENTITY,
+    /** Delete an entity from storage. */
+    DELETE_ENTITY,
+    /** Link a named child under a parent. */
+    ADD_CHILD,
+    /** Unlink a named child from a parent. */
+    REMOVE_CHILD,
+    /** Store an entity's full property set (last write per path wins). */
+    SAVE_PROPERTIES,
+    /** Store a category's type label and expression. */
+    SAVE_CATEGORY_CONFIG,
+    /** Store a catalogue's type label and expression. */
+    SAVE_CATALOGUE_CONFIG
+}
+
+/**
+ * Represents a pending operation in a transaction.
+ *
+ * A simple data carrier: only the fields relevant to the operation's
+ * type are populated by the record_* methods; the rest stay at their
+ * default values.
+ */
+internal class PendingOperation : Object {
+    // Discriminator: which fields below are meaningful.
+    public OperationType type;
+    // Target entity path (parent path for ADD_CHILD/REMOVE_CHILD).
+    public Core.EntityPath path;
+    // For SET_PROPERTY.
+    public string? property_name;
+    // For SET_PROPERTY.
+    public Invercargill.Element? value;
+    // For CREATE_ENTITY.
+    public Core.EntityType entity_type;
+    // For CREATE_ENTITY and the two SAVE_*_CONFIG operations.
+    public string? type_label;
+    // For ADD_CHILD/REMOVE_CHILD.
+    public string? child_name;
+    // For SAVE_PROPERTIES.
+    public Invercargill.Properties? properties;
+    public string? expression;  // For category/catalogue config
+}
+
+} // namespace Implexus.Engine

+ 280 - 0
src/Engine/EngineConfiguration.vala

@@ -0,0 +1,280 @@
+/**
+ * EngineConfiguration - Configuration for engine creation
+ * 
+ * Provides a unified configuration class for creating engines in
+ * either embedded or remote mode. This allows applications to easily
+ * switch between modes without changing how they interact with the engine.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Engine operation mode.
+ *
+ * Selects which group of EngineConfiguration settings applies:
+ * the storage settings (EMBEDDED) or the connection settings (REMOTE).
+ */
+public enum EngineMode {
+    /**
+     * Embedded mode - direct in-process database operations.
+     * 
+     * Best for single-application scenarios where the database
+     * is exclusively used by one process.
+     */
+    EMBEDDED,
+    
+    /**
+     * Remote mode - connect to a remote implexusd server.
+     * 
+     * Best for multi-client scenarios where multiple processes
+     * need to access the same database concurrently.
+     */
+    REMOTE
+}
+
+/**
+ * Configuration class for engine creation.
+ * 
+ * EngineConfiguration provides a unified way to configure and create
+ * engines in either embedded or remote mode. Use the static factory
+ * methods for common configurations, or set properties directly for
+ * more control.
+ * 
+ * Example usage:
+ * {{{
+ * // Embedded mode with defaults
+ * var config = EngineConfiguration.embedded("./my_database");
+ * var engine = EngineFactory.create(config);
+ * 
+ * // Remote mode with defaults
+ * var config = EngineConfiguration.remote("localhost", 9876);
+ * var engine = EngineFactory.create(config);
+ * 
+ * // Custom configuration
+ * var config = new EngineConfiguration();
+ * config.mode = EngineMode.EMBEDDED;
+ * config.storage_path = "./data";
+ * config.enable_cache = true;
+ * config.cache_size = 2000;
+ * }}}
+ */
+public class EngineConfiguration : Object {
+    
+    // === Mode Selection ===
+    
+    /**
+     * The engine operation mode.
+     * 
+     * Default: EMBEDDED
+     */
+    public EngineMode mode { get; set; default = EngineMode.EMBEDDED; }
+    
+    // === Embedded Mode Settings ===
+    
+    /**
+     * Storage path for embedded mode.
+     * 
+     * The directory where database files will be stored.
+     * Will be created if it doesn't exist.
+     * 
+     * Default: "./data"
+     */
+    public string storage_path { get; set; default = "./data"; }
+    
+    /**
+     * Whether to enable entity caching for embedded mode.
+     * 
+     * Default: true
+     */
+    public bool enable_cache { get; set; default = true; }
+    
+    /**
+     * Maximum number of entities to cache.
+     * 
+     * Default: 1000
+     */
+    public int cache_size { get; set; default = 1000; }
+    
+    /**
+     * Whether to auto-sync changes to disk.
+     * 
+     * Default: true
+     */
+    public bool auto_sync { get; set; default = true; }
+    
+    // === Remote Mode Settings ===
+    
+    /**
+     * Remote server hostname.
+     * 
+     * Default: "localhost"
+     */
+    public string host { get; set; default = "localhost"; }
+    
+    /**
+     * Remote server port.
+     * 
+     * Default: 9876
+     */
+    public uint16 port { get; set; default = 9876; }
+    
+    /**
+     * Connection timeout in milliseconds.
+     * 
+     * Default: 30000 (30 seconds)
+     */
+    public uint timeout_ms { get; set; default = 30000; }
+    
+    /**
+     * Maximum number of connection retry attempts.
+     * 
+     * Default: 3
+     */
+    public uint max_retries { get; set; default = 3; }
+    
+    /**
+     * Delay between retry attempts in milliseconds.
+     *
+     * Default: 1000 (1 second)
+     */
+    public uint retry_delay_ms { get; set; default = 1000; }
+    
+    // === Backend Selection (Embedded Mode) ===
+    
+    /**
+     * Storage backend type for embedded mode.
+     * 
+     * Valid values: "filesystem", "lmdb", "gdbm"
+     * 
+     * Default: "filesystem"
+     */
+    public string storage_type { get; set; default = "filesystem"; }
+    
+    // === LMDB-Specific Settings ===
+    
+    /**
+     * LMDB map size in bytes.
+     * 
+     * This defines the maximum size of the database. LMDB uses memory-mapped
+     * files, so this should be set appropriately for your use case.
+     * 
+     * Default: 1073741824 (1 GB)
+     */
+    public int64 lmdb_map_size { get; set; default = 1073741824; }
+    
+    // === Manager References ===
+    
+    /**
+     * HookManager for entity change notifications.
+     *
+     * Set by EmbeddedEngine during initialization.
+     */
+    public HookManager? hook_manager { get; set; default = null; }
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new EngineConfiguration with default settings.
+     */
+    public EngineConfiguration() {
+        // All defaults are set in property declarations
+    }
+    
+    /**
+     * Creates a configuration for embedded mode.
+     * 
+     * This is a convenience factory method that sets the mode
+     * to EMBEDDED and configures the storage path.
+     * 
+     * @param storage_path The directory for database storage
+     * @return A configuration for embedded mode
+     */
+    public static EngineConfiguration embedded(string storage_path = "./data") {
+        var config = new EngineConfiguration();
+        config.mode = EngineMode.EMBEDDED;
+        config.storage_path = storage_path;
+        return config;
+    }
+    
+    /**
+     * Creates a configuration for remote mode.
+     * 
+     * This is a convenience factory method that sets the mode
+     * to REMOTE and configures the connection parameters.
+     * 
+     * @param host The server hostname
+     * @param port The server port
+     * @return A configuration for remote mode
+     */
+    public static EngineConfiguration remote(string host = "localhost", uint16 port = 9876) {
+        var config = new EngineConfiguration();
+        config.mode = EngineMode.REMOTE;
+        config.host = host;
+        config.port = port;
+        return config;
+    }
+    
+    /**
+     * Creates a configuration from a connection string.
+     * 
+     * This factory method parses a connection string and creates
+     * the appropriate configuration. See ConnectionString for
+     * supported formats.
+     * 
+     * @param connection_string The connection string to parse
+     * @return A configuration based on the connection string
+     * @throws ConnectionStringError if parsing fails
+     * 
+     * Example:
+     * {{{
+     * var config = EngineConfiguration.from_connection_string("lmdb:///var/lib/db");
+     * var config = EngineConfiguration.from_connection_string("implexus://server:9876");
+     * }}}
+     */
+    public static EngineConfiguration from_connection_string(string connection_string) throws ConnectionStringError {
+        var cs = new ConnectionString(connection_string);
+        return cs.to_configuration();
+    }
+    
+    /**
+     * Creates a copy of this configuration.
+     * 
+     * Useful when you need to create multiple engines with
+     * slight variations. All settings are copied, including the
+     * backend selection (storage_type) and LMDB map size.
+     * The hook_manager reference is NOT copied, since it is set by
+     * the engine itself during initialization and belongs to a
+     * single engine instance.
+     * 
+     * @return A new EngineConfiguration with the same settings
+     */
+    public EngineConfiguration copy() {
+        var copy = new EngineConfiguration();
+        copy.mode = this.mode;
+        
+        // Embedded settings
+        copy.storage_path = this.storage_path;
+        copy.enable_cache = this.enable_cache;
+        copy.cache_size = this.cache_size;
+        copy.auto_sync = this.auto_sync;
+        
+        // Remote settings
+        copy.host = this.host;
+        copy.port = this.port;
+        copy.timeout_ms = this.timeout_ms;
+        copy.max_retries = this.max_retries;
+        copy.retry_delay_ms = this.retry_delay_ms;
+        
+        // Backend selection and backend-specific settings.
+        // Previously these were omitted, silently resetting copies to
+        // the "filesystem" backend and the default LMDB map size.
+        copy.storage_type = this.storage_type;
+        copy.lmdb_map_size = this.lmdb_map_size;
+        
+        return copy;
+    }
+    
+    /**
+     * Returns a string representation of this configuration.
+     * 
+     * @return A string describing the configuration
+     */
+    public string describe() {
+        if (mode == EngineMode.EMBEDDED) {
+            return "EngineConfiguration(EMBEDDED, path=%s)".printf(storage_path);
+        } else {
+            return "EngineConfiguration(REMOTE, %s:%u)".printf(host, port);
+        }
+    }
+}
+
+} // namespace Implexus.Engine

+ 232 - 0
src/Engine/EngineFactory.vala

@@ -0,0 +1,232 @@
+/**
+ * EngineFactory - Factory for creating Implexus engines
+ * 
+ * Provides static factory methods for creating engines in either
+ * embedded or remote mode. This is the primary entry point for
+ * applications to obtain an engine instance.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Factory class for creating engine instances.
+ * 
+ * EngineFactory provides a unified API for creating engines based on
+ * configuration. It handles the complexity of setting up both embedded
+ * and remote engines, making it trivial to switch between modes.
+ * 
+ * Example usage:
+ * {{{
+ * // Using configuration object
+ * var config = EngineConfiguration.embedded("./my_database");
+ * var engine = EngineFactory.create(config);
+ * 
+ * // Using convenience methods
+ * var engine = EngineFactory.create_embedded("./my_database");
+ * var engine = EngineFactory.create_remote("localhost", 9876);
+ * 
+ * // Use the engine - same API regardless of mode
+ * var root = engine.get_root();
+ * var users = root.create_category("users");
+ * }}}
+ * 
+ * The factory handles:
+ * - Storage initialization for embedded mode
+ * - Connection establishment for remote mode
+ * - Retry logic for remote connections
+ * - Proper error handling and reporting
+ */
+public class EngineFactory : Object {
+    
+    // Private constructor - this class only provides static methods
+    private EngineFactory() {
+    }
+    
+    /**
+     * Creates an engine based on the given configuration.
+     * 
+     * This is the primary factory method. It examines the configuration
+     * mode and creates the appropriate engine type.
+     * 
+     * @param config The engine configuration
+     * @return A new engine instance configured as specified
+     * @throws Core.EngineError if engine creation or connection fails
+     */
+    public static Core.Engine create(EngineConfiguration config) throws Core.EngineError {
+        if (config.mode == EngineMode.EMBEDDED) {
+            return create_embedded_engine(config);
+        } else {
+            return create_remote_engine(config);
+        }
+    }
+    
+    /**
+     * Creates an embedded engine with the specified storage path.
+     * 
+     * This is a convenience method for quickly creating an embedded
+     * engine with default settings.
+     * 
+     * @param storage_path The directory for database storage (default: "./data")
+     * @return A new EmbeddedEngine instance
+     * @throws Core.EngineError if storage initialization fails
+     */
+    public static Core.Engine create_embedded(string storage_path = "./data") throws Core.EngineError {
+        var config = EngineConfiguration.embedded(storage_path);
+        return create_embedded_engine(config);
+    }
+    
+    /**
+     * Creates a remote engine and connects to the specified server.
+     * 
+     * This is a convenience method for quickly creating a remote
+     * engine with default settings. The connection is established
+     * before returning.
+     * 
+     * @param host The server hostname (default: "localhost")
+     * @param port The server port (default: 9876)
+     * @return A new RemoteEngine instance, already connected
+     * @throws Core.EngineError if connection fails
+     */
+    public static Core.Engine create_remote(string host = "localhost", uint16 port = 9876) throws Core.EngineError {
+        var config = EngineConfiguration.remote(host, port);
+        return create_remote_engine(config);
+    }
+    
+    /**
+     * Creates an engine from a connection string.
+     * 
+     * This is a convenience method for creating engines from
+     * configuration files or command-line arguments.
+     * 
+     * Supported formats:
+     * - Short form embedded: lmdb:///path/to/db, gdbm:///path/to/db, filesystem:///path/to/db
+     * - Full form embedded: implexus://embedded?backend=lmdb&path=/path/to/db
+     * - Remote: implexus://hostname:9876, implexus://hostname:9876?timeout=30
+     * 
+     * @param connection_string The connection string to parse
+     * @return A new Engine instance configured as specified
+     * @throws ConnectionStringError if the connection string is invalid
+     * @throws Core.EngineError if engine creation or connection fails
+     * 
+     * Example:
+     * {{{
+     * // Embedded mode
+     * var engine = EngineFactory.from_connection_string("lmdb:///var/lib/myapp/db");
+     * 
+     * // Remote mode
+     * var engine = EngineFactory.from_connection_string("implexus://server.example.com:9876");
+     * }}}
+     */
+    public static Core.Engine from_connection_string(string connection_string) throws ConnectionStringError, Core.EngineError {
+        var cs = new ConnectionString(connection_string);
+        return cs.create_engine();
+    }
+    
+    // === Internal Factory Methods ===
+    
+    /**
+     * Creates an embedded engine from configuration.
+     *
+     * Selects the Dbm backend from config.storage_type (unrecognized
+     * values fall back to the filesystem backend), wraps it in
+     * BasicStorage, and builds an EmbeddedEngine around it.
+     */
+    private static Core.Engine create_embedded_engine(EngineConfiguration config) throws Core.EngineError {
+        try {
+            // Create the appropriate Dbm backend based on storage_type
+            Storage.Dbm dbm;
+            string storage_type = config.storage_type.down();
+            
+            switch (storage_type) {
+                case "lmdb":
+                    var lmdb = new Storage.LmdbDbm();
+                    lmdb.open(config.storage_path);
+                    dbm = lmdb;
+                    break;
+                case "gdbm":
+                    var gdbm = new Storage.GdbmDbm();
+                    gdbm.open(config.storage_path);
+                    dbm = gdbm;
+                    break;
+                case "filesystem":
+                default:
+                    dbm = new Storage.FilesystemDbm(config.storage_path);
+                    break;
+            }
+            
+            // Create storage with the selected Dbm backend
+            var storage = new Storage.BasicStorage(dbm);
+            var storage_config = new Core.StorageConfiguration(storage);
+            storage_config.storage_path = config.storage_path;
+            storage_config.enable_cache = config.enable_cache;
+            storage_config.cache_size = config.cache_size;
+            storage_config.auto_sync = config.auto_sync;
+            
+            // Create and return the embedded engine
+            return new EmbeddedEngine(storage_config);
+            
+        } catch (Error e) {
+            // A single catch clause suffices: Storage.StorageError (like
+            // every errordomain) derives from GLib.Error, and the removed
+            // dedicated StorageError clause produced an identical message.
+            throw new Core.EngineError.STORAGE_ERROR(
+                "Failed to create embedded engine: %s".printf(e.message)
+            );
+        }
+    }
+    
+    /**
+     * Creates a remote engine from configuration.
+     *
+     * Note: This is a synchronous wrapper around the async connect_async().
+     * It blocks the calling thread until connection is established, and
+     * runs a nested MainLoop on the default context while waiting.
+     */
+    private static Core.Engine create_remote_engine(EngineConfiguration config) throws Core.EngineError {
+        var engine = new RemoteEngine(config.host, config.port);
+        
+        // Attempt connection with retry logic using synchronous wrapper
+        uint attempts = 0;
+        Error? last_error = null;
+        
+        while (attempts <= config.max_retries) {
+            try {
+                // Synchronous wrapper for async connect
+                var loop = new MainLoop();
+                GLib.Error? async_error = null;
+                engine.connect_async.begin((obj, res) => {
+                    try {
+                        engine.connect_async.end(res);
+                    } catch (GLib.Error e) {
+                        async_error = e;
+                    }
+                    loop.quit();
+                });
+                loop.run();
+                
+                if (async_error != null) {
+                    throw (!) async_error;
+                }
+                
+                return engine;
+                
+            } catch (Error e) {
+                last_error = e;
+                attempts++;
+                
+                if (attempts <= config.max_retries) {
+                    // Wait before retrying (blocks the calling thread)
+                    Thread.usleep((ulong) config.retry_delay_ms * 1000);
+                }
+            }
+        }
+        
+        // All retries failed
+        throw new Core.EngineError.CONNECTION_ERROR(
+            "Failed to connect to %s:%u after %u attempts: %s".printf(
+                config.host, config.port, attempts,
+                last_error != null ? ((!) last_error).message : "unknown error"
+            )
+        );
+    }
+}
+
+} // namespace Implexus.Engine

+ 1175 - 0
src/Engine/HookManager.vala

@@ -0,0 +1,1175 @@
+/**
+ * HookManager - Notification system for entity changes with batch support
+ *
+ * Provides a hook mechanism for notifying indexed entities when
+ * documents are created, modified, or deleted so they can update
+ * their indices.
+ *
+ * Supports batched execution during transactions for improved performance.
+ *
+ * @version 0.2
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Types of entity changes that can trigger hooks.
+ */
+public enum EntityChangeType {
+    /**
+     * An entity was created.
+     */
+    CREATED,
+    
+    /**
+     * An entity was modified.
+     *
+     * Property changes are also reported as MODIFIED events, with the
+     * property name and old/new values attached to the HookEvent.
+     */
+    MODIFIED,
+    
+    /**
+     * An entity was deleted.
+     */
+    DELETED
+}
+
+/**
+ * Types of state changes for tracking during batching.
+ *
+ * Internal analogue of EntityChangeType used by EntityFinalState
+ * to consolidate a transaction's changes per entity.
+ */
+internal enum StateChangeType {
+    /** The entity was created during the transaction. */
+    CREATED,
+    /** The entity was modified (no flag is tracked for this). */
+    MODIFIED,
+    /** The entity was deleted during the transaction. */
+    DELETED
+}
+
+/**
+ * Represents a queued hook event for batched processing.
+ *
+ * Immutable-by-convention data carrier built via GObject-style
+ * construction; property-change events additionally carry the
+ * property name and old/new values.
+ */
+public class HookEvent : Object {
+    /**
+     * The type of change that occurred.
+     */
+    public EntityChangeType change_type { get; construct set; }
+    
+    /**
+     * The entity path affected.
+     */
+    public Core.EntityPath entity_path { get; construct set; }
+    
+    /**
+     * The entity type.
+     */
+    public Core.EntityType entity_type { get; construct set; }
+    
+    /**
+     * The type label for documents.
+     */
+    public string? type_label { get; construct set; }
+    
+    /**
+     * The property name for property changes.
+     */
+    public string? property_name { get; construct set; }
+    
+    /**
+     * The old property value.
+     */
+    public Invercargill.Element? old_value { get; construct set; }
+    
+    /**
+     * The new property value.
+     */
+    public Invercargill.Element? new_value { get; construct set; }
+    
+    /**
+     * The entity reference (for CREATED/MODIFIED events, avoids lookup in handlers).
+     * This is internal to avoid exposing mutable entity references externally.
+     *
+     * NOTE: the reference is weak, so it becomes null if the entity is
+     * finalized before the event is processed; get_entity() then falls
+     * back to an engine lookup.
+     */
+    internal weak Core.Entity? entity_ref { get; set; }
+    
+    /**
+     * Creates a new hook event.
+     *
+     * Only change_type, entity_path and entity_type are required; the
+     * remaining arguments default to null and are filled in per event
+     * kind by the HookBatch record_* methods.
+     */
+    public HookEvent(
+        EntityChangeType change_type,
+        Core.EntityPath entity_path,
+        Core.EntityType entity_type,
+        string? type_label = null,
+        string? property_name = null,
+        Invercargill.Element? old_value = null,
+        Invercargill.Element? new_value = null,
+        Core.Entity? entity = null
+    ) {
+        Object(
+            change_type: change_type,
+            entity_path: entity_path,
+            entity_type: entity_type,
+            type_label: type_label,
+            property_name: property_name,
+            old_value: old_value,
+            new_value: new_value
+        );
+        // Assigned after Object(): entity_ref is a weak field, not a
+        // construct property.
+        this.entity_ref = entity;
+    }
+    
+    /**
+     * Gets the entity for this event.
+     * For DELETED events, this returns null.
+     * For CREATED/MODIFIED events, this returns the cached entity if available,
+     * otherwise falls back to looking up via the engine.
+     *
+     * The fallback only works for EmbeddedEngine, which provides a
+     * synchronous lookup; for any other engine type null is returned.
+     *
+     * @param engine The engine to use for fallback lookup (must be EmbeddedEngine for sync access)
+     * @return The entity, or null if deleted or not found
+     */
+    public Core.Entity? get_entity(Core.Engine? engine = null) {
+        if (change_type == EntityChangeType.DELETED) {
+            return null;
+        }
+        if (entity_ref != null) {
+            return entity_ref;
+        }
+        if (engine != null) {
+            // Hooks run synchronously in DBM thread, so we need the sync method
+            var embedded = engine as EmbeddedEngine;
+            if (embedded != null) {
+                return ((!) embedded).get_entity_or_null_sync(entity_path);
+            }
+        }
+        return null;
+    }
+}
+
+/**
+ * Represents a property change with old and new values.
+ */
+public class PropertyChange : Object {
+    /**
+     * The previous value.
+     *
+     * NOTE(review): a null old_value presumably means the property did
+     * not previously exist — confirm against the recording call sites.
+     */
+    public Invercargill.Element? old_value;
+    
+    /**
+     * The new value.
+     */
+    public Invercargill.Element? new_value;
+    
+    /**
+     * Creates a new PropertyChange.
+     *
+     * @param old_value The previous value (may be null)
+     * @param new_value The new value (may be null)
+     */
+    public PropertyChange(Invercargill.Element? old_value, Invercargill.Element? new_value) {
+        this.old_value = old_value;
+        this.new_value = new_value;
+    }
+}
+
+/**
+ * Tracks the final state of an entity during a transaction.
+ *
+ * Consolidates repeated changes to one entity: creation/deletion are
+ * recorded as flags, and per-property changes are merged so that each
+ * property keeps its original old value and its latest new value.
+ */
+internal class EntityFinalState : Object {
+    /**
+     * The entity being tracked.
+     */
+    public Core.Entity entity;
+    
+    /**
+     * Whether the entity was created during the transaction.
+     */
+    public bool was_created = false;
+    
+    /**
+     * Whether the entity was deleted during the transaction.
+     */
+    public bool was_deleted = false;
+    
+    /**
+     * Property changes accumulated during the transaction.
+     */
+    public Invercargill.DataStructures.Dictionary<string, PropertyChange> property_changes;
+    
+    /**
+     * Creates a new EntityFinalState for an entity.
+     */
+    public EntityFinalState(Core.Entity entity) {
+        this.entity = entity;
+        this.property_changes = new Invercargill.DataStructures.Dictionary<string, PropertyChange>();
+    }
+    
+    /**
+     * Records a state change.
+     *
+     * MODIFIED is deliberately a no-op here: modifications are tracked
+     * via record_property_change() rather than a flag.
+     */
+    public void record_change(StateChangeType change_type) {
+        switch (change_type) {
+            case StateChangeType.CREATED:
+                was_created = true;
+                break;
+            case StateChangeType.DELETED:
+                was_deleted = true;
+                break;
+            case StateChangeType.MODIFIED:
+                break;
+        }
+    }
+    
+    /**
+     * Records a property change.
+     *
+     * On repeated changes to the same property, the first old_value is
+     * preserved and only new_value is updated, so the merged change
+     * spans the whole transaction.
+     */
+    public void record_property_change(
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        if (!property_changes.has(property_name)) {
+            property_changes.set(property_name, new PropertyChange(old_value, new_value));
+        } else {
+            // Update the new value, keep the original old value
+            var existing = property_changes.get(property_name);
+            existing.new_value = new_value;
+        }
+    }
+}
+
+/**
+ * Accumulates hook events during a transaction for batched execution.
+ *
+ * The HookBatch collects entity change events during a transaction
+ * and provides methods to consolidate and execute them efficiently.
+ */
+public class HookBatch : Object {
+    
+    // === Private Fields ===
+    
+    /**
+     * Accumulated events in order of occurrence.
+     */
+    private Invercargill.DataStructures.Vector<HookEvent> _events;
+    
+    /**
+     * Map of entity path (as string) to final state for consolidation.
+     */
+    private Invercargill.DataStructures.Dictionary<string, EntityFinalState> _entity_states;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new empty HookBatch.
+     */
+    public HookBatch() {
+        _events = new Invercargill.DataStructures.Vector<HookEvent>();
+        _entity_states = new Invercargill.DataStructures.Dictionary<string, EntityFinalState>();
+    }
+    
+    // === Event Recording ===
+    
+    /**
+     * Adds a CREATED event for the given entity to the batch.
+     *
+     * The entity reference is cached on the event so that handlers do
+     * not need to look the entity up again.
+     *
+     * @param entity The entity that was created
+     */
+    public void record_created(Core.Entity entity) {
+        var entity_path = entity.path;
+        var created_event = new HookEvent(
+            EntityChangeType.CREATED,
+            entity_path,
+            entity.entity_type,
+            entity.type_label,
+            null, null, null,
+            entity
+        );
+        _events.add(created_event);
+        update_entity_state(entity_path.to_string(), StateChangeType.CREATED, entity);
+    }
+    
+    /**
+     * Adds a MODIFIED event for the given entity to the batch.
+     *
+     * The entity reference is cached on the event so that handlers do
+     * not need to look the entity up again.
+     *
+     * @param entity The entity that was modified
+     */
+    public void record_modified(Core.Entity entity) {
+        var entity_path = entity.path;
+        var modified_event = new HookEvent(
+            EntityChangeType.MODIFIED,
+            entity_path,
+            entity.entity_type,
+            entity.type_label,
+            null, null, null,
+            entity
+        );
+        _events.add(modified_event);
+        update_entity_state(entity_path.to_string(), StateChangeType.MODIFIED, entity);
+    }
+    
+    /**
+     * Adds a DELETED event to the batch.
+     *
+     * No entity reference is cached: the entity no longer exists, so
+     * only its path, type and (for documents) type label are recorded.
+     *
+     * @param path The path of the deleted entity
+     * @param entity_type The type of the deleted entity
+     * @param type_label The type label if it was a document
+     */
+    public void record_deleted(
+        Core.EntityPath path,
+        Core.EntityType entity_type,
+        string? type_label
+    ) {
+        _events.add(new HookEvent(
+            EntityChangeType.DELETED,
+            path,
+            entity_type,
+            type_label
+        ));
+        update_entity_state(path.to_string(), StateChangeType.DELETED, null);
+    }
+    
+    /**
+     * Records a property change event.
+     *
+     * The change is stored both as a raw MODIFIED event and folded into
+     * the per-entity final state used by batch handlers.
+     *
+     * @param document The document whose property changed
+     * @param property_name The name of the property
+     * @param old_value The previous value
+     * @param new_value The new value
+     */
+    public void record_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        _events.add(new HookEvent(
+            EntityChangeType.MODIFIED,
+            document.path,
+            document.entity_type,
+            document.type_label,
+            property_name,
+            old_value,
+            new_value
+        ));
+        
+        // Ensure a final-state record exists, then fold this change into it.
+        var key = document.path.to_string();
+        if (!_entity_states.has(key)) {
+            _entity_states.set(key, new EntityFinalState(document));
+        }
+        var tracked = _entity_states.get(key);
+        if (tracked != null) {
+            ((!) tracked).record_property_change(property_name, old_value, new_value);
+        }
+    }
+    
+    // === Event Consolidation ===
+    
+    /**
+     * Gets consolidated events for efficient batch processing.
+     *
+     * Events are grouped by entity path and each group is reduced to at
+     * most one event describing the net effect of the transaction (see
+     * consolidate_entity_events for the reduction rules).
+     *
+     * @return Consolidated vector of events
+     */
+    public Invercargill.DataStructures.Vector<HookEvent> get_consolidated_events() {
+        // Bucket the raw events by the string form of their entity path.
+        var grouped = new Invercargill.DataStructures.Dictionary<
+            string, 
+            Invercargill.DataStructures.Vector<HookEvent>
+        >();
+        foreach (var raw_event in _events) {
+            var key = raw_event.entity_path.to_string();
+            if (!grouped.has(key)) {
+                grouped.set(key, new Invercargill.DataStructures.Vector<HookEvent>());
+            }
+            var bucket = grouped.get(key);
+            if (bucket != null) {
+                ((!) bucket).add(raw_event);
+            }
+        }
+        
+        // Reduce each bucket to its net event, dropping cancelled entities.
+        var result = new Invercargill.DataStructures.Vector<HookEvent>();
+        foreach (var key in grouped.keys) {
+            var bucket = grouped.get(key);
+            if (bucket == null) {
+                continue;
+            }
+            var net_event = consolidate_entity_events((!) bucket);
+            if (net_event != null) {
+                result.add((!) net_event);
+            }
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Consolidates the ordered event list for a single entity into at most
+     * one event describing the net effect of the transaction.
+     *
+     * Net-effect rules (events are replayed in recorded order):
+     *  - did not exist before, exists after -> CREATED
+     *  - existed before, exists after       -> MODIFIED
+     *  - existed before, deleted after      -> DELETED
+     *  - did not exist before, deleted      -> null (cancelled out)
+     *
+     * Unlike a simple created/deleted flag pair, this is order-aware: an
+     * entity that is deleted and then re-created within the same
+     * transaction nets out to MODIFIED (it was replaced) instead of being
+     * incorrectly cancelled to no event at all.
+     *
+     * The entity reference is preserved from the last event that carried
+     * one, so handlers can avoid a storage lookup.
+     *
+     * @param events The events recorded for one entity, in order
+     * @return The consolidated event, or null if the events cancel out
+     */
+    private HookEvent? consolidate_entity_events(
+        Invercargill.DataStructures.Vector<HookEvent> events
+    ) {
+        if (events.peek_count() == 0) {
+            return null;
+        }
+        
+        // The entity existed before this batch unless its very first
+        // recorded event is its creation.
+        bool existed_before = true;
+        // Whether the entity exists after replaying all events in order.
+        bool exists_now = true;
+        bool first = true;
+        HookEvent? last_event = null;
+        Core.Entity? entity_ref = null;
+        
+        foreach (var evt in events) {
+            if (first) {
+                existed_before = evt.change_type != EntityChangeType.CREATED;
+                exists_now = existed_before;
+                first = false;
+            }
+            switch (evt.change_type) {
+                case EntityChangeType.CREATED:
+                    exists_now = true;
+                    break;
+                case EntityChangeType.DELETED:
+                    exists_now = false;
+                    break;
+                case EntityChangeType.MODIFIED:
+                    // Modification does not change existence.
+                    break;
+            }
+            last_event = evt;
+            // Preserve entity reference from any event that has it
+            if (evt.entity_ref != null) {
+                entity_ref = evt.entity_ref;
+            }
+        }
+        
+        if (last_event == null) {
+            return null;
+        }
+        
+        if (!exists_now) {
+            // Entity ends up deleted. Only report it if it existed before
+            // the batch; a create+delete within one transaction cancels out.
+            if (!existed_before) {
+                return null;
+            }
+            return new HookEvent(
+                EntityChangeType.DELETED,
+                ((!) last_event).entity_path,
+                ((!) last_event).entity_type,
+                ((!) last_event).type_label,
+                null, null, null,
+                null  // No entity reference for deletions
+            );
+        }
+        
+        // Entity ends up existing: CREATED if new to this batch, otherwise
+        // the net effect (including delete-then-recreate) is MODIFIED.
+        var net_type = existed_before
+            ? EntityChangeType.MODIFIED
+            : EntityChangeType.CREATED;
+        return new HookEvent(
+            net_type,
+            ((!) last_event).entity_path,
+            ((!) last_event).entity_type,
+            ((!) last_event).type_label,
+            null, null, null,
+            entity_ref  // Preserve entity reference
+        );
+    }
+    
+    // === Batch Execution ===
+    
+    /**
+     * Executes all batched events through the hook manager.
+     *
+     * Entity events are consolidated first; every change type is dispatched
+     * the same way, the type only determines whether the live entity is
+     * loaded beforehand. Property change events are dispatched afterwards.
+     *
+     * @param hook_manager The hook manager to notify
+     */
+    public void execute(HookManager hook_manager) {
+        foreach (var evt in get_consolidated_events()) {
+            bool is_deletion = evt.change_type == EntityChangeType.DELETED;
+            
+            // Load the live entity unless it was deleted - use the sync
+            // method since hooks run in the DBM thread.
+            Core.Entity? entity = null;
+            if (!is_deletion) {
+                var embedded = hook_manager.engine as EmbeddedEngine;
+                if (embedded != null) {
+                    entity = ((!) embedded).get_entity_or_null_sync(evt.entity_path);
+                }
+            }
+            
+            // Deletions are always dispatched; other events only when the
+            // entity could actually be loaded.
+            if (is_deletion || entity != null) {
+                hook_manager.notify_entity_change_from_event(evt, entity);
+            }
+        }
+        
+        // Execute property change events
+        execute_property_changes(hook_manager);
+    }
+    
+    /**
+     * Executes property change events.
+     *
+     * Walks every tracked entity state and re-dispatches each recorded
+     * property change through the hook manager.
+     *
+     * NOTE(review): this dispatches via the public
+     * HookManager.notify_document_property_change, which re-records into
+     * the active batch whenever batch mode is on. The caller must ensure
+     * the batch is no longer registered as active before invoking
+     * execute(), otherwise these calls mutate the property_changes map
+     * being iterated here instead of reaching handlers — confirm against
+     * HookManager.commit_batch.
+     */
+    private void execute_property_changes(HookManager hook_manager) {
+        foreach (var path in _entity_states.keys) {
+            var state = _entity_states.get(path);
+            if (state == null) {
+                continue;
+            }
+            
+            // Replay each property's recorded old/new value pair.
+            foreach (var prop_name in ((!) state).property_changes.keys) {
+                var change = ((!) state).property_changes.get(prop_name);
+                if (change != null) {
+                    hook_manager.notify_document_property_change(
+                        ((!) state).entity,
+                        prop_name,
+                        ((!) change).old_value,
+                        ((!) change).new_value
+                    );
+                }
+            }
+        }
+    }
+    
+    /**
+     * Gets the entity states for batch handlers.
+     *
+     * The returned dictionary is the live internal map (not a copy),
+     * keyed by the string form of each entity path.
+     *
+     * @return Dictionary of entity path to final state
+     */
+    internal Invercargill.DataStructures.Dictionary<string, EntityFinalState> get_entity_states() {
+        return _entity_states;
+    }
+    
+    // === Utility Methods ===
+    
+    /**
+     * Updates the entity state tracking.
+     *
+     * Lazily creates a state record for the path. Deletions carry no
+     * entity, so a record is only created when an entity is supplied;
+     * otherwise the change is recorded on an existing record, if any.
+     */
+    private void update_entity_state(
+        string path,
+        StateChangeType change_type,
+        Core.Entity? entity
+    ) {
+        if (!_entity_states.has(path) && entity != null) {
+            _entity_states.set(path, new EntityFinalState((!) entity));
+        }
+        
+        var tracked = _entity_states.get(path);
+        if (tracked != null) {
+            ((!) tracked).record_change(change_type);
+        }
+    }
+    
+    /**
+     * Clears all accumulated events and tracked entity states.
+     */
+    public void clear() {
+        _entity_states.clear();
+        _events.clear();
+    }
+    
+    /**
+     * Gets the number of accumulated events.
+     *
+     * Counts raw recorded events, before any consolidation.
+     */
+    public int event_count {
+        get { return (int) _events.peek_count(); }
+    }
+    
+    /**
+     * Checks if there are any events to process.
+     *
+     * True when at least one raw event has been recorded.
+     */
+    public bool has_events {
+        get { return _events.peek_count() > 0; }
+    }
+}
+
+/**
+ * Interface for entity change notification handlers.
+ *
+ * Implement this interface to receive notifications when entities
+ * are created, modified, or deleted. Handlers are registered with a
+ * HookManager and invoked synchronously, in registration order.
+ */
+public interface EntityChangeHandler : Object {
+    /**
+     * Called when an entity changes.
+     *
+     * Errors raised by an implementation are logged by the dispatcher
+     * and do not prevent other handlers from running.
+     *
+     * @param entity The entity that changed
+     * @param change_type The type of change that occurred
+     */
+    public abstract void on_entity_change(Core.Entity entity, EntityChangeType change_type);
+}
+
+/**
+ * Interface for document property change notification handlers.
+ *
+ * Implement this interface to receive notifications when document
+ * properties are changed. Handlers are registered with a HookManager
+ * and invoked synchronously, in registration order.
+ */
+public interface DocumentPropertyChangeHandler : Object {
+    /**
+     * Called when a document property changes.
+     *
+     * Errors raised by an implementation are logged by the dispatcher
+     * and do not prevent other handlers from running.
+     *
+     * @param document The document whose property changed
+     * @param property_name The name of the property that changed
+     * @param old_value The previous value, or null if the property was new
+     * @param new_value The new value, or null if the property was removed
+     */
+    public abstract void on_document_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    );
+}
+
+/**
+ * Interface for hooks that can process batched events efficiently.
+ *
+ * Implementing this interface allows hooks to optimize their index
+ * updates when processing multiple changes at once. Batch-capable
+ * handlers receive consolidated events at transaction commit instead
+ * of individual per-change callbacks.
+ */
+public interface BatchedHookHandler : Object, EntityChangeHandler {
+    
+    /**
+     * Called with a batch of entity changes.
+     *
+     * This method receives all changes for entities matching the
+     * hook's type filter. The hook can optimize storage writes
+     * by processing all changes together.
+     *
+     * Note: the dispatcher currently passes all consolidated events
+     * without pre-filtering by type label, so implementations must
+     * filter internally.
+     *
+     * @param events The consolidated events for matching entities
+     */
+    public abstract void on_batch_change(Invercargill.DataStructures.Vector<HookEvent> events);
+    
+    /**
+     * Called with batched property changes.
+     *
+     * @param document The document that changed
+     * @param changes Map of property name to old/new values
+     */
+    public abstract void on_batch_property_change(
+        Core.Entity document,
+        Invercargill.DataStructures.Dictionary<string, PropertyChange> changes
+    );
+    
+    /**
+     * Indicates whether this handler prefers batch processing.
+     *
+     * If true, on_batch_change will be called instead of individual
+     * on_entity_change calls.
+     */
+    public abstract bool supports_batch { get; }
+}
+
+/**
+ * Manages hooks for entity change notifications with batch support.
+ *
+ * The HookManager provides a central notification system for entity
+ * changes. Indexed entities (Category, Catalogue, Index) register
+ * handlers to be notified when documents of their interested type
+ * are created, modified, or deleted.
+ *
+ * When batch mode is active (during a transaction), events are
+ * accumulated and executed at commit time for better performance.
+ *
+ * Example usage:
+ * {{{
+ * public class MyHandler : Object, EntityChangeHandler {
+ *     public void on_entity_change(Core.Entity entity, EntityChangeType change_type) {
+ *         if (change_type == EntityChangeType.CREATED) {
+ *             message("Entity created: %s", entity.path.to_string());
+ *         }
+ *     }
+ * }
+ * 
+ * var hooks = new HookManager();
+ * var handler = new MyHandler();
+ * hooks.register_handler(handler);
+ * 
+ * // Notify of a change
+ * hooks.notify_entity_change(document, EntityChangeType.CREATED);
+ * 
+ * // Unregister when done
+ * hooks.unregister_handler(handler);
+ * }}}
+ */
+public class HookManager : Object {
+    
+    // === Private Fields ===
+    
+    /**
+     * Registered handlers for entity changes.
+     */
+    private Invercargill.DataStructures.Vector<EntityChangeHandler> _handlers;
+    
+    /**
+     * Registered handlers for document property changes.
+     */
+    private Invercargill.DataStructures.Vector<DocumentPropertyChangeHandler> _property_handlers;
+    
+    /**
+     * Registered handlers that support batch processing.
+     */
+    private Invercargill.DataStructures.Vector<BatchedHookHandler> _batched_handlers;
+    
+    /**
+     * The current batch for transaction mode, or null if not in transaction.
+     */
+    private HookBatch? _current_batch = null;
+    
+    /**
+     * Whether batch mode is active (i.e., within a transaction).
+     */
+    private bool _batch_mode = false;
+    
+    // === Public Properties ===
+    
+    /**
+     * The engine this hook manager is associated with.
+     */
+    public weak Core.Engine engine { get; set; }
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new HookManager.
+     */
+    public HookManager() {
+        _handlers = new Invercargill.DataStructures.Vector<EntityChangeHandler>();
+        _property_handlers = new Invercargill.DataStructures.Vector<DocumentPropertyChangeHandler>();
+        _batched_handlers = new Invercargill.DataStructures.Vector<BatchedHookHandler>();
+    }
+    
+    // === Batch Mode Control ===
+    
+    /**
+     * Begins batch mode for transaction processing.
+     *
+     * In batch mode, all events are accumulated instead of being
+     * immediately dispatched to handlers.
+     */
+    public void begin_batch() {
+        _batch_mode = true;
+        _current_batch = new HookBatch();
+    }
+    
+    /**
+     * Commits the current batch, executing all accumulated events.
+     *
+     * This executes batch handlers first with consolidated events,
+     * then executes individual events for non-batched handlers.
+     */
+    public void commit_batch() {
+        if (_current_batch == null) {
+            return;
+        }
+        var batch = (!) _current_batch;
+        
+        // Execute batch for batched handlers
+        execute_batch_for_handlers(batch);
+        
+        // Only execute individual events if there are non-batched handlers.
+        // This avoids expensive entity lookups when all handlers support batching.
+        if (has_non_batched_handlers()) {
+            // Detach the batch before executing it: notifications raised
+            // during execution (property changes re-dispatched through
+            // notify_document_property_change) must reach handlers
+            // immediately rather than being re-recorded into the batch
+            // that is currently being iterated. _batch_mode stays true so
+            // batch-capable handlers are still skipped by the immediate
+            // dispatch paths.
+            _current_batch = null;
+            batch.execute(this);
+        }
+        
+        // Clear batch
+        _current_batch = null;
+        _batch_mode = false;
+    }
+    
+    /**
+     * Checks if there are any handlers that don't support batch processing.
+     *
+     * Both entity change handlers and property change handlers are
+     * considered, since property notifications are only delivered to
+     * non-batched property handlers via the individual execution pass.
+     *
+     * If all handlers support batching, we can skip the expensive
+     * batch.execute() call which does entity lookups.
+     *
+     * @return true if there are non-batched handlers, false otherwise
+     */
+    private bool has_non_batched_handlers() {
+        foreach (var handler in _handlers) {
+            if (!(handler is BatchedHookHandler)) {
+                return true;
+            }
+            var batched = (BatchedHookHandler) handler;
+            if (!batched.supports_batch) {
+                return true;
+            }
+        }
+        foreach (var prop_handler in _property_handlers) {
+            if (!(prop_handler is BatchedHookHandler)) {
+                return true;
+            }
+            var batched_prop = (BatchedHookHandler) prop_handler;
+            if (!batched_prop.supports_batch) {
+                return true;
+            }
+        }
+        return false;
+    }
+    
+    /**
+     * Rolls back the current batch, discarding all accumulated events.
+     */
+    public void rollback_batch() {
+        if (_current_batch != null) {
+            ((!) _current_batch).clear();
+        }
+        _current_batch = null;
+        _batch_mode = false;
+    }
+    
+    /**
+     * Checks if batch mode is currently active.
+     */
+    public bool is_batch_mode {
+        get { return _batch_mode; }
+    }
+    
+    // === Entity Change Handlers ===
+    
+    /**
+     * Registers a handler for entity changes.
+     *
+     * The handler will be invoked whenever notify_entity_change()
+     * is called. Handlers are invoked in the order they were registered.
+     *
+     * If the handler implements BatchedHookHandler, it will also
+     * receive batch notifications during transaction commits.
+     *
+     * @param handler The handler to register
+     */
+    public void register_handler(EntityChangeHandler handler) {
+        _handlers.add(handler);
+        
+        // Also track as batched handler if applicable
+        if (handler is BatchedHookHandler) {
+            _batched_handlers.add((BatchedHookHandler) handler);
+        }
+    }
+    
+    /**
+     * Unregisters a handler for entity changes.
+     *
+     * Safely handles the case where the handler was never registered
+     * or was already unregistered (works around Invercargill Vector
+     * bug where remove_first_where crashes on missing elements).
+     *
+     * @param handler The handler to unregister
+     */
+    public void unregister_handler(EntityChangeHandler handler) {
+        safe_remove_from_vector(_handlers, handler);
+        
+        if (handler is BatchedHookHandler) {
+            safe_remove_from_vector(_batched_handlers, (BatchedHookHandler) handler);
+        }
+    }
+    
+    /**
+     * Notifies all registered handlers of an entity change.
+     *
+     * In batch mode, events are queued. Otherwise, handlers are
+     * invoked immediately.
+     *
+     * Handlers are invoked in the order they were registered.
+     * If a handler throws an error, it is logged but does not
+     * prevent other handlers from being invoked.
+     *
+     * @param entity The entity that changed
+     * @param change_type The type of change that occurred
+     */
+    public void notify_entity_change(Core.Entity entity, EntityChangeType change_type) {
+        if (_batch_mode && _current_batch != null) {
+            // Queue the event
+            switch (change_type) {
+                case EntityChangeType.CREATED:
+                    ((!) _current_batch).record_created(entity);
+                    break;
+                case EntityChangeType.MODIFIED:
+                    ((!) _current_batch).record_modified(entity);
+                    break;
+                case EntityChangeType.DELETED:
+                    ((!) _current_batch).record_deleted(
+                        entity.path,
+                        entity.entity_type,
+                        entity.type_label
+                    );
+                    break;
+            }
+        } else {
+            // Immediate dispatch
+            notify_entity_change_immediate(entity, change_type);
+        }
+    }
+    
+    // === Document Property Change Handlers ===
+    
+    /**
+     * Registers a handler for document property changes.
+     *
+     * The handler will be invoked whenever notify_document_property_change()
+     * is called. Handlers are invoked in the order they were registered.
+     *
+     * @param handler The handler to register
+     */
+    public void register_property_handler(DocumentPropertyChangeHandler handler) {
+        _property_handlers.add(handler);
+    }
+    
+    /**
+     * Unregisters a handler for document property changes.
+     *
+     * Safely handles the case where the handler was never registered
+     * or was already unregistered (works around Invercargill Vector
+     * bug where remove_first_where crashes on missing elements).
+     *
+     * @param handler The handler to unregister
+     */
+    public void unregister_property_handler(DocumentPropertyChangeHandler handler) {
+        safe_remove_from_vector(_property_handlers, handler);
+    }
+    
+    /**
+     * Notifies all registered handlers of a document property change.
+     *
+     * In batch mode, events are queued. Otherwise, handlers are
+     * invoked immediately.
+     *
+     * This is used to notify indexed entities when a specific property
+     * changes, allowing them to update their indices efficiently.
+     *
+     * Handlers are invoked in the order they were registered.
+     * If a handler throws an error, it is logged but does not
+     * prevent other handlers from being invoked.
+     *
+     * @param document The document whose property changed
+     * @param property_name The name of the property that changed
+     * @param old_value The previous value, or null if the property was new
+     * @param new_value The new value, or null if the property was removed
+     */
+    public void notify_document_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        if (_batch_mode && _current_batch != null) {
+            ((!) _current_batch).record_property_change(
+                document,
+                property_name,
+                old_value,
+                new_value
+            );
+        } else {
+            notify_property_change_immediate(document, property_name, old_value, new_value);
+        }
+    }
+    
+    // === Internal Methods ===
+    
+    /**
+     * Notifies handlers from a stored event.
+     *
+     * This is used during batch execution to dispatch consolidated events.
+     */
+    internal void notify_entity_change_from_event(HookEvent evt, Core.Entity? entity) {
+        if (entity != null) {
+            notify_entity_change_immediate((!) entity, evt.change_type);
+        }
+    }
+    
+    /**
+     * Immediately notifies all handlers.
+     *
+     * When called from batch execution (during commit_batch), skips handlers
+     * that support batch processing since they already received events via
+     * on_batch_change().
+     */
+    private void notify_entity_change_immediate(Core.Entity entity, EntityChangeType change_type) {
+        foreach (var handler in _handlers) {
+            // Skip batched handlers during batch execution - they already processed events
+            // via execute_batch_for_handlers()
+            if (_batch_mode && handler is BatchedHookHandler) {
+                var batched = (BatchedHookHandler) handler;
+                if (batched.supports_batch) {
+                    continue;
+                }
+            }
+            try {
+                handler.on_entity_change(entity, change_type);
+            } catch (Error e) {
+                warning("Hook handler threw error for %s: %s",
+                    entity.path.to_string(), e.message);
+            }
+        }
+    }
+    
+    /**
+     * Immediately notifies all property handlers.
+     *
+     * When called from batch execution (during commit_batch), skips handlers
+     * that support batch processing since they already received events via
+     * on_batch_property_change().
+     */
+    private void notify_property_change_immediate(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        foreach (var handler in _property_handlers) {
+            // Skip batched handlers during batch execution - they already processed events
+            // via execute_batch_property_changes()
+            if (_batch_mode && handler is BatchedHookHandler) {
+                var batched = (BatchedHookHandler) handler;
+                if (batched.supports_batch) {
+                    continue;
+                }
+            }
+            try {
+                handler.on_document_property_change(document, property_name, old_value, new_value);
+            } catch (Error e) {
+                warning("Property hook handler threw error for %s.%s: %s",
+                    document.path.to_string(), property_name, e.message);
+            }
+        }
+    }
+    
+    /**
+     * Executes batch for handlers that support batch processing.
+     */
+    private void execute_batch_for_handlers(HookBatch batch) {
+        var consolidated = batch.get_consolidated_events();
+        
+        foreach (var handler in _batched_handlers) {
+            if (handler.supports_batch) {
+                try {
+                    // Filter events by type_label if handler is type-specific
+                    var filtered = filter_events_for_handler(consolidated, handler);
+                    if (filtered.peek_count() > 0) {
+                        handler.on_batch_change(filtered);
+                    }
+                } catch (Error e) {
+                    warning("Batched hook handler threw error: %s", e.message);
+                }
+            }
+        }
+        
+        // Execute batch property changes
+        execute_batch_property_changes(batch);
+    }
+    
+    /**
+     * Executes batch property changes for handlers that support batch processing.
+     */
+    private void execute_batch_property_changes(HookBatch batch) {
+        var entity_states = batch.get_entity_states();
+        
+        foreach (var handler in _batched_handlers) {
+            if (!handler.supports_batch) {
+                continue;
+            }
+            
+            foreach (var path in entity_states.keys) {
+                var state = entity_states.get(path);
+                if (state == null) {
+                    continue;
+                }
+                
+                var changes = ((!) state).property_changes;
+                if (changes.count() > 0) {
+                    try {
+                        handler.on_batch_property_change(((!) state).entity, changes);
+                    } catch (Error e) {
+                        warning("Batched property handler threw error: %s", e.message);
+                    }
+                }
+            }
+        }
+    }
+    
+    /**
+     * Filters events to only those relevant to a handler.
+     */
+    private Invercargill.DataStructures.Vector<HookEvent> filter_events_for_handler(
+        Invercargill.DataStructures.Vector<HookEvent> events,
+        BatchedHookHandler handler
+    ) {
+        // For now, return all events - handlers can filter internally
+        // based on type_label
+        return events;
+    }
+    
+    // === Utility Methods ===
+    
+    /**
+     * Safely removes an item from a vector, handling the case where the item
+     * is not found.
+     *
+     * This works around a bug in Invercargill's Vector.remove_first_where()
+     * which crashes when the item is not found (it passes null to a non-nullable
+     * parameter in remove_internal).
+     *
+     * @param vector The vector to remove from
+     * @param item The item to remove
+     */
+    private void safe_remove_from_vector<T>(Invercargill.DataStructures.Vector<T> vector, T item) {
+        // Find the index manually to avoid the crash in remove_first_where
+        uint? index = null;
+        uint i = 0;
+        foreach (var element in vector) {
+            if (element == item) {
+                index = i;
+                break;
+            }
+            i++;
+        }
+        
+        // Only remove if found
+        if (index != null) {
+            try {
+                vector.remove_at((!) index);
+            } catch (Error e) {
+                warning("Failed to remove handler from vector: %s", e.message);
+            }
+        }
+    }
+    
+    /**
+     * Clears all registered handlers.
+     *
+     * This is useful during shutdown or when resetting the hook manager.
+     */
+    public void clear_all() {
+        _handlers = new Invercargill.DataStructures.Vector<EntityChangeHandler>();
+        _property_handlers = new Invercargill.DataStructures.Vector<DocumentPropertyChangeHandler>();
+        _batched_handlers = new Invercargill.DataStructures.Vector<BatchedHookHandler>();
+    }
+    
+    /**
+     * Gets the number of registered entity change handlers.
+     *
+     * @return The number of registered handlers
+     */
+    public uint handler_count {
+        get { return (uint) _handlers.peek_count(); }
+    }
+    
+    /**
+     * Gets the number of registered property change handlers.
+     *
+     * @return The number of registered handlers
+     */
+    public uint property_handler_count {
+        get { return (uint) _property_handlers.peek_count(); }
+    }
+    
+    /**
+     * Gets the number of registered batched handlers.
+     *
+     * @return The number of registered batched handlers
+     */
+    public uint batched_handler_count {
+        get { return (uint) _batched_handlers.peek_count(); }
+    }
+}
+
+} // namespace Implexus.Engine

+ 831 - 0
src/Engine/RemoteEngine.vala

@@ -0,0 +1,831 @@
+/**
+ * RemoteEngine - Client-side engine implementation for Implexus
+ * 
+ * Provides a client implementation that connects to a remote
+ * Implexus daemon and implements the Engine interface.
+ * 
+ * All I/O operations are async - use yield when calling these methods.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Engine {
+
+/**
+ * Client-side engine implementation.
+ * 
+ * RemoteEngine connects to a remote Implexus server via TCP and
+ * provides the same Engine interface as EmbeddedEngine. This allows
+ * applications to use either embedded or remote mode transparently.
+ * 
+ * Example usage:
+ * {{{
+ * var engine = new RemoteEngine("localhost", 9090);
+ * try {
+ *     yield engine.connect_async();
+ *     
+ *     var root = yield engine.get_root_async();
+ *     var users = yield root.create_category_async("users", "User", "true");
+ *     
+ *     yield engine.disconnect_async();
+ * } catch (Error e) {
+ *     warning("Failed to connect: %s", e.message);
+ * }
+ * }}}
+ */
+public class RemoteEngine : Object, Core.Engine {
+    
+    // === Private Fields ===
+    
+    // Server endpoint, fixed at construction time.
+    private string _host;
+    private uint16 _port;
+    // Live TCP connection and its framed message streams; all null while
+    // disconnected.
+    private SocketConnection? _connection = null;
+    private Protocol.MessageReader? _reader = null;
+    private Protocol.MessageWriter? _writer = null;
+    private bool _connected = false;
+    // Cached local proxy for the root entity (created lazily).
+    private RemoteEntity? _root = null;
+    private Core.StorageConfiguration _configuration;
+    // At most one transaction per connection; null when none is active.
+    private RemoteTransaction? _current_transaction = null;
+    
+    // === Properties ===
+    
+    // Configured server endpoint.
+    public string host { get { return _host; } }
+    public uint16 port { get { return _port; } }
+    // True only after a successful connect_async() handshake.
+    public bool is_connected { get { return _connected; } }
+    
+    // A transaction counts as "in progress" only while its handle is still
+    // active (i.e. neither committed nor rolled back).
+    public bool in_transaction {
+        get { return _current_transaction != null && ((!) _current_transaction).active; }
+    }
+    
+    public Core.StorageConfiguration configuration {
+        owned get { return _configuration; }
+    }
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a client engine for the given server endpoint.
+     *
+     * No I/O happens here; call connect_async() to establish the link.
+     *
+     * NOTE(review): the StorageConfiguration is a placeholder backed by a
+     * "/dev/null" BasicStorage - remote engines have no local storage, but
+     * the Engine interface requires a configuration object. Confirm no
+     * caller ever writes through it.
+     *
+     * @param host server hostname or address to resolve
+     * @param port TCP port (defaults to 9090)
+     */
+    public RemoteEngine(string host, uint16 port = 9090) {
+        _host = host;
+        _port = port;
+        _configuration = new Core.StorageConfiguration(new Storage.BasicStorage.with_directory("/dev/null"));
+    }
+    
+    // === Connection Management ===
+    
+    /**
+     * Establishes the TCP connection to the server and performs the
+     * protocol handshake (waits for the server's WELCOME message).
+     *
+     * Safe to call when already connected (no-op). On any failure the
+     * partially-established connection is torn down, so a later retry
+     * starts from a clean state and the socket is not leaked.
+     *
+     * @throws Protocol.ProtocolError on resolution, connection or
+     *         handshake failure
+     */
+    public async void connect_async() throws GLib.Error {
+        if (_connected) {
+            return;
+        }
+        
+        try {
+            var resolver = Resolver.get_default();
+            var addresses = yield resolver.lookup_by_name_async(_host, null);
+            if (addresses == null) {
+                throw new Protocol.ProtocolError.CONNECTION_FAILED(
+                    "Failed to resolve host: %s".printf(_host)
+                );
+            }
+            
+            // Take the first resolved address.
+            InetAddress? first_address = null;
+            foreach (var addr in (!) addresses) {
+                first_address = addr;
+                break;
+            }
+            
+            if (first_address == null) {
+                throw new Protocol.ProtocolError.CONNECTION_FAILED(
+                    "No addresses found for host: %s".printf(_host)
+                );
+            }
+            
+            var socket_client = new SocketClient();
+            var socket_address = new InetSocketAddress((!) first_address, _port);
+            _connection = yield socket_client.connect_async((SocketConnectable) socket_address);
+            
+            if (_connection == null) {
+                throw new Protocol.ProtocolError.CONNECTION_FAILED(
+                    "Failed to connect to %s:%d".printf(_host, _port)
+                );
+            }
+            
+            _reader = new Protocol.MessageReader(((!) _connection).get_input_stream());
+            _writer = new Protocol.MessageWriter(((!) _connection).get_output_stream());
+            
+            // The server must greet us before the link is considered usable.
+            var welcome = yield ((!) _reader).read_message_async();
+            if (welcome == null || ((!) welcome).message_type != Protocol.MessageType.WELCOME) {
+                throw new Protocol.ProtocolError.INVALID_MESSAGE(
+                    "Expected welcome message from server"
+                );
+            }
+            
+            _connected = true;
+            
+        } catch (Error e) {
+            // Fix: a failed handshake previously leaked the open socket and
+            // left stale _connection/_reader/_writer state behind. Tear the
+            // half-open connection down before propagating the error.
+            if (_reader != null) {
+                ((!) _reader).close();
+            }
+            if (_writer != null) {
+                ((!) _writer).close();
+            }
+            if (_connection != null) {
+                try {
+                    yield ((!) _connection).close_async();
+                } catch (Error close_error) {
+                    // Best effort - the connection is being discarded anyway.
+                }
+            }
+            _connection = null;
+            _reader = null;
+            _writer = null;
+            
+            if (e is Protocol.ProtocolError) {
+                throw e;
+            }
+            throw new Protocol.ProtocolError.CONNECTION_FAILED(
+                "Connection failed: %s".printf(e.message)
+            );
+        }
+    }
+    
+    /**
+     * Gracefully closes the connection to the server.
+     *
+     * Any active transaction is rolled back first - while the connection
+     * is still usable, so the server actually receives the rollback -
+     * then the streams and socket are released. Safe to call when
+     * already disconnected.
+     */
+    public async void disconnect_async() {
+        if (!_connected) {
+            return;
+        }
+        
+        // Fix: roll back any active transaction BEFORE tearing the
+        // connection down. The previous ordering closed the streams and
+        // cleared _connected first, so the rollback request could never
+        // reach the server.
+        if (_current_transaction != null && ((!) _current_transaction).active) {
+            yield ((!) _current_transaction).rollback_async();
+        }
+        _current_transaction = null;
+        
+        _connected = false;
+        
+        if (_reader != null) {
+            ((!) _reader).close();
+        }
+        if (_writer != null) {
+            ((!) _writer).close();
+        }
+        
+        if (_connection != null) {
+            try {
+                yield ((!) _connection).close_async();
+            } catch (Error e) {
+                // Ignore close errors - we are shutting down regardless.
+            }
+        }
+        
+        _connection = null;
+        _reader = null;
+        _writer = null;
+        _root = null;
+    }
+    
+    // === Engine Interface Implementation ===
+    
+    /**
+     * Returns the root entity of the remote store.
+     *
+     * The root is a locally-constructed proxy (its actual existence is
+     * managed server-side) and is cached after the first call.
+     */
+    public async Core.Entity get_root_async() throws Core.EngineError {
+        if (_root == null) {
+            _root = new RemoteEntity(this, new Core.EntityPath.root(), Core.EntityType.CONTAINER, "");
+        }
+        return (!) _root;
+    }
+    
+    /**
+     * Fetches the entity at //path// from the server.
+     *
+     * Relies on strict request/response pairing on the shared connection.
+     * NOTE(review): despite the nullable return type, this method never
+     * returns null - a missing entity surfaces as a server ERROR response,
+     * which is rethrown as the mapped EngineError.
+     *
+     * @throws Core.EngineError mapped from the server's error code, or
+     *         PROTOCOL_ERROR on transport/unexpected-response problems
+     */
+    public async Core.Entity? get_entity_async(Core.EntityPath path) throws Core.EngineError {
+        ensure_connected();
+        
+        try {
+            var request = new Protocol.GetEntityRequest.for_path(path);
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            if (response.message_type == Protocol.MessageType.ENTITY_RESPONSE) {
+                var entity_response = (Protocol.EntityResponse) response;
+                return create_remote_entity_from_data(entity_response.entity_data);
+            }
+            
+            throw new Core.EngineError.PROTOCOL_ERROR("Unexpected response type");
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Like get_entity_async(), but maps every lookup failure to null
+     * instead of raising.
+     */
+    public async Core.Entity? get_entity_or_null_async(Core.EntityPath path) throws Core.EngineError {
+        Core.Entity? found = null;
+        try {
+            found = yield get_entity_async(path);
+        } catch (Core.EngineError e) {
+            // Absence (or any engine failure) is reported as null here.
+        }
+        return found;
+    }
+    
+    /**
+     * Asks the server whether an entity exists at //path//.
+     *
+     * NOTE(review): a server ERROR response - and any non-boolean
+     * response - is reported as false rather than raised; confirm this
+     * best-effort behaviour is intended.
+     *
+     * @throws Core.EngineError.PROTOCOL_ERROR on transport failure only
+     */
+    public async bool entity_exists_async(Core.EntityPath path) throws Core.EngineError {
+        ensure_connected();
+        
+        try {
+            var request = new Protocol.EntityExistsRequest.for_path(path);
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.BOOLEAN_RESPONSE) {
+                return ((Protocol.BooleanResponse) response).value;
+            }
+            
+            return false;
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Returns all entities carrying the given type label.
+     *
+     * The server responds with matching paths only; each entity is then
+     * fetched with a separate get_entity_async round-trip (N+1 requests -
+     * a candidate for a bulk-fetch protocol message). Entities that fail
+     * to load are skipped silently.
+     *
+     * @throws Core.EngineError.PROTOCOL_ERROR on transport failure
+     */
+    public async Core.Entity[] query_by_type_async(string type_label) throws Core.EngineError {
+        ensure_connected();
+        
+        var results = new Core.Entity[0];
+        
+        try {
+            var request = new Protocol.QueryByTypeRequest();
+            request.type_label = type_label;
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.QUERY_RESPONSE) {
+                var query_response = (Protocol.QueryResponse) response;
+                
+                foreach (var path_str in query_response.paths) {
+                    try {
+                        var path = new Core.EntityPath(path_str);
+                        var entity = yield get_entity_async(path);
+                        if (entity != null) {
+                            results += (!) entity;
+                        }
+                    } catch (Core.EngineError e) {
+                        // Skip entities that fail to load
+                    }
+                }
+            }
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+        
+        return results;
+    }
+    
+    /**
+     * Queries entities of //type_label// whose properties satisfy the
+     * given expression.
+     *
+     * Only the type query goes to the server; the expression is evaluated
+     * client-side against every candidate. Candidates whose evaluation
+     * fails are silently excluded.
+     */
+    public async Core.Entity[] query_by_expression_async(
+        string type_label, 
+        string expression
+    ) throws Core.EngineError {
+        ensure_connected();
+        
+        var matched = new Core.Entity[0];
+        var candidates = yield query_by_type_async(type_label);
+        
+        foreach (var candidate in candidates) {
+            bool keep = false;
+            try {
+                keep = yield evaluate_expression_async(candidate, expression);
+            } catch (Core.EngineError e) {
+                // An evaluation failure simply excludes this candidate.
+            }
+            if (keep) {
+                matched += candidate;
+            }
+        }
+        
+        return matched;
+    }
+    
+    /**
+     * Starts a server-side transaction.
+     *
+     * Only one transaction may be active per connection. The returned
+     * handle shares state with this engine: committing or rolling it
+     * back goes through commit_async()/rollback_async() below.
+     *
+     * @throws Core.EngineError.TRANSACTION_ERROR if one is already active
+     */
+    public async Core.Transaction begin_transaction_async() throws Core.EngineError {
+        ensure_connected();
+        
+        if (_current_transaction != null && ((!) _current_transaction).active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("A transaction is already active");
+        }
+        
+        try {
+            var request = new Protocol.BeginTransactionRequest();
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            _current_transaction = new RemoteTransaction(this);
+            return (!) _current_transaction;
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Commits the active server-side transaction.
+     *
+     * The local transaction reference is cleared only on success; on a
+     * server-reported error the transaction remains active so the caller
+     * can still roll back.
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport failure
+     */
+    public async void commit_async() throws Core.EngineError {
+        ensure_connected();
+        
+        try {
+            var request = new Protocol.CommitTransactionRequest();
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            _current_transaction = null;
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Rolls back the active transaction, best-effort.
+     *
+     * Never raises: when disconnected only the local state is cleared,
+     * and any wire error during rollback is deliberately ignored. The
+     * local transaction reference is dropped in every case.
+     */
+    public async void rollback_async() {
+        if (!_connected) {
+            _current_transaction = null;
+            return;
+        }
+        
+        try {
+            var request = new Protocol.RollbackTransactionRequest();
+            yield send_request_and_wait_async(request);
+        } catch (Protocol.ProtocolError e) {
+            // Ignore errors on rollback
+        } catch (Core.EngineError e) {
+            // Ignore errors on rollback
+        }
+        
+        _current_transaction = null;
+    }
+    
+    // === Internal Methods for RemoteEntity ===
+    
+    /**
+     * Sends a request message and waits for the server's response.
+     *
+     * Relies on strict request/response ordering on the single shared
+     * connection; the request id is therefore not used for correlation.
+     * TODO(review): match response ids if the protocol ever pipelines.
+     *
+     * @throws Protocol.ProtocolError if not connected or on I/O failure
+     */
+    internal async Protocol.Message send_request_and_wait_async(Protocol.Message request) throws Protocol.ProtocolError {
+        // Fix: this previously called ensure_connected(), which throws
+        // Core.EngineError - a domain this method does not declare, so the
+        // error would escape callers that only catch ProtocolError. Check
+        // the flag directly and raise within the declared domain.
+        if (!_connected) {
+            throw new Protocol.ProtocolError.CONNECTION_FAILED("Not connected to server");
+        }
+        
+        // Fix: the returned request id was bound to an unused variable.
+        yield ((!) _writer).write_request_async(request);
+        return yield ((!) _reader).read_response_async();
+    }
+    
+    /**
+     * Creates a container child of //parent_path// named //name// on the
+     * server and emits the engine's entity_created signal on success.
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport/unexpected-response problems
+     */
+    internal async Core.Entity create_container_async(Core.EntityPath parent_path, string name) throws Core.EngineError {
+        ensure_connected();
+        
+        var path = parent_path.append_child(name);
+        
+        try {
+            var request = new Protocol.CreateContainerRequest.for_path(path);
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            if (response.message_type == Protocol.MessageType.ENTITY_RESPONSE) {
+                var entity_response = (Protocol.EntityResponse) response;
+                var entity = create_remote_entity_from_data(entity_response.entity_data);
+                entity_created(entity);
+                return entity;
+            }
+            
+            throw new Core.EngineError.PROTOCOL_ERROR("Unexpected response type");
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Creates a typed document child of //parent_path// on the server and
+     * emits entity_created on success. Mirrors create_container_async.
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport/unexpected-response problems
+     */
+    internal async Core.Entity create_document_async(Core.EntityPath parent_path, string name, string type_label) throws Core.EngineError {
+        ensure_connected();
+        
+        var path = parent_path.append_child(name);
+        
+        try {
+            var request = new Protocol.CreateDocumentRequest.for_path_and_type(path, type_label);
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            if (response.message_type == Protocol.MessageType.ENTITY_RESPONSE) {
+                var entity_response = (Protocol.EntityResponse) response;
+                var entity = create_remote_entity_from_data(entity_response.entity_data);
+                entity_created(entity);
+                return entity;
+            }
+            
+            throw new Core.EngineError.PROTOCOL_ERROR("Unexpected response type");
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Deletes the entity at //path// on the server and emits the engine's
+     * entity_deleted signal on success.
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport failure
+     */
+    internal async void delete_entity_async(Core.EntityPath path) throws Core.EngineError {
+        ensure_connected();
+        
+        try {
+            var request = new Protocol.DeleteEntityRequest.for_path(path);
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            entity_deleted(path);
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Fetches a single property value for the entity at //path//.
+     *
+     * Returns null if the server sends anything other than a
+     * PROPERTY_RESPONSE (other than an ERROR, which is rethrown).
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport failure
+     */
+    internal async Invercargill.Element? get_property_async(Core.EntityPath path, string property_name) throws Core.EngineError {
+        ensure_connected();
+        
+        try {
+            var request = new Protocol.GetPropertyRequest();
+            request.path = path;
+            request.property_name = property_name;
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            if (response.message_type == Protocol.MessageType.PROPERTY_RESPONSE) {
+                return ((Protocol.PropertyResponse) response).value;
+            }
+            
+            return null;
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Writes a single property value for the entity at //path//.
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport failure
+     */
+    internal async void set_property_async(Core.EntityPath path, string property_name, Invercargill.Element value) throws Core.EngineError {
+        ensure_connected();
+        
+        try {
+            var request = new Protocol.SetPropertyRequest();
+            request.path = path;
+            request.property_name = property_name;
+            request.value = value;
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Fetches all children of the entity at //path// as local proxies.
+     *
+     * Returns an empty array if the server sends a non-children,
+     * non-error response.
+     *
+     * @throws Core.EngineError mapped from the server error, or
+     *         PROTOCOL_ERROR on transport failure
+     */
+    internal async Core.Entity[] get_children_async(Core.EntityPath path) throws Core.EngineError {
+        ensure_connected();
+        
+        var results = new Core.Entity[0];
+        
+        try {
+            var request = new Protocol.GetChildrenRequest();
+            request.path = path;
+            var response = yield send_request_and_wait_async(request);
+            
+            if (response.message_type == Protocol.MessageType.ERROR) {
+                var error = (Protocol.ErrorResponse) response;
+                throw error_code_to_engine_error(error.error_code, error.error_message);
+            }
+            
+            if (response.message_type == Protocol.MessageType.CHILDREN_RESPONSE) {
+                var children_response = (Protocol.ChildrenResponse) response;
+                
+                foreach (var child_data in children_response.children) {
+                    results += create_remote_entity_from_data(child_data);
+                }
+            }
+            
+        } catch (Protocol.ProtocolError e) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Protocol error: %s".printf(e.message));
+        }
+        
+        return results;
+    }
+    
+    /**
+     * Collects the names of all children of //path//.
+     *
+     * Built on top of a full get_children_async() fetch. NOTE(review):
+     * fetch errors are swallowed and an empty set is returned - only
+     * ensure_connected() can actually raise here; confirm callers accept
+     * "empty" as meaning "unknown".
+     */
+    internal async Invercargill.ReadOnlySet<string> get_child_names_async(Core.EntityPath path) throws Core.EngineError {
+        ensure_connected();
+        
+        var results = new Invercargill.DataStructures.HashSet<string>();
+        
+        try {
+            foreach (var child in yield get_children_async(path)) {
+                results.add(child.name);
+            }
+        } catch (Core.EngineError e) {
+            // Return empty on error
+        }
+        
+        return results;
+    }
+    
+    /**
+     * Fetches the full property set of the entity at //path// into a
+     * fresh dictionary.
+     *
+     * NOTE(review): fetch errors are swallowed and an empty dictionary is
+     * returned; individual entries that fail to copy are skipped.
+     */
+    internal async Invercargill.Properties get_properties_async(Core.EntityPath path) throws Core.EngineError {
+        ensure_connected();
+        
+        var props = new Invercargill.DataStructures.PropertyDictionary();
+        
+        try {
+            var entity = yield get_entity_async(path);
+            if (entity != null) {
+                var entity_props = yield ((!) entity).get_properties_async();
+                foreach (var kvp in entity_props) {
+                    try {
+                        props.set(kvp.key, kvp.value);
+                    } catch (Invercargill.IndexError e) {
+                        // Skip invalid entries
+                    }
+                }
+            }
+        } catch (Core.EngineError e) {
+            // Return empty on error
+        }
+        
+        return props;
+    }
+    
+    // === Private Helper Methods ===
+    
+    /**
+     * Guards operations that require a live connection.
+     *
+     * @throws Core.EngineError.PROTOCOL_ERROR if not connected
+     */
+    private void ensure_connected() throws Core.EngineError {
+        if (!_connected) {
+            throw new Core.EngineError.PROTOCOL_ERROR("Not connected to server");
+        }
+    }
+    
+    /**
+     * Builds a local proxy entity from wire-format entity data.
+     * A missing type label is normalised to the empty string.
+     */
+    private RemoteEntity create_remote_entity_from_data(Protocol.EntityData data) {
+        return new RemoteEntity(this, data.path, data.entity_type, data.type_label ?? "");
+    }
+    
+    /**
+     * Maps a numeric wire error code onto the EngineError domain.
+     *
+     * NOTE(review): these integers must stay in exact agreement with the
+     * server-side error-code numbering in the Protocol module - confirm
+     * before renumbering. Unknown codes degrade to PROTOCOL_ERROR.
+     */
+    private Core.EngineError error_code_to_engine_error(int code, string message) {
+        switch (code) {
+            case 0: return new Core.EngineError.ENTITY_NOT_FOUND(message);
+            case 1: return new Core.EngineError.ENTITY_ALREADY_EXISTS(message);
+            case 2: return new Core.EngineError.INVALID_PATH(message);
+            case 3: return new Core.EngineError.INVALID_OPERATION(message);
+            case 4: return new Core.EngineError.TYPE_MISMATCH(message);
+            case 5: return new Core.EngineError.EXPRESSION_ERROR(message);
+            case 6: return new Core.EngineError.TRANSACTION_ERROR(message);
+            case 7: return new Core.EngineError.STORAGE_ERROR(message);
+            case 8: return new Core.EngineError.CONNECTION_ERROR(message);
+            case 9: return new Core.EngineError.PROTOCOL_ERROR(message);
+            default: return new Core.EngineError.PROTOCOL_ERROR(message);
+        }
+    }
+    
+    /**
+     * Evaluates a minimal property-expression language against an entity.
+     *
+     * Supported forms (tokens are whitespace-stripped; comparison uses
+     * the string form of the property value, with no quoting or numeric
+     * coercion):
+     *  - "prop == literal": true when the property's string form matches
+     *  - "prop != literal": true when it differs (missing property => true)
+     *  - "prop": true when the property exists and is non-null
+     *
+     * NOTE(review): "==" is tested before "!="; an expression containing
+     * both is treated as an equality test on its first "==" split.
+     *
+     * @throws Core.EngineError.EXPRESSION_ERROR if a property lookup fails
+     */
+    private async bool evaluate_expression_async(Core.Entity entity, string expression) throws Core.EngineError {
+        if (expression.contains("==")) {
+            var parts = expression.split("==", 2);
+            if (parts.length == 2) {
+                var prop_name = parts[0].strip();
+                var expected = parts[1].strip();
+                
+                var value = yield read_property_checked(entity, prop_name);
+                if (value == null) {
+                    return false;
+                }
+                
+                return ((!) value).to_string() == expected;
+            }
+        }
+        
+        if (expression.contains("!=")) {
+            var parts = expression.split("!=", 2);
+            if (parts.length == 2) {
+                var prop_name = parts[0].strip();
+                var expected = parts[1].strip();
+                
+                var value = yield read_property_checked(entity, prop_name);
+                if (value == null) {
+                    return true;
+                }
+                
+                return ((!) value).to_string() != expected;
+            }
+        }
+        
+        // Bare property name: truthiness test.
+        var value = yield read_property_checked(entity, expression.strip());
+        return value != null && !((!) value).is_null();
+    }
+    
+    /**
+     * Reads an entity property, converting Core.EntityError (the domain
+     * Entity.get_entity_property_async declares) into this engine's
+     * Core.EngineError domain.
+     *
+     * Fix: the evaluator previously called the entity accessor directly,
+     * letting an undeclared EntityError escape past callers that only
+     * catch EngineError.
+     */
+    private async Invercargill.Element? read_property_checked(Core.Entity entity, string prop_name) throws Core.EngineError {
+        try {
+            return yield entity.get_entity_property_async(prop_name);
+        } catch (Core.EntityError e) {
+            throw new Core.EngineError.EXPRESSION_ERROR("Property lookup failed: %s".printf(e.message));
+        }
+    }
+}
+
+/**
+ * Remote entity implementation.
+ */
+internal class RemoteEntity : Object, Core.Entity {
+    
+    // The engine is held weakly: the engine owns its entities, not the
+    // other way round, so a strong back-reference would create a cycle.
+    private weak RemoteEngine _engine;
+    private Core.EntityPath _path;
+    private Core.EntityType _entity_type;
+    private string _type_label;
+    
+    // === Entity Interface Properties (Synchronous - No I/O) ===
+    
+    public unowned Core.Engine engine { 
+        get { return _engine; } 
+    }
+    
+    public Core.EntityPath path { 
+        owned get { return _path; } 
+    }
+    
+    public Core.EntityType entity_type { 
+        get { return _entity_type; } 
+    }
+    
+    // Name and type label come from the data captured at construction;
+    // they are not re-fetched from the server.
+    public string name { 
+        owned get { return _path.name; } 
+    }
+    
+    public string type_label { 
+        owned get { return _type_label; } 
+    }
+    
+    // NOTE(review): the wire protocol does not (yet) carry expression or
+    // configured-type data for remote proxies, so these are always empty.
+    public string configured_expression {
+        owned get { return ""; }
+    }
+    
+    public string configured_type_label {
+        owned get { return ""; }
+    }
+    
+    /**
+     * Whether the entity currently exists on the server.
+     *
+     * Spins a nested MainLoop to run the async existence check
+     * synchronously; any engine error is reported as false.
+     *
+     * NOTE(review): this re-enters the default main context - calling it
+     * from inside a main-loop callback can recurse or stall. Confirm
+     * callers, or cache existence state instead.
+     */
+    public bool exists {
+        get {
+            // Fix: the previous try/catch around this code was dead -
+            // nothing inside it throws (errors surface only inside the
+            // async callback, which already handles them).
+            var loop = new MainLoop();
+            bool result = false;
+            _engine.entity_exists_async.begin(_path, (obj, res) => {
+                try {
+                    result = _engine.entity_exists_async.end(res);
+                } catch (Core.EngineError e) {
+                    result = false;
+                }
+                loop.quit();
+            });
+            loop.run();
+            return result;
+        }
+    }
+    
+    // === Constructor ===
+    
+    /**
+     * Creates a local proxy for a server-side entity.
+     *
+     * All identity data is captured here; later reads go back to the
+     * server through the owning engine.
+     */
+    public RemoteEntity(
+        RemoteEngine engine,
+        Core.EntityPath path,
+        Core.EntityType entity_type,
+        string type_label
+    ) {
+        _engine = engine;
+        _path = path;
+        _entity_type = entity_type;
+        _type_label = type_label;
+    }
+    
+    // === Entity Interface Methods (Async) ===
+    
+    /**
+     * Returns the parent entity, or null for the root.
+     */
+    public async Core.Entity? get_parent_async() throws Core.EntityError {
+        if (_path.is_root) {
+            return null;
+        }
+        
+        try {
+            return yield _engine.get_entity_async(_path.parent);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.PARENT_NOT_FOUND("Failed to get parent: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Lists the names of this entity's children (fetched from the server).
+     */
+    public async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError {
+        try {
+            return yield _engine.get_child_names_async(_path);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.IO_ERROR("Failed to get child names: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Looks up a child by name; returns null when it does not exist or
+     * cannot be fetched.
+     */
+    public async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        var child_path = _path.append_child(name);
+        try {
+            return yield _engine.get_entity_async(child_path);
+        } catch (Core.EngineError e) {
+            return null;
+        }
+    }
+    
+    /**
+     * Fetches all children of this entity.
+     */
+    public async Core.Entity[] get_children_async() throws Core.EntityError {
+        try {
+            return yield _engine.get_children_async(_path);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.IO_ERROR("Failed to get children: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Creates a child container on the server.
+     */
+    public async Core.Entity? create_container_async(string name) throws Core.EntityError {
+        try {
+            return yield _engine.create_container_async(_path, name);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.CREATE_FAILED("Failed to create container: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Creates a child document of the given type on the server.
+     */
+    public async Core.Entity? create_document_async(string name, string type_label) throws Core.EntityError {
+        try {
+            return yield _engine.create_document_async(_path, name, type_label);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.CREATE_FAILED("Failed to create document: %s".printf(e.message));
+        }
+    }
+    
+    // Virtual-entity creation is not part of the wire protocol yet; these
+    // fail loudly rather than silently misbehaving.
+    
+    public async Core.Entity? create_category_async(string name, string type_label, string expression) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Remote category creation not yet supported");
+    }
+    
+    public async Core.Entity? create_catalogue_async(string name, string type_label, string expression) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Remote catalogue creation not yet supported");
+    }
+    
+    public async Core.Entity? create_index_async(string name, string type_label, string expression) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Remote index creation not yet supported");
+    }
+    
+    /**
+     * Deletes this entity on the server.
+     */
+    public async void delete_async() throws Core.EntityError {
+        try {
+            yield _engine.delete_entity_async(_path);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.DELETE_FAILED("Failed to delete entity: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Fetches all properties of this entity.
+     */
+    public async Invercargill.Properties get_properties_async() throws Core.EntityError {
+        try {
+            return yield _engine.get_properties_async(_path);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.IO_ERROR("Failed to get properties: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Fetches a single property value, or null if the server reports none.
+     */
+    public async Invercargill.Element? get_entity_property_async(string name) throws Core.EntityError {
+        try {
+            return yield _engine.get_property_async(_path, name);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.IO_ERROR("Failed to get property: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Sets a property value on the server.
+     */
+    public async void set_entity_property_async(string name, Invercargill.Element value) throws Core.EntityError {
+        try {
+            yield _engine.set_property_async(_path, name, value);
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.IO_ERROR("Failed to set property: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Removes a property. NOTE(review): implemented by writing a
+     * NullElement rather than a true delete - confirm the server treats
+     * null values as removal.
+     */
+    public async void remove_property_async(string name) throws Core.EntityError {
+        try {
+            yield _engine.set_property_async(_path, name, new Invercargill.NullElement());
+        } catch (Core.EngineError e) {
+            throw new Core.EntityError.IO_ERROR("Failed to remove property: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Wraps this entity in an EntitySet for set-algebra operations.
+     */
+    public async Core.EntitySet as_set_async() {
+        return new Core.EntitySet(this);
+    }
+}
+
+/**
+ * Client-side transaction handle.
+ *
+ * Thin proxy over the engine-wide transaction state: commit and
+ * rollback are forwarded to the owning RemoteEngine, and the handle
+ * deactivates itself once either operation completes.
+ */
+internal class RemoteTransaction : Object, Core.Transaction {
+    
+    // Back-reference only; the engine owns the transaction lifecycle.
+    private weak RemoteEngine _engine;
+    private bool _active = true;
+    
+    // Whether this handle can still commit or roll back.
+    public bool active { get { return _active; } }
+    
+    public RemoteTransaction(RemoteEngine engine) {
+        _engine = engine;
+    }
+    
+    public async void commit_async() throws Core.EngineError {
+        if (!_active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("Transaction is not active");
+        }
+        yield _engine.commit_async();
+        _active = false;
+    }
+    
+    public async void rollback_async() {
+        if (_active) {
+            yield _engine.rollback_async();
+            _active = false;
+        }
+    }
+}
+
+} // namespace Implexus.Engine

+ 245 - 0
src/Entities/AbstractEntity.vala

@@ -0,0 +1,245 @@
+/**
+ * AbstractEntity - Base class for all entity implementations
+ * 
+ * This abstract class provides common functionality for all entity types
+ * in Implexus, implementing the Entity interface.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Entities {
+
+/**
+ * Abstract base class for all entity implementations.
+ * 
+ * Provides:
+ * - Common identity properties (path, name, entity_type)
+ * - Parent/child navigation (async)
+ * - Invercargill.Element interface implementation
+ * - Default implementations that throw INVALID_OPERATION for unsupported operations
+ */
+public abstract class AbstractEntity : Object, Core.Entity, Invercargill.Element {
+    
+    // Weak back-reference: the engine manages entities, so a strong
+    // reference here would create a cycle.
+    protected weak Core.Engine _engine;
+    // Identity of this entity within the engine.
+    protected Core.EntityPath _path;
+    
+    /**
+     * Base constructor shared by all entity subclasses.
+     *
+     * @param engine The engine that manages this entity (held weakly)
+     * @param path The path identifying this entity
+     */
+    protected AbstractEntity(Core.Engine engine, Core.EntityPath path) {
+        _engine = engine;
+        _path = path;
+    }
+    
+    // === Entity Interface - Identity (Synchronous - No I/O) ===
+    
+    public unowned Core.Engine engine { get { return _engine; } }
+    public Core.EntityPath path { owned get { return _path; } }
+    public string name { owned get { return _path.name; } }
+    public abstract Core.EntityType entity_type { get; }
+    
+    // === Entity Interface - Parent/Child Navigation (Async) ===
+    
+    /**
+     * Resolves this entity's parent, or null when this is the root.
+     */
+    public async Core.Entity? get_parent_async() throws Core.EntityError {
+        if (_path.is_root) return null;
+        return yield _engine.get_entity_async(_path.parent);
+    }
+    
+    // Default child navigation: no children. Container-like subclasses
+    // override these three methods.
+    public virtual async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError { 
+        return new Invercargill.DataStructures.HashSet<string>();
+    }
+    
+    public virtual async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        return null;
+    }
+    
+    public virtual async Core.Entity[] get_children_async() throws Core.EntityError {
+        return new Core.Entity[0];
+    }
+    
+    // === Entity Interface - Child Management (Async) ===
+    
+    // Default child creation: unsupported. Subclasses that can hold
+    // children override the relevant factory methods.
+    public virtual async Core.Entity? create_container_async(string name) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION(
+            "Cannot create container on %s".printf(entity_type.to_string())
+        );
+    }
+    
+    public virtual async Core.Entity? create_document_async(string name, string type_label) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION(
+            "Cannot create document on %s".printf(entity_type.to_string())
+        );
+    }
+    
+    public virtual async Core.Entity? create_category_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION(
+            "Cannot create category on %s".printf(entity_type.to_string())
+        );
+    }
+    
+    public virtual async Core.Entity? create_catalogue_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION(
+            "Cannot create catalogue on %s".printf(entity_type.to_string())
+        );
+    }
+    
+    public virtual async Core.Entity? create_index_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION(
+            "Cannot create index on %s".printf(entity_type.to_string())
+        );
+    }
+    
+    // === Entity Interface - Document Operations (Async) ===
+    
+    // Default document operations: not a document. Document subclasses
+    // override these.
+    public virtual string type_label { owned get { return ""; } }
+    
+    public virtual async Invercargill.Properties get_properties_async() throws Core.EntityError { 
+        throw new Core.EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    public virtual async Invercargill.Element? get_entity_property_async(string name) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    public virtual async void set_entity_property_async(string name, Invercargill.Element value) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    public virtual async void remove_property_async(string name) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Not a document");
+    }
+    
+    // === Entity Interface - Category/Index Configuration ===
+    
+    // Empty defaults; Category/Catalogue/Index subclasses override.
+    public virtual string configured_expression { owned get { return ""; } }
+    public virtual string configured_type_label { owned get { return ""; } }
+    
+    // === Entity Interface - Lifecycle (Async) ===
+    
+    /**
+     * Deletes this entity and removes it from its parent's child list.
+     *
+     * Inside a transaction both operations are queued on the transaction;
+     * otherwise they are written directly to storage.
+     *
+     * NOTE(review): entity_deleted is emitted even when the delete is only
+     * queued in a pending transaction (i.e. before commit) — confirm
+     * observers expect to hear about the deletion that early.
+     *
+     * @throws Core.EntityError.STORAGE_ERROR if a direct storage write fails
+     */
+    public virtual async void delete_async() throws Core.EntityError {
+        // Get parent before deletion
+        var parent_entity = yield get_parent_async();
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Queue operations in transaction
+            tx.record_delete_entity(_path);
+            if (parent_entity != null) {
+                tx.record_remove_child(((!) parent_entity).path, name);
+            }
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            
+            try {
+                storage.delete_entity(_path);
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to delete entity: %s".printf(e.message));
+            }
+            
+            if (parent_entity != null) {
+                try {
+                    storage.remove_child(((!) parent_entity).path, name);
+                } catch (Storage.StorageError e) {
+                    throw new Core.EntityError.STORAGE_ERROR("Failed to remove child: %s".printf(e.message));
+                }
+            }
+        }
+        
+        _engine.entity_deleted(_path);
+    }
+    
+    public virtual bool exists {
+        get {
+            // Use sync method via EmbeddedEngine cast
+            var embedded = _engine as Engine.EmbeddedEngine;
+            if (embedded != null) {
+                return ((!) embedded).entity_exists_sync(_path);
+            }
+            // For remote engine, we can't do sync property access
+            // Return true as a fallback (remote should override this)
+            return true;
+        }
+    }
+    
+    // === Entity Interface - Set Operations (Async) ===
+    
+    /**
+     * Wraps this entity in a new EntitySet. Performs no I/O; async only
+     * to satisfy the Entity interface.
+     */
+    public async Core.EntitySet as_set_async() {
+        return new Core.EntitySet(this);
+    }
+    
+    // === Invercargill.Element Implementation ===
+    
+    public virtual Type? type() { return typeof(Core.Entity); }
+    public virtual string type_name() { return "Entity"; }
+    public virtual bool is_null() { return false; }
+    
+    // An entity is assignable to Core.Entity or to its own concrete type.
+    public virtual bool is_type(Type t) { 
+        return t.is_a(typeof(Core.Entity)) || t.is_a(get_type());
+    }
+    
+    public virtual bool assignable_to_type(Type t) { 
+        return is_type(t);
+    }
+    
+    /**
+     * Checked cast; throws INVALID_CONVERSION when the target type is
+     * incompatible with this entity.
+     */
+    public virtual T? @as<T>() throws Invercargill.ElementError { 
+        if (is_type(typeof(T))) {
+            return (T) this;
+        }
+        throw new Invercargill.ElementError.INVALID_CONVERSION(
+            "Cannot cast %s to %s".printf(get_type().name(), typeof(T).name())
+        );
+    }
+    
+    // Unchecked cast: the caller asserts compatibility; no runtime check.
+    public virtual T assert_as<T>() {
+        return (T) this;
+    }
+    
+    public virtual T? as_or_default<T>() { 
+        if (is_type(typeof(T))) {
+            return (T) this;
+        }
+        return null;
+    }
+    
+    public virtual bool try_get_as<T>(out T result) { 
+        if (is_type(typeof(T))) {
+            result = (T) this;
+            return true;
+        }
+        result = null;
+        return false;
+    }
+    
+    /**
+     * Converts this entity to a GLib.Value holding a Core.Entity.
+     * Any other requested type raises a generic error in the "Implexus"
+     * domain (code 1).
+     */
+    public virtual GLib.Value to_value(GLib.Type requested_type) throws GLib.Error {
+        if (requested_type.is_a(typeof(Core.Entity))) {
+            var v = Value(typeof(Core.Entity));
+            v.set_object(this);
+            return v;
+        }
+        var error = new GLib.Error.literal(
+            GLib.Quark.from_string("Implexus"),
+            1,
+            "Cannot convert Entity to %s".printf(requested_type.name())
+        );
+        throw error;
+    }
+    
+    public virtual string to_string() {
+        return "Entity(%s)".printf(_path.to_string());
+    }
+}
+
+} // namespace Implexus.Entities

+ 925 - 0
src/Entities/Catalogue.vala

@@ -0,0 +1,925 @@
+/**
+ * Catalogue - Key-based groupings of documents
+ *
+ * A Catalogue constructs groupings keyed by the result of an expression.
+ * Think of it as Dictionary<TKey, List<TItems>> or Invercargill's Catalogue<TKey, TValue>.
+ *
+ * Supports batched hook processing for improved performance during
+ * bulk operations within transactions.
+ *
+ * @version 0.2
+ * @since 0.1
+ */
+namespace Implexus.Entities {
+
+/**
+ * Catalogue entity that groups documents by a key extracted from an expression.
+ *
+ * A Catalogue is configured with:
+ * - A type_label: The document type to catalogue
+ * - An expression: An expression to extract the grouping key (e.g., "author", "status")
+ *
+ * The catalogue maintains pre-computed groupings stored in CatalogueStore,
+ * updated in real-time via hooks when documents are created, modified, or deleted.
+ *
+ * Example usage:
+ * {{{
+ * // Create documents with an "author" property
+ * var posts = yield (yield engine.get_root_async()).create_container_async("posts");
+ * var post1 = yield posts.create_document_async("post1", "Post");
+ * yield post1.set_entity_property_async("author", new Invercargill.NativeElement<string>("john"));
+ *
+ * var post2 = yield posts.create_document_async("post2", "Post");
+ * yield post2.set_entity_property_async("author", new Invercargill.NativeElement<string>("jane"));
+ *
+ * var post3 = yield posts.create_document_async("post3", "Post");
+ * yield post3.set_entity_property_async("author", new Invercargill.NativeElement<string>("john"));
+ *
+ * // Create a catalogue grouping posts by author
+ * var by_author = yield posts.create_catalogue_async("by-author", "Post", "author");
+ *
+ * // Query the catalogue - returns virtual CatalogueGroup entities
+ * // /posts/by-author/john returns [post1, post3]
+ * // /posts/by-author/jane returns [post2]
+ * foreach (var group in yield by_author.get_children_async()) {
+ *     print("Group: %s\n", group.name);
+ * }
+ * }}}
+ */
+public class Catalogue : AbstractEntity, 
+                          Engine.EntityChangeHandler, 
+                          Engine.DocumentPropertyChangeHandler,
+                          Engine.BatchedHookHandler {
+    
+    // === Configuration ===
+    
+    /**
+     * The document type to catalogue.
+     */
+    private string _type_label;
+    
+    /**
+     * The expression to extract the grouping key.
+     */
+    private string _expression;
+    
+    /**
+     * The parsed expression tree (cached).
+     * Populated lazily on first key evaluation and never invalidated,
+     * since the expression is fixed once configuration is loaded.
+     */
+    private Invercargill.Expressions.Expression? _parsed_expression = null;
+    
+    /**
+     * Flag indicating whether configuration has been loaded.
+     * Guards the lazy load performed by ensure_config_loaded().
+     */
+    private bool _config_loaded = false;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new Catalogue with the given engine, path, type label, and expression.
+     *
+     * Configuration is supplied directly, so no storage lookup is needed.
+     *
+     * @param engine The engine that manages this entity
+     * @param path The path to this catalogue
+     * @param type_label The document type to catalogue
+     * @param expression The expression to extract the grouping key
+     */
+    public Catalogue(
+        Core.Engine engine,
+        Core.EntityPath path,
+        string type_label,
+        string expression
+    ) {
+        base(engine, path);
+        _type_label = type_label;
+        _expression = expression;
+        _config_loaded = true;
+    }
+    
+    /**
+     * Creates a Catalogue instance that loads configuration from storage.
+     *
+     * The actual load is deferred until first access (ensure_config_loaded).
+     *
+     * @param engine The engine that manages this entity
+     * @param path The path to this catalogue
+     */
+    public Catalogue.from_storage(Core.Engine engine, Core.EntityPath path) {
+        base(engine, path);
+        _type_label = "";
+        _expression = "";
+        _config_loaded = false;
+    }
+    
+    // === Entity Type ===
+    
+    /**
+     * {@inheritDoc}
+     */
+    public override Core.EntityType entity_type {
+        get { return Core.EntityType.CATALOGUE; }
+    }
+    
+    // === Configuration ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the expression used for key extraction.
+     * Triggers a lazy configuration load on first access.
+     */
+    public override string configured_expression {
+        owned get {
+            ensure_config_loaded();
+            return _expression;
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the document type being catalogued.
+     * Triggers a lazy configuration load on first access.
+     */
+    public override string configured_type_label {
+        owned get {
+            ensure_config_loaded();
+            return _type_label;
+        }
+    }
+    
+    // === Child Navigation (Virtual CatalogueGroups - Async) ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the names of all group keys.
+     * An unavailable store yields an empty set.
+     */
+    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError {
+        ensure_config_loaded();
+        var names = new Invercargill.DataStructures.HashSet<string>();
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            return names;
+        }
+        
+        foreach (var key in ((!) store).get_group_keys(_path)) {
+            names.add(key);
+        }
+        
+        return names;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the CatalogueGroup for the given key name.
+     * Children are virtual wrappers created on demand, not stored entities.
+     * The existence check is a linear scan over the store's group keys.
+     */
+    public override async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        ensure_config_loaded();
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            return null;
+        }
+        
+        // Check if this key exists
+        foreach (var key in ((!) store).get_group_keys(_path)) {
+            if (key == name) {
+                return new CatalogueGroup(_engine, this, name);
+            }
+        }
+        
+        return null;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns all CatalogueGroup entities (eager loading).
+     * One virtual CatalogueGroup wrapper is created per group key.
+     */
+    public override async Core.Entity[] get_children_async() throws Core.EntityError {
+        ensure_config_loaded();
+        var children = new Core.Entity[0];
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            return children;
+        }
+        
+        foreach (var key in ((!) store).get_group_keys(_path)) {
+            children += new CatalogueGroup(_engine, this, key);
+        }
+        
+        return children;
+    }
+    
+    // === Group Management ===
+    
+    /**
+     * Gets all group keys.
+     *
+     * @return An enumerable of group keys (empty when no store is available)
+     */
+    public Invercargill.Enumerable<string> get_group_keys() {
+        ensure_config_loaded();
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            return new Invercargill.DataStructures.Vector<string>().as_enumerable();
+        }
+        
+        return ((!) store).get_group_keys(_path);
+    }
+    
+    /**
+     * Gets the documents in a specific group.
+     *
+     * Member paths that no longer resolve to an entity are silently
+     * skipped.
+     *
+     * @param group_key The group key
+     * @return An array of document entities
+     */
+    public async Core.Entity[] get_group_documents_async(string group_key) throws Core.EntityError {
+        ensure_config_loaded();
+        var documents = new Core.Entity[0];
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            return documents;
+        }
+        
+        foreach (var doc_path in ((!) store).get_group_members(_path, group_key)) {
+            var entity_path = Core.EntityPath.parse(doc_path);
+            var entity = yield _engine.get_entity_async(entity_path);
+            if (entity != null) {
+                documents += (!) entity;
+            }
+        }
+        
+        return documents;
+    }
+    
+    // === Index Management ===
+    
+    /**
+     * Populates the index by scanning all existing documents of the configured type.
+     *
+     * This should be called when a Catalogue is first created.
+     * This method is synchronous as it runs in the DBM thread context.
+     *
+     * Only effective with an EmbeddedEngine (which exposes the entity
+     * store); with any other engine the scan silently does nothing.
+     * A failure to clear the previous index is logged as a warning and
+     * the scan proceeds anyway.
+     *
+     * @throws Core.EngineError if population fails
+     */
+    public void populate_index() throws Core.EngineError {
+        ensure_config_loaded();
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            throw new Core.EngineError.STORAGE_ERROR("CatalogueStore not available");
+        }
+        
+        // Clear existing index (best-effort; a stale index is rebuilt below)
+        try {
+            ((!) store).clear_index(_path);
+        } catch (Storage.StorageError e) {
+            warning("Failed to clear catalogue index: %s", e.message);
+        }
+        
+        // Scan all documents of the configured type via EmbeddedEngine's entity_store
+        var embedded = _engine as Engine.EmbeddedEngine;
+        if (embedded != null) {
+            foreach (var doc_path in ((!) embedded).entity_store.get_documents_by_type(_type_label)) {
+                var entity_path = Core.EntityPath.parse(doc_path);
+                var entity = ((!) embedded).get_entity_or_null_sync(entity_path);
+                if (entity != null) {
+                    string? key = evaluate_key((!) entity);
+                    if (key != null) {
+                        // Convert storage failures into the declared error
+                        // domain instead of letting Storage.StorageError
+                        // escape undeclared (fixes an unhandled-error leak).
+                        try {
+                            add_document_to_group((!) key, doc_path);
+                        } catch (Storage.StorageError e) {
+                            throw new Core.EngineError.STORAGE_ERROR(
+                                "Failed to populate catalogue index: %s".printf(e.message)
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+    
+    /**
+     * Adds a document to a group.
+     *
+     * Delegates to the CatalogueStore, which performs O(1) HashSet
+     * membership checking internally. A missing store is a no-op.
+     *
+     * @param group_key The group key
+     * @param doc_path The document path
+     */
+    public void add_document_to_group(string group_key, string doc_path) throws Storage.StorageError {
+        ensure_config_loaded();
+        
+        var store = get_catalogue_store();
+        if (store == null) {
+            return;
+        }
+        
+        ((!) store).add_to_group(_path, group_key, doc_path);
+    }
+    
+    /**
+     * Removes a document from a group.
+     *
+     * Uses O(1) HashSet membership checking internally. Empty-group key
+     * cleanup is handled internally by the store, so no extra membership
+     * scan is needed here (the previous dead `has_members` probe has been
+     * removed).
+     *
+     * @param group_key The group key
+     * @param doc_path The document path
+     */
+    public void remove_document_from_group(string group_key, string doc_path) throws Storage.StorageError {
+        ensure_config_loaded();
+        
+        var store = get_catalogue_store();
+        if (store != null) {
+            ((!) store).remove_from_group(_path, group_key, doc_path);
+        }
+    }
+    
+    // === BatchedHookHandler Implementation ===
+    
+    /**
+     * Indicates this handler supports batch processing.
+     *
+     * Always true — Catalogue provides on_batch_change for consolidated
+     * event delivery during transactions.
+     */
+    public bool supports_batch {
+        get { return true; }
+    }
+    
+    /**
+     * Handles a batch of entity changes efficiently.
+     *
+     * This method processes all changes in one pass, then performs
+     * batch updates to the catalogue index.
+     *
+     * Note: This runs synchronously in the DBM thread context.
+     *
+     * Storage errors raised while applying the accumulated changes are
+     * logged as warnings; index maintenance is best-effort.
+     *
+     * @param events The consolidated events for matching entities
+     */
+    public void on_batch_change(Invercargill.DataStructures.Vector<Engine.HookEvent> events) {
+        ensure_config_loaded();
+        
+        // Track changes to apply: doc_path -> (old_key, new_key)
+        // Multiple events for the same document merge into one entry.
+        var changes = new Invercargill.DataStructures.Dictionary<string, BatchGroupChange>();
+        
+        foreach (var evt in events) {
+            // Skip non-documents and wrong type
+            if (evt.entity_type != Core.EntityType.DOCUMENT) {
+                continue;
+            }
+            if (evt.type_label != _type_label) {
+                continue;
+            }
+            
+            var doc_path = evt.entity_path.to_string();
+            
+            switch (evt.change_type) {
+                case Engine.EntityChangeType.CREATED:
+                    // Evaluate key and add to appropriate group
+                    // Use cached entity from event to avoid storage lookup
+                    var entity = evt.get_entity(_engine);
+                    if (entity != null) {
+                        string? key = evaluate_key((!) entity);
+                        if (key != null) {
+                            if (!changes.has(doc_path)) {
+                                changes.set(doc_path, new BatchGroupChange());
+                            }
+                            var change = changes.get(doc_path);
+                            if (change != null) {
+                                ((!) change).new_key = (!) key;
+                            }
+                        }
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.MODIFIED:
+                    // Re-evaluate key and update group membership
+                    // Use cached entity from event to avoid storage lookup
+                    var entity = evt.get_entity(_engine);
+                    if (entity != null) {
+                        string? new_key = evaluate_key((!) entity);
+                        string? old_key = find_document_group(doc_path);
+                        
+                        // Only record when the key actually moved.
+                        if (new_key != old_key) {
+                            if (!changes.has(doc_path)) {
+                                changes.set(doc_path, new BatchGroupChange());
+                            }
+                            var change = changes.get(doc_path);
+                            if (change != null) {
+                                ((!) change).old_key = old_key;
+                                ((!) change).new_key = new_key;
+                            }
+                        }
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.DELETED:
+                    // Find current group and remove
+                    string? old_key = find_document_group(doc_path);
+                    if (old_key != null) {
+                        if (!changes.has(doc_path)) {
+                            changes.set(doc_path, new BatchGroupChange());
+                        }
+                        var change = changes.get(doc_path);
+                        if (change != null) {
+                            ((!) change).old_key = old_key;
+                            ((!) change).deleted = true;
+                        }
+                    }
+                    break;
+            }
+        }
+        
+        // Apply all changes (best-effort: failures are logged, not rethrown)
+        try {
+            batch_apply_group_changes(changes);
+        } catch (Storage.StorageError e) {
+            warning("Failed to batch update catalogue: %s", e.message);
+        }
+    }
+    
+    /**
+     * Handles batched property changes.
+     *
+     * Intentionally a no-op for catalogue: the grouping key is
+     * re-evaluated by on_batch_change, which runs first.
+     *
+     * @param document The document that changed
+     * @param changes Map of property name to old/new values
+     */
+    public void on_batch_property_change(
+        Core.Entity document,
+        Invercargill.DataStructures.Dictionary<string, Engine.PropertyChange> changes
+    ) {
+        // For catalogue, we just need to re-evaluate the key
+        // This is handled by on_batch_change which is called first
+    }
+    
+    /**
+     * Tracks a group change for batch processing.
+     */
+    private class BatchGroupChange : Object {
+        public string? old_key = null;    // group the document currently belongs to (null = none)
+        public string? new_key = null;    // group it should belong to after the batch (null = none)
+        public bool deleted = false;      // true when the document itself was deleted
+    }
+    
+    /**
+     * Applies every queued group change from a batch in one pass.
+     *
+     * For each document: first drop it from its previous group (if any),
+     * then — unless it was deleted — insert it into its new group (if any).
+     * Membership checks inside the store are O(1) HashSet lookups.
+     */
+    private void batch_apply_group_changes(
+        Invercargill.DataStructures.Dictionary<string, BatchGroupChange> changes
+    ) throws Storage.StorageError {
+        foreach (var doc_path in changes.keys) {
+            BatchGroupChange? entry = changes.get(doc_path);
+            if (entry == null) {
+                continue;
+            }
+            var change = (!) entry;
+            
+            // Remove from old group if present
+            string? previous = change.old_key;
+            if (previous != null) {
+                remove_document_from_group((!) previous, doc_path);
+            }
+            
+            // Add to new group if not deleted and has new key
+            string? next = change.new_key;
+            if (!change.deleted && next != null) {
+                add_document_to_group((!) next, doc_path);
+            }
+        }
+    }
+    
+    // === EntityChangeHandler Implementation ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Handles entity creation, modification, and deletion events.
+     * This is called for non-batched processing (outside transactions).
+     * Note: This runs synchronously in the DBM thread context.
+     *
+     * Storage errors are logged as warnings and swallowed — index
+     * maintenance is best-effort and must not break the originating
+     * operation.
+     */
+    public void on_entity_change(Core.Entity entity, Engine.EntityChangeType change_type) {
+        ensure_config_loaded();
+        
+        // Only interested in documents of our configured type
+        if (entity.entity_type != Core.EntityType.DOCUMENT) {
+            return;
+        }
+        
+        if (entity.type_label != _type_label) {
+            return;
+        }
+        
+        try {
+            switch (change_type) {
+                case Engine.EntityChangeType.CREATED:
+                    // Evaluate key and add to appropriate group
+                    string? key = evaluate_key(entity);
+                    if (key != null) {
+                        add_document_to_group((!) key, entity.path.to_string());
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.MODIFIED:
+                    // Re-evaluate key and update group membership
+                    update_document_group(entity);
+                    break;
+                    
+                case Engine.EntityChangeType.DELETED:
+                    // Remove from all groups (need to find which group it was in)
+                    remove_document_from_all_groups(entity.path.to_string());
+                    break;
+            }
+        } catch (Storage.StorageError e) {
+            warning("Failed to update catalogue index: %s", e.message);
+        }
+    }
+    
+    // === DocumentPropertyChangeHandler Implementation ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Handles document property change events.
+     * Re-evaluates the key when the relevant property changes.
+     * Note: This runs synchronously in the DBM thread context.
+     *
+     * The old_value/new_value arguments are not inspected; the group key
+     * is simply recomputed from the document's current properties.
+     */
+    public void on_document_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        ensure_config_loaded();
+        
+        // Only interested in documents of our configured type
+        if (document.entity_type != Core.EntityType.DOCUMENT) {
+            return;
+        }
+        
+        if (document.type_label != _type_label) {
+            return;
+        }
+        
+        // Re-evaluate key and update group membership (best-effort)
+        try {
+            update_document_group(document);
+        } catch (Storage.StorageError e) {
+            warning("Failed to update document group: %s", e.message);
+        }
+    }
+    
+    // === Hook Registration ===
+    
+    /**
+     * Registers this catalogue with the hook manager so it receives
+     * entity and property change notifications.
+     */
+    public void register_hooks() {
+        var manager = get_hook_manager();
+        if (manager == null) {
+            return;
+        }
+        var hooks = (!) manager;
+        hooks.register_handler(this);
+        hooks.register_property_handler(this);
+    }
+    
+    /**
+     * Removes this catalogue's entity and property handlers from the
+     * hook manager.
+     */
+    public void unregister_hooks() {
+        var manager = get_hook_manager();
+        if (manager == null) {
+            return;
+        }
+        var hooks = (!) manager;
+        hooks.unregister_handler(this);
+        hooks.unregister_property_handler(this);
+    }
+    
+    // === Lifecycle (Async) ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Deletes this catalogue and clears its index data.
+     *
+     * Hook handlers are unregistered before any data is touched. A
+     * failure to clear the index is logged as a warning and does not
+     * abort the delete.
+     */
+    public override async void delete_async() throws Core.EntityError {
+        // Unregister from hooks
+        unregister_hooks();
+        
+        // Clear index data
+        var store = get_catalogue_store();
+        if (store != null) {
+            try {
+                ((!) store).clear_index(_path);
+            } catch (Storage.StorageError e) {
+                warning("Failed to clear catalogue index on delete: %s", e.message);
+            }
+        }
+        
+        // Delete entity metadata
+        yield base.delete_async();
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * NOTE(review): this override is identical to the AbstractEntity
+     * default and could likely be removed.
+     */
+    public override bool exists {
+        get {
+            // Use sync method via EmbeddedEngine cast
+            var embedded = _engine as Engine.EmbeddedEngine;
+            if (embedded != null) {
+                return ((!) embedded).entity_exists_sync(_path);
+            }
+            return true;
+        }
+    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Ensures configuration is loaded from storage.
+     *
+     * Idempotent: returns immediately once loaded. The loaded flag is set
+     * even when the storage read fails or returns no config, so a failed
+     * load is not retried on every access.
+     * NOTE(review): confirm that treating a failed load as "loaded" (with
+     * empty type_label/expression) is the intended fallback.
+     */
+    private void ensure_config_loaded() {
+        if (_config_loaded) {
+            return;
+        }
+        
+        var storage = _engine.configuration.storage;
+        try {
+            var config = storage.get_catalogue_config(_path);
+            if (config != null) {
+                _type_label = ((!) config).type_label;
+                _expression = ((!) config).expression;
+            }
+        } catch (Storage.StorageError e) {
+            warning("Failed to load catalogue config: %s", e.message);
+        }
+        
+        _config_loaded = true;
+    }
+    
+    /**
+     * Gets the CatalogueStore from the engine if available.
+     *
+     * Only an EmbeddedEngine exposes direct store access; any other
+     * engine type yields null.
+     *
+     * @return The CatalogueStore, or null if not available
+     */
+    internal Storage.HighLevel.CatalogueStore? get_catalogue_store() {
+        var embedded = _engine as Engine.EmbeddedEngine;
+        if (embedded == null) {
+            return null;
+        }
+        return ((!) embedded).catalogue_store;
+    }
+    
+    /**
+     * Gets the HookManager from the engine configuration.
+     *
+     * @return The HookManager, or null if not available
+     */
+    internal Engine.HookManager? get_hook_manager() {
+        return _engine.configuration.hook_manager;
+    }
+    
+    /**
+     * Evaluates the expression to get the group key for a document.
+     *
+     * Note: This method is synchronous and accesses properties synchronously
+     * because it runs in the DBM thread context where synchronous access is safe.
+     *
+     * The parsed expression tree is cached on first use and never
+     * invalidated (the expression is fixed once configuration is loaded).
+     * Only Document instances can be evaluated; any other entity yields
+     * null. Any parse/evaluation error is logged at debug level and
+     * treated as "no key".
+     *
+     * @param doc The document to evaluate against
+     * @return The group key, or null if the property doesn't exist
+     */
+    private string? evaluate_key(Core.Entity doc) {
+        try {
+            // Parse expression if not cached
+            if (_parsed_expression == null) {
+                _parsed_expression = Invercargill.Expressions.ExpressionParser.parse(_expression);
+            }
+            
+            // For hook evaluation, we need synchronous property access
+            // This works because hooks run in the DBM thread context
+            var doc_impl = doc as Document;
+            if (doc_impl == null) {
+                return null;
+            }
+            
+            // Access properties synchronously (safe in DBM thread)
+            var properties = ((!) doc_impl).get_properties_sync();
+            
+            // Create evaluator with document properties as root values
+            var evaluator = new Invercargill.Expressions.ExpressionEvaluator();
+            
+            // Evaluate the expression
+            var result = evaluator.evaluate((!) _parsed_expression, properties);
+            
+            if (result.is_null()) {
+                return null;
+            }
+            
+            // Convert result to string
+            return result.as<string>();
+        } catch (Error e) {
+            // If evaluation fails, document doesn't have the key
+            debug("Expression evaluation failed for %s: %s", doc.path.to_string(), e.message);
+            return null;
+        }
+    }
+    
+    /**
+     * Updates a document's group membership based on current key evaluation.
+     *
+     * @param doc The document to update
+     */
+    private void update_document_group(Core.Entity doc) throws Storage.StorageError {
+        var doc_path = doc.path.to_string();
+        string? target_key = evaluate_key(doc);
+        string? current_key = find_document_group(doc_path);
+        
+        // Membership is already correct when the key is unchanged
+        if (target_key == current_key) {
+            return;
+        }
+        
+        // Move the document: leave the old group, then join the new one
+        if (current_key != null) {
+            remove_document_from_group((!) current_key, doc_path);
+        }
+        if (target_key != null) {
+            add_document_to_group((!) target_key, doc_path);
+        }
+    }
+    
+    /**
+     * Finds which group a document is currently in.
+     *
+     * NOTE(review): this is a linear scan over every group and every
+     * member — O(total members across all groups), not O(1) as the store's
+     * HashSet backing might suggest. A reverse (document -> group) index
+     * in CatalogueStore would make this a constant-time lookup.
+     *
+     * @param doc_path The document path
+     * @return The group key, or null if not in any group
+     */
+    private string? find_document_group(string doc_path) {
+        var store = get_catalogue_store();
+        if (store == null) {
+            return null;
+        }
+        
+        foreach (var key in ((!) store).get_group_keys(_path)) {
+            foreach (var member in ((!) store).get_group_members(_path, key)) {
+                if (member == doc_path) {
+                    return key;
+                }
+            }
+        }
+        
+        return null;
+    }
+    
+    /**
+     * Removes a document from all groups.
+     *
+     * A document can only ever belong to one group at a time, so removing
+     * it from its current group (if any) is sufficient.
+     *
+     * @param doc_path The document path
+     */
+    private void remove_document_from_all_groups(string doc_path) throws Storage.StorageError {
+        var key = find_document_group(doc_path);
+        if (key == null) {
+            return;
+        }
+        remove_document_from_group((!) key, doc_path);
+    }
+    
+    /**
+     * Returns a string representation of this catalogue.
+     */
+    public new string to_string() {
+        // Load configuration first so a storage-backed catalogue reports its
+        // actual type label and expression instead of the empty placeholders
+        // set by the from_storage constructor.
+        ensure_config_loaded();
+        return "Catalogue(%s, type=%s, expr=%s)".printf(_path.to_string(), _type_label, _expression);
+    }
+}
+
+/**
+ * Virtual entity representing a single group within a Catalogue.
+ *
+ * A CatalogueGroup is a virtual container that holds all documents
+ * that share the same key value in the parent Catalogue. It has no
+ * storage of its own; all lookups delegate to the parent's store.
+ */
+public class CatalogueGroup : AbstractEntity {
+    
+    /**
+     * The parent catalogue.
+     */
+    private Catalogue _parent;
+    
+    /**
+     * The group key.
+     */
+    private string _group_key;
+    
+    /**
+     * Creates a new CatalogueGroup.
+     *
+     * @param engine The engine that manages this entity
+     * @param parent The parent catalogue
+     * @param group_key The group key
+     */
+    public CatalogueGroup(Core.Engine engine, Catalogue parent, string group_key) {
+        // Create path by appending group_key to parent path
+        base(engine, parent.path.append_child(group_key));
+        _parent = parent;
+        _group_key = group_key;
+    }
+    
+    /**
+     * {@inheritDoc}
+     */
+    public override Core.EntityType entity_type {
+        get { return Core.EntityType.CATALOGUE; }  // Same type as parent for simplicity
+    }
+    
+    /**
+     * The group key for this group.
+     */
+    public string group_key {
+        get { return _group_key; }
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the names of all documents in this group.
+     * Returns an empty set when the engine exposes no catalogue store.
+     */
+    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError {
+        var names = new Invercargill.DataStructures.HashSet<string>();
+        
+        var store = _parent.get_catalogue_store();
+        if (store == null) {
+            return names;
+        }
+        
+        foreach (var doc_path in ((!) store).get_group_members(_parent.path, _group_key)) {
+            var entity_path = Core.EntityPath.parse(doc_path);
+            names.add(entity_path.name);
+        }
+        
+        return names;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the document with the given name if it's in this group.
+     * NOTE(review): linear scan over the group's members.
+     */
+    public override async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        var store = _parent.get_catalogue_store();
+        if (store == null) {
+            return null;
+        }
+        
+        foreach (var doc_path in ((!) store).get_group_members(_parent.path, _group_key)) {
+            var entity_path = Core.EntityPath.parse(doc_path);
+            if (entity_path.name == name) {
+                return yield _engine.get_entity_async(entity_path);
+            }
+        }
+        
+        return null;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns all documents in this group (eager loading).
+     * Delegates the actual resolution to the parent catalogue.
+     */
+    public override async Core.Entity[] get_children_async() throws Core.EntityError {
+        return yield _parent.get_group_documents_async(_group_key);
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * A group exists only while at least one document maps to its key.
+     * NOTE(review): linear scan over all group keys — a store-side
+     * has_group check would be O(1) if available.
+     */
+    public override bool exists {
+        get {
+            var store = _parent.get_catalogue_store();
+            if (store == null) {
+                return false;
+            }
+            
+            foreach (var key in ((!) store).get_group_keys(_parent.path)) {
+                if (key == _group_key) {
+                    return true;
+                }
+            }
+            
+            return false;
+        }
+    }
+    
+    /**
+     * Returns a string representation of this group.
+     */
+    public new string to_string() {
+        return "CatalogueGroup(%s, key=%s)".printf(_path.to_string(), _group_key);
+    }
+}
+
+} // namespace Implexus.Entities

+ 719 - 0
src/Entities/Category.vala

@@ -0,0 +1,719 @@
+/**
+ * Category - Indexed container for documents matching a boolean predicate
+ *
+ * A Category maintains a pre-computed index of document paths that match
+ * a configured boolean predicate expression. This provides O(k) lookups
+ * where k = number of matching documents, instead of O(n) scans.
+ *
+ * Supports batched hook processing for improved performance during
+ * bulk operations within transactions.
+ *
+ * @version 0.3
+ * @since 0.1
+ */
+namespace Implexus.Entities {
+
+/**
+ * Category entity that contains documents matching a boolean predicate.
+ *
+ * A Category is configured with:
+ * - A type_label: The document type to filter
+ * - An expression: A boolean predicate expression (e.g., "!draft", "status == \"active\"")
+ *
+ * The category maintains a pre-computed member set stored in CategoryStore,
+ * updated in real-time via hooks when documents are created, modified, or deleted.
+ *
+ * Example usage:
+ * {{{
+ * // Create documents with a "draft" property
+ * var posts = yield (yield engine.get_root_async()).create_container_async("posts");
+ * var post1 = yield posts.create_document_async("post1", "Post");
+ * yield post1.set_entity_property_async("draft", new Invercargill.NativeElement<bool?>(true));
+ *
+ * var post2 = yield posts.create_document_async("post2", "Post");
+ * yield post2.set_entity_property_async("draft", new Invercargill.NativeElement<bool?>(false));
+ *
+ * // Create a category for active (non-draft) posts
+ * var active = yield posts.create_category_async("active", "Post", "!draft");
+ *
+ * // Query the category - returns post2 only
+ * foreach (var doc in yield active.get_children_async()) {
+ *     print("%s\n", doc.name);
+ * }
+ * }}}
+ */
+public class Category : AbstractEntity, 
+                          Engine.EntityChangeHandler, 
+                          Engine.DocumentPropertyChangeHandler,
+                          Engine.BatchedHookHandler {
+    
+    // === Configuration ===
+    
+    /**
+     * The document type to filter.
+     */
+    private string _type_label;
+    
+    /**
+     * The boolean predicate expression.
+     */
+    private string _expression;
+    
+    /**
+     * Whether expression evaluation is case-sensitive.
+     */
+    private bool _case_sensitive;
+    
+    /**
+     * The parsed expression tree (cached).
+     */
+    private Invercargill.Expressions.Expression? _parsed_expression = null;
+    
+    /**
+     * Flag indicating whether configuration has been loaded.
+     */
+    private bool _config_loaded = false;
+    
+    // === Index Storage Keys ===
+    
+    /**
+     * Key suffix for category configuration.
+     */
+    private const string CONFIG_KEY = "config";
+    
+    /**
+     * Key suffix for category members.
+     */
+    private const string MEMBERS_KEY = "members";
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new Category with the given engine, path, type label, and expression.
+     *
+     * @param engine The engine that manages this entity
+     * @param path The path to this category
+     * @param type_label The document type to filter
+     * @param expression The boolean predicate expression
+     * @param case_sensitive Whether expression evaluation is case-sensitive (default: false)
+     */
+    public Category(
+        Core.Engine engine,
+        Core.EntityPath path,
+        string type_label,
+        string expression,
+        bool case_sensitive = false
+    ) {
+        base(engine, path);
+        _type_label = type_label;
+        _expression = expression;
+        _case_sensitive = case_sensitive;
+        // Configuration supplied directly by the caller; no lazy load needed
+        _config_loaded = true;
+    }
+    
+    /**
+     * Creates a Category instance that loads configuration from storage.
+     *
+     * Configuration fields start empty and are populated on first access
+     * via ensure_config_loaded().
+     *
+     * @param engine The engine that manages this entity
+     * @param path The path to this category
+     */
+    public Category.from_storage(Core.Engine engine, Core.EntityPath path) {
+        base(engine, path);
+        _type_label = "";
+        _expression = "";
+        _case_sensitive = false;
+        _config_loaded = false;
+    }
+    
+    // === Entity Type ===
+    
+    /**
+     * {@inheritDoc}
+     */
+    public override Core.EntityType entity_type {
+        get { return Core.EntityType.CATEGORY; }
+    }
+    
+    // === Configuration ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the expression used for filtering.
+     */
+    public override string configured_expression {
+        owned get {
+            ensure_config_loaded();
+            return _expression;
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the document type being filtered.
+     */
+    public override string configured_type_label {
+        owned get {
+            ensure_config_loaded();
+            return _type_label;
+        }
+    }
+    
+    /**
+     * Whether expression evaluation is case-sensitive.
+     */
+    public bool case_sensitive {
+        get {
+            ensure_config_loaded();
+            return _case_sensitive;
+        }
+    }
+    
+    // === Child Navigation (Indexed - Async) ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the names of all documents in the member set.
+     */
+    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError {
+        ensure_config_loaded();
+        var result = new Invercargill.DataStructures.HashSet<string>();
+        
+        var store = get_category_store();
+        if (store != null) {
+            // Members are stored as full path strings; expose only leaf names
+            foreach (var member in ((!) store).get_members(_path)) {
+                result.add(Core.EntityPath.parse(member).name);
+            }
+        }
+        
+        return result;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns the document with the given name if it's in the member set.
+     */
+    public override async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        ensure_config_loaded();
+        
+        var store = get_category_store();
+        if (store == null) {
+            return null;
+        }
+        
+        // Linear scan: resolve the first member whose leaf name matches
+        foreach (var member in ((!) store).get_members(_path)) {
+            var member_path = Core.EntityPath.parse(member);
+            if (member_path.name != name) {
+                continue;
+            }
+            return yield _engine.get_entity_async(member_path);
+        }
+        
+        return null;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Returns all documents in the member set (eager loading).
+     * This is an O(k) operation where k = number of members.
+     */
+    public override async Core.Entity[] get_children_async() throws Core.EntityError {
+        ensure_config_loaded();
+        
+        var store = get_category_store();
+        if (store == null) {
+            return new Core.Entity[0];
+        }
+        
+        // Resolve each member path back into a live entity; members that
+        // can no longer be resolved are skipped rather than reported
+        var result = new Core.Entity[0];
+        foreach (var member in ((!) store).get_members(_path)) {
+            var entity = yield _engine.get_entity_async(Core.EntityPath.parse(member));
+            if (entity != null) {
+                result += (!) entity;
+            }
+        }
+        
+        return result;
+    }
+    
+    // === Index Management ===
+    
+    /**
+     * Populates the index by scanning all existing documents of the configured type.
+     *
+     * This should be called when a Category is first created.
+     * This method is synchronous as it runs in the DBM thread context.
+     *
+     * NOTE(review): on a non-embedded engine the scan is skipped entirely,
+     * so an empty member set is persisted — confirm that remote engines
+     * never call this directly.
+     *
+     * @throws Core.EngineError if population fails
+     */
+    public void populate_index() throws Core.EngineError {
+        ensure_config_loaded();
+        
+        var store = get_category_store();
+        if (store == null) {
+            throw new Core.EngineError.STORAGE_ERROR("CategoryStore not available");
+        }
+        
+        // Clear existing members; a failure here is logged but not fatal
+        // because set_members below overwrites the member set anyway
+        try {
+            ((!) store).clear_index(_path);
+        } catch (Storage.StorageError e) {
+            warning("Failed to clear category index: %s", e.message);
+        }
+        
+        // Scan all documents of the configured type via EmbeddedEngine's entity_store
+        var members = new Invercargill.DataStructures.Vector<string>();
+        var embedded = _engine as Engine.EmbeddedEngine;
+        
+        if (embedded != null) {
+            // Use the type index from entity_store to get all documents of this type
+            foreach (var doc_path in ((!) embedded).entity_store.get_documents_by_type(_type_label)) {
+                var entity_path = Core.EntityPath.parse(doc_path);
+                var entity = ((!) embedded).get_entity_or_null_sync(entity_path);
+                if (entity != null && evaluate_predicate((!) entity)) {
+                    members.add(doc_path);
+                }
+            }
+        }
+        
+        // Store the member set
+        try {
+            ((!) store).set_members(_path, members.as_enumerable());
+        } catch (Storage.StorageError e) {
+            throw new Core.EngineError.STORAGE_ERROR("Failed to populate index: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Checks if a document is in this category's member set.
+     *
+     * Delegates to CategoryStore.has_member, which is presumably O(1)
+     * HashSet membership checking — confirm in the store implementation.
+     *
+     * @param doc_path The document path to check
+     * @return true if the document is a member
+     */
+    public bool contains_document(string doc_path) {
+        ensure_config_loaded();
+        
+        var store = get_category_store();
+        if (store == null) {
+            return false;
+        }
+        
+        return ((!) store).has_member(_path, doc_path);
+    }
+    
+    /**
+     * Adds a document to the member set.
+     *
+     * This is called by hooks when a document starts matching the predicate.
+     *
+     * @param doc_path The document path to add
+     */
+    public void add_document(string doc_path) throws Storage.StorageError {
+        ensure_config_loaded();
+        
+        var store = get_category_store();
+        if (store == null) {
+            return;
+        }
+        ((!) store).add_member(_path, doc_path);
+    }
+    
+    /**
+     * Removes a document from the member set.
+     *
+     * This is called by hooks when a document stops matching or is deleted.
+     *
+     * @param doc_path The document path to remove
+     */
+    public void remove_document(string doc_path) throws Storage.StorageError {
+        ensure_config_loaded();
+        
+        var store = get_category_store();
+        if (store == null) {
+            return;
+        }
+        ((!) store).remove_member(_path, doc_path);
+    }
+    
+    // === BatchedHookHandler Implementation ===
+    
+    /**
+     * Indicates this handler supports batch processing.
+     */
+    public bool supports_batch {
+        get { return true; }
+    }
+    
+    /**
+     * Handles a batch of entity changes efficiently.
+     *
+     * This method processes all changes in one pass, then performs
+     * a single batch update to the category index.
+     *
+     * Note: This runs synchronously in the DBM thread context.
+     *
+     * @param events The consolidated events for matching entities
+     */
+    public void on_batch_change(Invercargill.DataStructures.Vector<Engine.HookEvent> events) {
+        ensure_config_loaded();
+        
+        // Accumulate deltas so storage is touched at most twice (add/remove)
+        var to_add = new Invercargill.DataStructures.Vector<string>();
+        var to_remove = new Invercargill.DataStructures.Vector<string>();
+        
+        foreach (var evt in events) {
+            // Skip non-documents and wrong type
+            if (evt.entity_type != Core.EntityType.DOCUMENT) {
+                continue;
+            }
+            if (evt.type_label != _type_label) {
+                continue;
+            }
+            
+            var doc_path = evt.entity_path.to_string();
+            
+            switch (evt.change_type) {
+                case Engine.EntityChangeType.CREATED:
+                    // Evaluate predicate and add if matches
+                    // Use cached entity from event to avoid storage lookup
+                    var entity = evt.get_entity(_engine);
+                    if (entity != null && evaluate_predicate((!) entity)) {
+                        to_add.add(doc_path);
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.MODIFIED:
+                    // Re-evaluate and update membership
+                    // Use cached entity from event to avoid storage lookup
+                    var entity = evt.get_entity(_engine);
+                    if (entity != null) {
+                        bool should_include = evaluate_predicate((!) entity);
+                        bool is_included = contains_document(doc_path);
+                        
+                        if (should_include && !is_included) {
+                            to_add.add(doc_path);
+                        } else if (!should_include && is_included) {
+                            to_remove.add(doc_path);
+                        }
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.DELETED:
+                    // Always queue removal; presumably remove_members
+                    // tolerates paths that are not current members — confirm
+                    to_remove.add(doc_path);
+                    break;
+            }
+        }
+        
+        // Batch update the index
+        try {
+            batch_update_members(to_add, to_remove);
+        } catch (Storage.StorageError e) {
+            warning("Failed to batch update category: %s", e.message);
+        }
+    }
+    
+    /**
+     * Handles batched property changes.
+     *
+     * Intentionally a no-op for Category: membership depends only on the
+     * predicate result, which on_batch_change already re-evaluates.
+     *
+     * @param document The document that changed
+     * @param changes Map of property name to old/new values
+     */
+    public void on_batch_property_change(
+        Core.Entity document,
+        Invercargill.DataStructures.Dictionary<string, Engine.PropertyChange> changes
+    ) {
+        // For category, we just need to re-evaluate the predicate
+        // This is handled by on_batch_change which is called first
+    }
+    
+    /**
+     * Batch updates the category membership.
+     *
+     * Issues at most one storage call per direction (add, then remove).
+     *
+     * @param to_add Paths to add to the category
+     * @param to_remove Paths to remove from the category
+     */
+    private void batch_update_members(
+        Invercargill.DataStructures.Vector<string> to_add,
+        Invercargill.DataStructures.Vector<string> to_remove
+    ) throws Storage.StorageError {
+        var store = get_category_store();
+        if (store == null) {
+            // No store available: silently drop the deltas
+            return;
+        }
+        
+        if (to_add.count() != 0) {
+            ((!) store).add_members(_path, to_add.as_enumerable());
+        }
+        if (to_remove.count() != 0) {
+            ((!) store).remove_members(_path, to_remove.as_enumerable());
+        }
+    }
+    
+    // === EntityChangeHandler Implementation ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Handles entity creation, modification, and deletion events.
+     * This is called for non-batched processing (outside transactions).
+     * Note: This runs synchronously in the DBM thread context.
+     */
+    public void on_entity_change(Core.Entity entity, Engine.EntityChangeType change_type) {
+        ensure_config_loaded();
+        
+        // Only interested in documents of our configured type
+        if (entity.entity_type != Core.EntityType.DOCUMENT) {
+            return;
+        }
+        
+        if (entity.type_label != _type_label) {
+            return;
+        }
+        
+        try {
+            switch (change_type) {
+                case Engine.EntityChangeType.CREATED:
+                    // Evaluate predicate and add if matches
+                    if (evaluate_predicate(entity)) {
+                        add_document(entity.path.to_string());
+                    }
+                    break;
+                    
+                case Engine.EntityChangeType.MODIFIED:
+                    // Re-evaluate predicate and update membership
+                    update_document_membership(entity);
+                    break;
+                    
+                case Engine.EntityChangeType.DELETED:
+                    // Remove from members if present
+                    remove_document(entity.path.to_string());
+                    break;
+            }
+        } catch (Storage.StorageError e) {
+            // Index update failures are logged but do not abort the change
+            warning("Failed to update category index: %s", e.message);
+        }
+    }
+    
+    // === DocumentPropertyChangeHandler Implementation ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Handles document property change events.
+     * Re-evaluates the predicate when any property changes.
+     * Note: This runs synchronously in the DBM thread context.
+     */
+    public void on_document_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        ensure_config_loaded();
+        
+        // Only interested in documents of our configured type
+        if (document.entity_type != Core.EntityType.DOCUMENT) {
+            return;
+        }
+        
+        if (document.type_label != _type_label) {
+            return;
+        }
+        
+        // Re-evaluate predicate and update membership
+        try {
+            update_document_membership(document);
+        } catch (Storage.StorageError e) {
+            warning("Failed to update document membership: %s", e.message);
+        }
+    }
+    
+    // === Hook Registration ===
+    
+    /**
+     * Registers this category with the hook manager to receive change notifications.
+     */
+    public void register_hooks() {
+        var manager = get_hook_manager();
+        if (manager == null) {
+            return;
+        }
+        ((!) manager).register_handler(this);
+        ((!) manager).register_property_handler(this);
+    }
+    
+    /**
+     * Unregisters this category from the hook manager.
+     */
+    public void unregister_hooks() {
+        var manager = get_hook_manager();
+        if (manager == null) {
+            return;
+        }
+        ((!) manager).unregister_handler(this);
+        ((!) manager).unregister_property_handler(this);
+    }
+    
+    // === Lifecycle (Async) ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Deletes this category and clears its index data.
+     *
+     * NOTE(review): hooks are unregistered before the entity metadata is
+     * deleted; if base.delete_async() then throws, the category remains in
+     * storage but no longer receives change notifications.
+     */
+    public override async void delete_async() throws Core.EntityError {
+        // Unregister from hooks
+        unregister_hooks();
+        
+        // Clear index data
+        var store = get_category_store();
+        if (store != null) {
+            try {
+                ((!) store).clear_index(_path);
+            } catch (Storage.StorageError e) {
+                // Best-effort cleanup: orphaned index data is logged, not fatal
+                warning("Failed to clear category index on delete: %s", e.message);
+            }
+        }
+        
+        // Delete entity metadata
+        yield base.delete_async();
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Only an EmbeddedEngine can answer synchronously; other engine
+     * implementations optimistically report the entity as existing.
+     */
+    public override bool exists {
+        get {
+            if (_engine is Engine.EmbeddedEngine) {
+                return ((Engine.EmbeddedEngine) _engine).entity_exists_sync(_path);
+            }
+            return true;
+        }
+    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Ensures configuration is loaded from storage.
+     *
+     * Lazily populates _type_label and _expression the first time any
+     * accessor needs them. Safe to call repeatedly; no-op once loaded.
+     */
+    private void ensure_config_loaded() {
+        if (_config_loaded) {
+            return;
+        }
+        
+        var storage = _engine.configuration.storage;
+        try {
+            var config = storage.get_category_config(_path);
+            if (config != null) {
+                _type_label = ((!) config).type_label;
+                _expression = ((!) config).expression;
+                // Note: case_sensitive not yet stored in config, default to false
+                _case_sensitive = false;
+            }
+        } catch (Storage.StorageError e) {
+            // Best-effort: log and fall through with the empty defaults
+            warning("Failed to load category config: %s", e.message);
+        }
+        
+        // NOTE(review): the flag is set even when loading failed, so a
+        // transient storage error leaves this category permanently
+        // unconfigured — confirm this is the intended trade-off.
+        _config_loaded = true;
+    }
+    
+    /**
+     * Gets the CategoryStore from the engine if available.
+     *
+     * @return The CategoryStore, or null if not available
+     */
+    private Storage.HighLevel.CategoryStore? get_category_store() {
+        // Direct store access is only available on an EmbeddedEngine
+        var embedded = _engine as Engine.EmbeddedEngine;
+        if (embedded == null) {
+            return null;
+        }
+        return ((!) embedded).category_store;
+    }
+    
+    /**
+     * Gets the HookManager from the engine configuration.
+     *
+     * @return The HookManager, or null if not available
+     */
+    private Engine.HookManager? get_hook_manager() {
+        return _engine.configuration.hook_manager;
+    }
+    
+    /**
+     * Evaluates the predicate expression against a document.
+     *
+     * Note: This method is synchronous and accesses properties synchronously
+     * because it runs in the DBM thread context where synchronous access is safe.
+     *
+     * NOTE(review): _case_sensitive is configured but never consulted here —
+     * confirm whether the evaluator is supposed to receive it.
+     *
+     * @param doc The document to evaluate against
+     * @return true if the document matches the predicate
+     */
+    private bool evaluate_predicate(Core.Entity doc) {
+        try {
+            // Parse expression if not cached; assumes _expression does not
+            // change after configuration load, so the parse tree stays valid
+            if (_parsed_expression == null) {
+                _parsed_expression = Invercargill.Expressions.ExpressionParser.parse(_expression);
+            }
+            
+            // For hook evaluation, we need synchronous property access
+            // This works because hooks run in the DBM thread context
+            var doc_impl = doc as Document;
+            if (doc_impl == null) {
+                // Non-Document entities can never match
+                return false;
+            }
+            
+            // Access properties synchronously (safe in DBM thread)
+            var properties = ((!) doc_impl).get_properties_sync();
+            
+            // Create evaluator with document properties as root values
+            var evaluator = new Invercargill.Expressions.ExpressionEvaluator();
+            
+            // Evaluate the expression
+            return evaluator.evaluate_as_bool((!) _parsed_expression, properties);
+        } catch (Error e) {
+            // If evaluation fails, document doesn't match
+            debug("Expression evaluation failed for %s: %s", doc.path.to_string(), e.message);
+            return false;
+        }
+    }
+    
+    /**
+     * Updates a document's membership based on current predicate evaluation.
+     *
+     * @param doc The document to update
+     */
+    private void update_document_membership(Core.Entity doc) throws Storage.StorageError {
+        var doc_path = doc.path.to_string();
+        bool matches = evaluate_predicate(doc);
+        bool member = contains_document(doc_path);
+        
+        // Nothing to do when membership already reflects the predicate
+        if (matches == member) {
+            return;
+        }
+        if (matches) {
+            add_document(doc_path);
+        } else {
+            remove_document(doc_path);
+        }
+    }
+    
+    /**
+     * Returns a string representation of this category.
+     */
+    public new string to_string() {
+        // Load configuration first so a storage-backed category reports its
+        // actual type label and expression instead of the empty placeholders
+        // set by the from_storage constructor.
+        ensure_config_loaded();
+        return "Category(%s, type=%s, expr=%s)".printf(_path.to_string(), _type_label, _expression);
+    }
+}
+
+} // namespace Implexus.Entities

+ 452 - 0
src/Entities/Container.vala

@@ -0,0 +1,452 @@
+/**
+ * Container - Container entity for child entities
+ * 
+ * A Container is a folder-like container that can hold child entities
+ * of any type (Container, Document, Category, or Index).
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Entities {
+
+/**
+ * Container entity that can hold child entities.
+ * 
+ * Containers are similar to filesystem folders - they can contain
+ * any type of entity including other containers, documents, categories,
+ * and indexes.
+ * 
+ * Example usage:
+ * {{{
+ * var root = yield engine.get_root_async();
+ * var users = yield root.create_container_async("users");
+ * var john = yield users.create_document_async("john", "User");
+ * yield john.set_entity_property_async("email", new Invercargill.NativeElement<string>("john@example.com"));
+ * }}}
+ */
+public class Container : AbstractEntity {
+    
+    // === Constructors ===
+    
    /**
     * Creates a new Container with the given engine and path.
     * 
     * The constructor only chains up to {@link AbstractEntity}; no
     * storage access happens here. Children are added later through the
     * create_*_async methods.
     * 
     * @param engine The engine that manages this entity
     * @param path The path to this container
     */
    public Container(Core.Engine engine, Core.EntityPath path) {
        base(engine, path);
    }
+    
+    // === Entity Type ===
+    
    /**
     * {@inheritDoc}
     * 
     * Always reports {@link Core.EntityType.CONTAINER}.
     */
    public override Core.EntityType entity_type { 
        get { return Core.EntityType.CONTAINER; }
    }
+    
+    // === Child Navigation (Async) ===
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns the names of all children stored in this container.
+     */
+    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError { 
+        var storage = _engine.configuration.storage;
+        try {
+            var children = storage.get_children(_path);
+            var set = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var name in children) {
+                set.add(name);
+            }
+            return set;
+        } catch (Storage.StorageError e) {
+            throw new Core.EntityError.STORAGE_ERROR("Failed to get children: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Gets a child entity by name from storage.
+     */
+    public override async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        var child_path = _path.append_child(name);
+        return yield _engine.get_entity_async(child_path);
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns all child entities (eager loading).
+     */
+    public override async Core.Entity[] get_children_async() throws Core.EntityError {
+        var names = yield get_child_names_async();
+        var children = new Core.Entity[0];
+        foreach (var name in names) {
+            var child = yield get_child_async(name);
+            if (child != null) {
+                children += (!) child;
+            }
+        }
+        return children;
+    }
+    
+    // === Child Creation (Async) ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Creates a new Container child in this container.
+     *
+     * @param name The name for the new container
+     * @return The created container entity
+     * @throws Core.EntityError.INVALID_PATH if name is empty
+     * @throws Core.EntityError.ENTITY_ALREADY_EXISTS if an entity with this name already exists
+     */
+    public override async Core.Entity? create_container_async(string name) throws Core.EntityError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.append_child(name);
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Queue operations in transaction
+            tx.record_create_entity(child_path, Core.EntityType.CONTAINER, null);
+            tx.record_add_child(_path, name);
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            try {
+                storage.store_entity_metadata(child_path, Core.EntityType.CONTAINER, null);
+                storage.add_child(_path, name);
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to create container: %s".printf(e.message));
+            }
+        }
+        
+        // Create entity instance and notify engine
+        var container = new Container(_engine, child_path);
+        _engine.entity_created(container);
+        
+        return container;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Creates a new Document child in this container.
+     *
+     * @param name The name for the new document
+     * @param type_label The application-defined type for the document
+     * @return The created document entity
+     * @throws Core.EntityError.INVALID_PATH if name is empty
+     * @throws Core.EntityError.ENTITY_ALREADY_EXISTS if an entity with this name already exists
+     */
+    public override async Core.Entity? create_document_async(string name, string type_label) throws Core.EntityError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.append_child(name);
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Queue operations in transaction
+            tx.record_create_entity(child_path, Core.EntityType.DOCUMENT, type_label);
+            tx.record_add_child(_path, name);
+            // Note: CREATE_ENTITY for DOCUMENT already stores empty properties
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            try {
+                storage.store_entity_metadata(child_path, Core.EntityType.DOCUMENT, type_label);
+                storage.store_properties(child_path, new Invercargill.DataStructures.PropertyDictionary());
+                storage.add_child(_path, name);
+                
+                // Register document in type index for Index.populate_index()
+                if (embedded_engine != null) {
+                    ((!) embedded_engine).entity_store.register_document_type(type_label, child_path.to_string());
+                }
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to create document: %s".printf(e.message));
+            }
+        }
+        
+        // Create entity instance and notify engine
+        var document = new Document(_engine, child_path, type_label);
+        _engine.entity_created(document);
+        
+        return document;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Creates a new Category child in this container.
+     *
+     * The category will be populated with all existing documents that match
+     * the predicate expression, and will automatically register for change
+     * notifications to keep its index up to date.
+     *
+     * @param name The name for the new category
+     * @param type_label The document type to filter
+     * @param expression The boolean predicate expression for filtering
+     * @return The created category entity
+     * @throws Core.EntityError.INVALID_PATH if name is empty
+     * @throws Core.EntityError.ENTITY_ALREADY_EXISTS if an entity with this name already exists
+     */
+    public override async Core.Entity? create_category_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws Core.EntityError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.append_child(name);
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Queue operations in transaction
+            tx.record_create_entity(child_path, Core.EntityType.CATEGORY, type_label);
+            tx.record_save_category_config(child_path, type_label, expression);
+            tx.record_add_child(_path, name);
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            try {
+                storage.store_entity_metadata(child_path, Core.EntityType.CATEGORY, type_label);
+                storage.store_category_config(child_path, type_label, expression);
+                storage.add_child(_path, name);
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to create category: %s".printf(e.message));
+            }
+        }
+        
+        // Create entity instance
+        var category = new Category(_engine, child_path, type_label, expression);
+        
+        // Populate the index with existing documents
+        try {
+            category.populate_index();
+        } catch (Core.EngineError e) {
+            warning("Failed to populate category index: %s", e.message);
+        }
+        
+        // Register with hook manager for change notifications
+        category.register_hooks();
+        
+        // Notify engine of creation
+        _engine.entity_created(category);
+        
+        return category;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Creates a new Catalogue child in this container.
+     *
+     * The catalogue will be populated with all existing documents grouped
+     * by the key extracted from the expression, and will automatically
+     * register for change notifications to keep its index up to date.
+     *
+     * @param name The name for the new catalogue
+     * @param type_label The document type to catalogue
+     * @param expression The expression to extract the grouping key
+     * @return The created catalogue entity
+     * @throws Core.EntityError.INVALID_PATH if name is empty
+     * @throws Core.EntityError.ENTITY_ALREADY_EXISTS if an entity with this name already exists
+     */
+    public override async Core.Entity? create_catalogue_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws Core.EntityError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.append_child(name);
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Queue operations in transaction
+            tx.record_create_entity(child_path, Core.EntityType.CATALOGUE, type_label);
+            tx.record_save_catalogue_config(child_path, type_label, expression);
+            tx.record_add_child(_path, name);
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            try {
+                storage.store_entity_metadata(child_path, Core.EntityType.CATALOGUE, type_label);
+                storage.store_catalogue_config(child_path, type_label, expression);
+                storage.add_child(_path, name);
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to create catalogue: %s".printf(e.message));
+            }
+        }
+        
+        // Create entity instance
+        var catalogue = new Catalogue(_engine, child_path, type_label, expression);
+        
+        // Populate the index with existing documents
+        try {
+            catalogue.populate_index();
+        } catch (Core.EngineError e) {
+            warning("Failed to populate catalogue index: %s", e.message);
+        }
+        
+        // Register with hook manager for change notifications
+        catalogue.register_hooks();
+        
+        // Notify engine of creation
+        _engine.entity_created(catalogue);
+        
+        return catalogue;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Creates a new Index child in this container.
+     *
+     * The index will be populated with n-gram indices for all existing
+     * documents of the configured type, and will automatically register
+     * for change notifications to keep its index up to date.
+     *
+     * @param name The name for the new index
+     * @param type_label The document type to index
+     * @param expression The expression/property to index for text search
+     * @return The created index entity
+     * @throws Core.EntityError.INVALID_PATH if name is empty
+     * @throws Core.EntityError.ENTITY_ALREADY_EXISTS if an entity with this name already exists
+     */
+    public override async Core.Entity? create_index_async(
+        string name,
+        string type_label,
+        string expression
+    ) throws Core.EntityError {
+        validate_can_create_child(name);
+        
+        var child_path = _path.append_child(name);
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Queue operations in transaction
+            tx.record_create_entity(child_path, Core.EntityType.INDEX, type_label);
+            tx.record_save_category_config(child_path, type_label, expression);
+            tx.record_add_child(_path, name);
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            try {
+                storage.store_entity_metadata(child_path, Core.EntityType.INDEX, type_label);
+                storage.store_category_config(child_path, type_label, expression);
+                storage.add_child(_path, name);
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to create index: %s".printf(e.message));
+            }
+        }
+        
+        // Create entity instance
+        var index = new Index(_engine, child_path, type_label, expression);
+        
+        // Populate the n-gram index with existing documents
+        try {
+            index.populate_index();
+        } catch (Core.EngineError e) {
+            warning("Failed to populate index: %s", e.message);
+        }
+        
+        // Register with hook manager for change notifications
+        index.register_hooks();
+        
+        // Notify engine of creation
+        _engine.entity_created(index);
+        
+        return index;
+    }
+    
+    // === Child Deletion (Async) ===
+    
+    /**
+     * Deletes a child entity from this container.
+     * 
+     * @param name The name of the child to delete
+     * @throws Core.EntityError if deletion fails
+     */
+    public async void delete_child_async(string name) throws Core.EntityError {
+        var child = yield get_child_async(name);
+        if (child == null) {
+            throw new Core.EntityError.ENTITY_NOT_FOUND(
+                "Child not found: %s".printf(name)
+            );
+        }
+        yield ((!) child).delete_async();
+    }
+    
+    // === Lifecycle (Async) ===
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Deletes this container and all its children recursively.
+     */
+    public override async void delete_async() throws Core.EntityError {
+        // Delete all children first (recursively)
+        var names = yield get_child_names_async();
+        foreach (var child_name in names) {
+            var child = yield get_child_async(child_name);
+            if (child != null) {
+                try {
+                    yield ((!) child).delete_async();
+                } catch (Core.EntityError e) {
+                    warning("Failed to delete child %s: %s", child_name, e.message);
+                }
+            }
+        }
+        
+        // Then delete this container
+        yield base.delete_async();
+    }
+    
+    // === Validation ===
+    
+    /**
+     * Validates that a child with the given name can be created.
+     * 
+     * @param name The name to validate
+     * @throws Core.EntityError.INVALID_PATH if name is empty
+     * @throws Core.EntityError.ENTITY_ALREADY_EXISTS if an entity already exists
+     */
+    private void validate_can_create_child(string name) throws Core.EntityError {
+        if (name == null || name == "") {
+            throw new Core.EntityError.INVALID_PATH("Child name cannot be empty");
+        }
+        
+        var child_path = _path.append_child(name);
+        var embedded = _engine as Engine.EmbeddedEngine;
+        if (embedded != null && ((!) embedded).entity_exists_sync(child_path)) {
+            throw new Core.EntityError.ENTITY_ALREADY_EXISTS(
+                "Entity already exists: %s".printf(child_path.to_string())
+            );
+        }
+    }
+}
+
+} // namespace Implexus.Entities

+ 305 - 0
src/Entities/Document.vala

@@ -0,0 +1,305 @@
+/**
+ * Document - Properties-based document entity
+ * 
+ * A Document is a typed object with properties. The type_label is
+ * application-defined and used for querying.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Entities {
+
+/**
+ * Document entity that stores properties.
+ * 
+ * Documents are the primary data storage entities in Implexus.
+ * Each document has:
+ * - A type_label: Application-defined type for querying
+ * - Properties: Key-value pairs stored using Invercargill.Properties
+ * 
+ * Documents cannot have children.
+ * 
+ * Example usage:
+ * {{{
+ * var user = yield users.create_document_async("john", "User");
+ * yield user.set_entity_property_async("email", new Invercargill.NativeElement<string>("john@example.com"));
+ * yield user.set_entity_property_async("age", new Invercargill.NativeElement<int>(30));
+ * 
+ * var email = yield user.get_entity_property_async("email");
+ * }}}
+ */
+public class Document : AbstractEntity {
+    
    // === Private Fields ===
    
    /**
     * The application-defined type label for this document.
     * Set once in the constructor and never changed afterwards.
     */
    private string _type_label;
    
    /**
     * The properties stored in this document.
     * Lazy-loaded from storage on first access; null until then.
     */
    private Invercargill.Properties? _properties = null;
    
    /**
     * Flag indicating whether properties have been loaded.
     * Guards against re-reading storage on every property access.
     */
    private bool _properties_loaded = false;
+    
+    // === Constructors ===
+    
    /**
     * Creates a new Document with the given engine, path, and type label.
     * 
     * No storage access happens here; properties are lazily fetched from
     * storage on the first property access.
     * 
     * @param engine The engine that manages this entity
     * @param path The path to this document
     * @param type_label The application-defined type for this document
     */
    public Document(Core.Engine engine, Core.EntityPath path, string type_label) {
        base(engine, path);
        _type_label = type_label;
    }
+    
+    // === Entity Type ===
+    
    /**
     * {@inheritDoc}
     * 
     * Always reports {@link Core.EntityType.DOCUMENT}.
     */
    public override Core.EntityType entity_type { 
        get { return Core.EntityType.DOCUMENT; }
    }
+    
+    // === Type Label ===
+    
    /**
     * {@inheritDoc}
     * 
     * Returns the application-defined type label for this document.
     * Declared `owned get` because the getter hands out a new reference
     * to the backing string.
     */
    public override string type_label { 
        owned get { return _type_label; }
    }
+    
+    // === Properties (Async) ===
+    
    /**
     * {@inheritDoc}
     * 
     * Returns the properties stored in this document.
     * Properties are lazy-loaded from storage on first access and cached
     * for all subsequent calls.
     * 
     * @return The (possibly empty) property collection
     * @throws Core.EntityError.STORAGE_ERROR if loading from storage fails
     */
    public override async Invercargill.Properties get_properties_async() throws Core.EntityError { 
        yield ensure_properties_loaded_async();
        return (!) _properties;
    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Gets a property value by name.
+     * 
+     * @param name The property name
+     * @return The property value, or null if not found
+     */
+    public override async Invercargill.Element? get_entity_property_async(string name) throws Core.EntityError {
+        yield ensure_properties_loaded_async();
+        try {
+            var element = ((!) _properties).get(name);
+            if (element.is_null()) {
+                return null;
+            }
+            return element;
+        } catch (Invercargill.IndexError e) {
+            return null;
+        }
+    }
+    
    /**
     * {@inheritDoc}
     * 
     * Sets a property value and persists the change to storage.
     * The sequence is deliberate: mutate the in-memory collection,
     * persist it (directly or via the active transaction), then notify
     * the engine of the modification.
     * 
     * @param name The property name
     * @param value The property value
     * @throws Core.EntityError.STORAGE_ERROR if the update or save fails
     */
    public override async void set_entity_property_async(string name, Invercargill.Element value) throws Core.EntityError {
        yield ensure_properties_loaded_async();
        try {
            ((!) _properties).set(name, value);
        } catch (Invercargill.IndexError e) {
            throw new Core.EntityError.STORAGE_ERROR("Failed to set property: %s".printf(e.message));
        }
        yield save_properties_async();
        _engine.entity_modified(this);
    }
+    
    /**
     * {@inheritDoc}
     * 
     * Removes a property and persists the change to storage.
     * Removing a property that does not exist is tolerated; note that the
     * save and the modification notification still run in that case.
     * 
     * @param name The property name
     */
    public override async void remove_property_async(string name) throws Core.EntityError {
        yield ensure_properties_loaded_async();
        try {
            ((!) _properties).remove(name);
        } catch (Invercargill.IndexError e) {
            // Property doesn't exist, that's fine
        }
        yield save_properties_async();
        _engine.entity_modified(this);
    }
+    
+    /**
+     * Gets a property value as a specific type.
+     * 
+     * This is a convenience method that combines get_entity_property_async
+     * with type casting.
+     * 
+     * @param name The property name
+     * @return The property value as type T, or null if not found or wrong type
+     */
+    public async T? get_property_as_async<T>(string name) throws Core.EntityError, Invercargill.ElementError {
+        var element = yield get_entity_property_async(name);
+        if (element == null) {
+            return null;
+        }
+        return ((!) element).as<T>();
+    }
+    
    /**
     * Sets a property value from a native type.
     * 
     * Convenience wrapper that boxes the value in a NativeElement and
     * delegates to set_entity_property_async (so persistence and
     * modification notification behave identically).
     * 
     * @param name The property name
     * @param value The property value
     */
    public async void set_property_from_async<T>(string name, T value) throws Core.EntityError {
        yield set_entity_property_async(name, new Invercargill.NativeElement<T>(value));
    }
+    
+    // === Child Operations (Documents don't have children - Async) ===
+    
    /**
     * Documents don't have children, so the name listing is always empty.
     * 
     * @return An empty set
     */
    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError { 
        return new Invercargill.DataStructures.HashSet<string>();
    }
+    
    /**
     * Documents don't have children, so any lookup misses.
     * 
     * @param name Ignored
     * @return null
     */
    public override async Core.Entity? get_child_async(string name) throws Core.EntityError {
        return null;
    }
+    
    /**
     * Documents don't have children, so eager loading yields nothing.
     * 
     * @return An empty array
     */
    public override async Core.Entity[] get_children_async() throws Core.EntityError {
        return new Core.Entity[0];
    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Ensures properties are loaded from storage (async).
+     */
+    private async void ensure_properties_loaded_async() throws Core.EntityError {
+        if (_properties_loaded) {
+            return;
+        }
+        
+        var storage = _engine.configuration.storage;
+        try {
+            var loaded = storage.load_properties(_path);
+            if (loaded != null) {
+                _properties = (!) loaded;
+            } else {
+                _properties = new Invercargill.DataStructures.PropertyDictionary();
+            }
+        } catch (Storage.StorageError e) {
+            throw new Core.EntityError.STORAGE_ERROR("Failed to load properties: %s".printf(e.message));
+        }
+        
+        _properties_loaded = true;
+    }
+    
+    /**
+     * Saves properties to storage (async).
+     *
+     * If a transaction is active, the save is deferred to the transaction
+     * for batched writes. Otherwise, properties are saved immediately.
+     */
+    private async void save_properties_async() throws Core.EntityError {
+        if (_properties == null) {
+            return;
+        }
+        
+        // Check if we're in a transaction
+        var embedded_engine = _engine as Engine.EmbeddedEngine;
+        var tx = embedded_engine != null ? embedded_engine.current_transaction : null;
+        
+        if (tx != null) {
+            // Defer property save to transaction - will be batched at commit
+            tx.record_save_properties(_path, (!) _properties);
+        } else {
+            // Direct write when not in transaction
+            var storage = _engine.configuration.storage;
+            try {
+                storage.store_properties(_path, (!) _properties);
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to save properties: %s".printf(e.message));
+            }
+        }
+    }
+    
+    /**
+     * Returns a string representation of this document.
+     */
+    public new string to_string() {
+        return "Document(%s, type=%s)".printf(_path.to_string(), _type_label);
+    }
+    
+    // === Synchronous Access for Hook Evaluation ===
+    
+    /**
+     * Gets properties synchronously for hook evaluation.
+     *
+     * This is used by Category and Index hooks that run in the DBM thread
+     * context where synchronous access is safe.
+     *
+     * @return The properties collection
+     * @throws Core.EntityError if loading fails
+     */
+    public Invercargill.Properties get_properties_sync() throws Core.EntityError {
+        if (!_properties_loaded) {
+            var storage = _engine.configuration.storage;
+            try {
+                var loaded = storage.load_properties(_path);
+                if (loaded != null) {
+                    _properties = (!) loaded;
+                } else {
+                    _properties = new Invercargill.DataStructures.PropertyDictionary();
+                }
+            } catch (Storage.StorageError e) {
+                throw new Core.EntityError.STORAGE_ERROR("Failed to load properties: %s".printf(e.message));
+            }
+            _properties_loaded = true;
+        }
+        return (!) _properties;
+    }
+}
+
+} // namespace Implexus.Entities

+ 1456 - 0
src/Entities/Index.vala

@@ -0,0 +1,1456 @@
+/**
+ * Index - Text search entity with n-gram indexing for fast full-text search
+ *
+ * An Index provides full-text search over documents using a hierarchical
+ * n-gram index structure for O(k) lookups where k = number of candidates,
+ * instead of O(n) scans.
+ *
+ * Supports batched hook processing for improved performance during
+ * bulk operations within transactions.
+ *
+ * @version 0.3
+ * @since 0.1
+ */
+namespace Implexus.Entities {
+
+/**
+ * The kinds of wildcard patterns accepted by Index searches.
+ */
+public enum SearchPatternType {
+    /**
+     * Term may appear anywhere in the content (pattern form: *term*).
+     */
+    CONTAINS,
+    
+    /**
+     * Content must begin with the term (pattern form: term*).
+     */
+    STARTS_WITH,
+    
+    /**
+     * Content must finish with the term (pattern form: *term).
+     */
+    ENDS_WITH,
+    
+    /**
+     * Content must equal the term exactly (pattern form: term).
+     */
+    EXACT
+}
+
+/**
+ * The result of parsing a raw search pattern: the match type plus the
+ * bare term with all wildcards stripped.
+ */
+public class SearchPattern : Object {
+    /**
+     * How the term should be matched against document content.
+     */
+    public SearchPatternType pattern_type { get; construct set; }
+    
+    /**
+     * The bare search term, with wildcard characters removed.
+     */
+    public string term { get; construct set; }
+    
+    /**
+     * Creates a pattern from an already-classified type and term.
+     *
+     * @param pattern_type How the term should be matched
+     * @param term The wildcard-free search term
+     */
+    public SearchPattern(SearchPatternType pattern_type, string term) {
+        Object(pattern_type: pattern_type, term: term);
+    }
+}
+
+/**
+ * Text search entity with n-gram indexing.
+ *
+ * An Index is configured with:
+ * - A type_label: The document type to search
+ * - An expression: The property name or expression to search within
+ * - case_sensitive: Whether search is case-sensitive (default: false)
+ *
+ * The index uses a hierarchical n-gram structure:
+ * - Trigram index: Maps 3-char sequences to document paths
+ * - Bigram reverse index: Maps 2-char sequences to trigrams containing them
+ * - Unigram reverse index: Maps single chars to bigrams starting with them
+ *
+ * This allows efficient search for patterns of any length.
+ *
+ * Example usage:
+ * {{{
+ * // Create documents with searchable content
+ * var articles = yield (yield engine.get_root_async()).create_container_async("articles");
+ * var article1 = yield articles.create_document_async("article1", "Article");
+ * yield article1.set_entity_property_async("content", new Invercargill.NativeElement<string>("Introduction to Vala"));
+ * 
+ * var article2 = yield articles.create_document_async("article2", "Article");
+ * yield article2.set_entity_property_async("content", new Invercargill.NativeElement<string>("Advanced Vala Techniques"));
+ * 
+ * // Create an index over the content property
+ * var search = yield articles.create_index_async("search", "Article", "content");
+ * 
+ * // Search for documents
+ * var results = yield search.get_child_async("*Vala*");  // Contains "Vala"
+ * var intro = yield search.get_child_async("Intro*");    // Starts with "Intro"
+ * }}}
+ */
+public class Index : AbstractEntity, 
+                      Engine.EntityChangeHandler, 
+                      Engine.DocumentPropertyChangeHandler,
+                      Engine.BatchedHookHandler {
+    
+    // === Configuration ===
+    
+    /**
+     * The document type to index (matched against documents' type labels).
+     */
+    private string _type_label;
+    
+    /**
+     * The property name or expression whose value is indexed for search.
+     */
+    private string _expression;
+    
+    /**
+     * Whether indexing and matching are case-sensitive.
+     * When false, indexed content and search terms are lower-cased first.
+     */
+    private bool _case_sensitive;
+    
+    /**
+     * Whether the configuration above has been populated — either via
+     * the main constructor or lazily (ensure_config_loaded) when created
+     * through Index.from_storage().
+     */
+    private bool _config_loaded = false;
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a fully-configured Index.
+     *
+     * Configuration is supplied directly, so no lazy load from storage
+     * is required and _config_loaded is set immediately.
+     *
+     * @param engine The engine that manages this entity
+     * @param path The path to this index
+     * @param type_label The document type to index
+     * @param expression The property name or expression to search within
+     * @param case_sensitive Whether indexing is case-sensitive (default: false)
+     */
+    public Index(
+        Core.Engine engine,
+        Core.EntityPath path,
+        string type_label,
+        string expression,
+        bool case_sensitive = false
+    ) {
+        base(engine, path);
+        _case_sensitive = case_sensitive;
+        _expression = expression;
+        _type_label = type_label;
+        _config_loaded = true;
+    }
+    
+    /**
+     * Creates an Index whose configuration is loaded lazily from
+     * storage on first access.
+     *
+     * @param engine The engine that manages this entity
+     * @param path The path to this index
+     */
+    public Index.from_storage(Core.Engine engine, Core.EntityPath path) {
+        base(engine, path);
+        // Placeholder values until the lazy configuration load runs
+        _config_loaded = false;
+        _type_label = "";
+        _expression = "";
+        _case_sensitive = false;
+    }
+    
+    // === Entity Type ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Always Core.EntityType.INDEX for this entity.
+     */
+    public override Core.EntityType entity_type { 
+        get { return Core.EntityType.INDEX; }
+    }
+    
+    // === Configuration ===
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns the expression/property being indexed.
+     * Triggers a lazy configuration load when this instance was created
+     * via Index.from_storage().
+     */
+    public override string configured_expression { 
+        owned get {
+            ensure_config_loaded();
+            return _expression;
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns the document type being indexed.
+     * Triggers a lazy configuration load when this instance was created
+     * via Index.from_storage().
+     */
+    public override string configured_type_label { 
+        owned get {
+            ensure_config_loaded();
+            return _type_label;
+        }
+    }
+    
+    /**
+     * Whether indexing is case-sensitive.
+     * When false, indexed content and search terms are lower-cased
+     * before being compared or stored.
+     */
+    public bool case_sensitive {
+        get {
+            ensure_config_loaded();
+            return _case_sensitive;
+        }
+    }
+    
+    // === Child Navigation (Virtual - Async) ===
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * An index does not enumerate its children: the child namespace is
+     * the unbounded set of search patterns, so an empty set is always
+     * returned. Use get_child_async() with a search pattern instead.
+     */
+    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError {
+        var empty = new Invercargill.DataStructures.HashSet<string>();
+        return empty;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Treats the requested child name as a search pattern and runs a
+     * text search over the indexed documents.
+     *
+     * Supported pattern forms:
+     * - `*world*` - contains "world" (anywhere)
+     * - `world*` - starts with "world"
+     * - `*world` - ends with "world"
+     * - `world` - exact match
+     *
+     * @param pattern The search pattern
+     * @return An IndexResult containing matching documents, or null if no matches
+     */
+    public override async Core.Entity? get_child_async(string pattern) throws Core.EntityError {
+        return search(pattern);
+    }
+    
+    /**
+     * Performs a text search and returns an IndexResult wrapping the
+     * matching documents.
+     *
+     * Runs synchronously: entity resolution uses the embedded engine's
+     * sync lookup, which is safe in the DBM thread context. When the
+     * engine is not an EmbeddedEngine no entities can be resolved.
+     *
+     * @param pattern The search pattern (e.g., "*world*", "hello*", "*goodbye")
+     * @return An IndexResult containing matching documents, or null if no matches
+     */
+    public IndexResult? search(string pattern) {
+        ensure_config_loaded();
+        
+        var matches = execute_search(parse_pattern(pattern));
+        if (matches.count() == 0) {
+            return null;
+        }
+        
+        // Resolve each matching path to a live entity, dropping any
+        // that can no longer be found
+        var resolved = new Invercargill.DataStructures.Vector<Core.Entity>();
+        var embedded = _engine as Engine.EmbeddedEngine;
+        if (embedded != null) {
+            foreach (var match_path in matches) {
+                var entity = ((!) embedded).get_entity_or_null_sync(Core.EntityPath.parse(match_path));
+                if (entity != null) {
+                    resolved.add((!) entity);
+                }
+            }
+        }
+        
+        if (resolved.peek_count() == 0) {
+            return null;
+        }
+        
+        return new IndexResult(_engine, _path.append_child(pattern), pattern, resolved);
+    }
+    
+    // === Pattern Parsing ===
+    
+    /**
+     * Classifies a raw pattern string and strips its wildcards.
+     *
+     * A leading `*` allows the match to start anywhere; a trailing `*`
+     * allows it to end anywhere. Both together mean CONTAINS, neither
+     * means EXACT. The term is lower-cased unless case-sensitive.
+     *
+     * Note: callers are expected to have run ensure_config_loaded()
+     * already, since _case_sensitive is read here directly.
+     *
+     * @param pattern The raw pattern string
+     * @return A SearchPattern object with type and normalized term
+     */
+    private SearchPattern parse_pattern(string pattern) {
+        bool leading = pattern.has_prefix("*");
+        bool trailing = pattern.has_suffix("*");
+        
+        // Strip the leading wildcard first, then the trailing one (the
+        // length guard avoids double-stripping the lone pattern "*")
+        string term = leading ? pattern.substring(1) : pattern;
+        if (trailing && term.length > 0) {
+            term = term.substring(0, term.length - 1);
+        }
+        if (!_case_sensitive) {
+            term = term.down();
+        }
+        
+        SearchPatternType kind;
+        if (leading && trailing) {
+            kind = SearchPatternType.CONTAINS;
+        } else if (trailing) {
+            kind = SearchPatternType.STARTS_WITH;
+        } else if (leading) {
+            kind = SearchPatternType.ENDS_WITH;
+        } else {
+            kind = SearchPatternType.EXACT;
+        }
+        
+        return new SearchPattern(kind, term);
+    }
+    
+    // === Search Implementation ===
+    
+    /**
+     * Dispatches a parsed pattern to the matching n-gram search
+     * strategy.
+     *
+     * @param pattern The parsed search pattern
+     * @return A set of matching document paths (empty when the index
+     *         store is unavailable or the term is empty)
+     */
+    private Invercargill.DataStructures.HashSet<string> execute_search(SearchPattern pattern) {
+        var store = get_index_store();
+        string term = pattern.term;
+        
+        // No store, or a wildcard-only pattern, yields no matches
+        if (store == null || term.length == 0) {
+            return new Invercargill.DataStructures.HashSet<string>();
+        }
+        
+        switch (pattern.pattern_type) {
+            case SearchPatternType.CONTAINS:
+                return search_contains((!) store, term);
+            case SearchPatternType.STARTS_WITH:
+                return search_prefix((!) store, term);
+            case SearchPatternType.ENDS_WITH:
+                return search_suffix((!) store, term);
+            case SearchPatternType.EXACT:
+                return search_exact((!) store, term);
+            default:
+                return new Invercargill.DataStructures.HashSet<string>();
+        }
+    }
+    
+    /**
+     * Searches for documents containing the term anywhere.
+     *
+     * Candidate generation depends on term length:
+     * - >= 3 chars: intersection of the document sets of every trigram
+     * - 2 chars: bigram → trigrams → documents
+     * - 1 char: unigram → bigrams → trigrams → documents
+     *
+     * Candidates are then verified against the cached document content,
+     * filtering out n-gram false positives.
+     *
+     * Trigram document sets are fetched lazily and the intersection
+     * stops as soon as it becomes empty, avoiding needless store reads.
+     *
+     * Note: n-gram offsets are byte-based (Vala string indexing), which
+     * is exact for ASCII content — TODO confirm intended behavior for
+     * multi-byte UTF-8 content.
+     *
+     * @param store The index store to query
+     * @param term The normalized search term
+     * @return The set of verified matching document paths
+     */
+    private Invercargill.DataStructures.HashSet<string> search_contains(
+        Storage.HighLevel.IndexStore store,
+        string term
+    ) {
+        var candidates = new Invercargill.DataStructures.HashSet<string>();
+        
+        if (term.length >= 3) {
+            // Intersect the document sets of all trigrams in the term
+            var trigrams = generate_trigrams(term);
+            if (trigrams.peek_count() == 0) {
+                return candidates;
+            }
+            
+            bool first = true;
+            foreach (var trigram in trigrams) {
+                var docs = store.get_documents_for_trigram(_path, trigram);
+                if (first) {
+                    foreach (var doc in docs) {
+                        candidates.add(doc);
+                    }
+                    first = false;
+                } else {
+                    // Keep only documents present in both sets
+                    var docs_hash = new Invercargill.DataStructures.HashSet<string>();
+                    foreach (var d in docs) {
+                        docs_hash.add(d);
+                    }
+
+                    var to_remove = new Invercargill.DataStructures.Vector<string>();
+                    foreach (var doc in candidates) {
+                        if (!docs_hash.has(doc)) {
+                            to_remove.add(doc);
+                        }
+                    }
+                    foreach (var doc in to_remove) {
+                        candidates.remove(doc);
+                    }
+                }
+                
+                // The intersection can only shrink; an empty result
+                // cannot recover, so stop fetching further sets
+                if (candidates.count() == 0) {
+                    return candidates;
+                }
+            }
+        } else if (term.length == 2) {
+            // Use bigram→trigram→docs
+            foreach (var trigram in store.get_trigrams_for_bigram(_path, term)) {
+                foreach (var doc in store.get_documents_for_trigram(_path, trigram)) {
+                    candidates.add(doc);
+                }
+            }
+        } else if (term.length == 1) {
+            // Use unigram→bigram→trigram→docs
+            foreach (var bigram in store.get_bigrams_for_unigram(_path, term)) {
+                foreach (var trigram in store.get_trigrams_for_bigram(_path, bigram)) {
+                    foreach (var doc in store.get_documents_for_trigram(_path, trigram)) {
+                        candidates.add(doc);
+                    }
+                }
+            }
+        }
+        
+        // Verify candidates actually contain the term
+        var verified = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var doc_path in candidates) {
+            var content = store.get_document_content(_path, doc_path);
+            if (content != null && ((!) content).contains(term)) {
+                verified.add(doc_path);
+            }
+        }
+        
+        return verified;
+    }
+    
+    /**
+     * Searches for documents whose content starts with the term.
+     *
+     * Candidates come from the first trigram of the term (or from the
+     * bigram/unigram reverse indexes for shorter terms) and are then
+     * verified with a has_prefix() check against the cached content.
+     *
+     * @param store The index store to query
+     * @param term The normalized search term
+     * @return The set of verified matching document paths
+     */
+    private Invercargill.DataStructures.HashSet<string> search_prefix(
+        Storage.HighLevel.IndexStore store,
+        string term
+    ) {
+        var pool = new Invercargill.DataStructures.HashSet<string>();
+        
+        if (term.length >= 3) {
+            // Every match must be indexed under the term's first trigram
+            foreach (var doc in store.get_documents_for_trigram(_path, term.substring(0, 3))) {
+                pool.add(doc);
+            }
+        } else if (term.length == 2) {
+            // bigram → trigrams → documents
+            foreach (var trigram in store.get_trigrams_for_bigram(_path, term)) {
+                foreach (var doc in store.get_documents_for_trigram(_path, trigram)) {
+                    pool.add(doc);
+                }
+            }
+        } else if (term.length == 1) {
+            // unigram → bigrams → trigrams → documents
+            foreach (var bigram in store.get_bigrams_for_unigram(_path, term)) {
+                foreach (var trigram in store.get_trigrams_for_bigram(_path, bigram)) {
+                    foreach (var doc in store.get_documents_for_trigram(_path, trigram)) {
+                        pool.add(doc);
+                    }
+                }
+            }
+        }
+        
+        // Keep only documents whose cached content really starts with
+        // the term
+        var matches = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var doc_path in pool) {
+            var content = store.get_document_content(_path, doc_path);
+            if (content != null && ((!) content).has_prefix(term)) {
+                matches.add(doc_path);
+            }
+        }
+        
+        return matches;
+    }
+    
+    /**
+     * Searches for documents whose content ends with the term.
+     *
+     * Candidates come from the last trigram of the term (or from the
+     * bigram/unigram reverse indexes for shorter terms) and are then
+     * verified with a has_suffix() check against the cached content.
+     *
+     * @param store The index store to query
+     * @param term The normalized search term
+     * @return The set of verified matching document paths
+     */
+    private Invercargill.DataStructures.HashSet<string> search_suffix(
+        Storage.HighLevel.IndexStore store,
+        string term
+    ) {
+        var pool = new Invercargill.DataStructures.HashSet<string>();
+        
+        if (term.length >= 3) {
+            // Every match must be indexed under the term's last trigram
+            foreach (var doc in store.get_documents_for_trigram(_path, term.substring(term.length - 3, 3))) {
+                pool.add(doc);
+            }
+        } else if (term.length == 2) {
+            // bigram → trigrams → documents
+            foreach (var trigram in store.get_trigrams_for_bigram(_path, term)) {
+                foreach (var doc in store.get_documents_for_trigram(_path, trigram)) {
+                    pool.add(doc);
+                }
+            }
+        } else if (term.length == 1) {
+            // unigram → bigrams → trigrams → documents
+            foreach (var bigram in store.get_bigrams_for_unigram(_path, term)) {
+                foreach (var trigram in store.get_trigrams_for_bigram(_path, bigram)) {
+                    foreach (var doc in store.get_documents_for_trigram(_path, trigram)) {
+                        pool.add(doc);
+                    }
+                }
+            }
+        }
+        
+        // Keep only documents whose cached content really ends with
+        // the term
+        var matches = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var doc_path in pool) {
+            var content = store.get_document_content(_path, doc_path);
+            if (content != null && ((!) content).has_suffix(term)) {
+                matches.add(doc_path);
+            }
+        }
+        
+        return matches;
+    }
+    
+    /**
+     * Searches for documents whose content equals the term exactly.
+     *
+     * For terms of 3+ characters, candidates are narrowed via trigram
+     * intersection; shorter terms fall back to scanning every document
+     * of the configured type. All candidates are then verified with an
+     * exact comparison against the cached content.
+     *
+     * The trigram intersection stops early as soon as it becomes empty,
+     * avoiding needless store reads.
+     *
+     * @param store The index store to query
+     * @param term The normalized search term
+     * @return The set of verified matching document paths
+     */
+    private Invercargill.DataStructures.HashSet<string> search_exact(
+        Storage.HighLevel.IndexStore store,
+        string term
+    ) {
+        var candidates = new Invercargill.DataStructures.HashSet<string>();
+        
+        if (term.length >= 3) {
+            // Narrow candidates by intersecting trigram document sets
+            var trigrams = generate_trigrams(term);
+            bool first = true;
+            foreach (var trigram in trigrams) {
+                var docs = store.get_documents_for_trigram(_path, trigram);
+                if (first) {
+                    foreach (var doc in docs) {
+                        candidates.add(doc);
+                    }
+                    first = false;
+                } else {
+                    var docs_hash = new Invercargill.DataStructures.HashSet<string>();
+                    foreach (var d in docs) {
+                        docs_hash.add(d);
+                    }
+                    
+                    var to_remove = new Invercargill.DataStructures.Vector<string>();
+                    foreach (var doc in candidates) {
+                        if (!docs_hash.has(doc)) {
+                            to_remove.add(doc);
+                        }
+                    }
+                    foreach (var doc in to_remove) {
+                        candidates.remove(doc);
+                    }
+                }
+                
+                // The intersection can only shrink; an empty result
+                // cannot recover
+                if (candidates.count() == 0) {
+                    break;
+                }
+            }
+        } else {
+            // Term too short for trigrams: scan all documents of the type
+            var embedded = _engine as Engine.EmbeddedEngine;
+            if (embedded != null) {
+                foreach (var doc_path in ((!) embedded).entity_store.get_documents_by_type(_type_label)) {
+                    candidates.add(doc_path);
+                }
+            }
+        }
+        
+        // Verify exact match against the cached content
+        var verified = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var doc_path in candidates) {
+            var content = store.get_document_content(_path, doc_path);
+            if (content != null && ((!) content) == term) {
+                verified.add(doc_path);
+            }
+        }
+        
+        return verified;
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * An index cannot enumerate its (virtual) children, so this always
+     * returns an empty array. Use search() or get_child_async() with a
+     * search pattern instead.
+     */
+    public override async Core.Entity[] get_children_async() throws Core.EntityError {
+        var none = new Core.Entity[0];
+        return none;
+    }
+    
+    // === N-gram Generation ===
+    
+    /**
+     * Generates the distinct trigrams (3-byte windows) of a text, in
+     * order of first appearance.
+     *
+     * Offsets are byte-based (Vala string indexing) — exact for ASCII
+     * content; TODO confirm intended behavior for multi-byte UTF-8.
+     *
+     * @param text The text to generate trigrams from
+     * @return A vector of 3-character trigrams
+     */
+    private Invercargill.DataStructures.Vector<string> generate_trigrams(string text) {
+        string normalized = _case_sensitive ? text : text.down();
+        var seen = new Invercargill.DataStructures.HashSet<string>();
+        var result = new Invercargill.DataStructures.Vector<string>();
+        
+        int i = 0;
+        while (i <= normalized.length - 3) {
+            string window = normalized.substring(i, 3);
+            if (!seen.has(window)) {
+                seen.add(window);
+                result.add(window);
+            }
+            i++;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Generates the distinct bigrams (2-byte windows) of a text, in
+     * order of first appearance.
+     *
+     * Offsets are byte-based (Vala string indexing) — exact for ASCII
+     * content; TODO confirm intended behavior for multi-byte UTF-8.
+     *
+     * @param text The text to generate bigrams from
+     * @return A vector of 2-character bigrams
+     */
+    private Invercargill.DataStructures.Vector<string> generate_bigrams(string text) {
+        string normalized = _case_sensitive ? text : text.down();
+        var seen = new Invercargill.DataStructures.HashSet<string>();
+        var result = new Invercargill.DataStructures.Vector<string>();
+        
+        int i = 0;
+        while (i <= normalized.length - 2) {
+            string window = normalized.substring(i, 2);
+            if (!seen.has(window)) {
+                seen.add(window);
+                result.add(window);
+            }
+            i++;
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Extracts the distinct single-byte characters of a text, in order
+     * of first appearance.
+     *
+     * NOTE(review): not referenced in the visible portion of this file
+     * (unigrams are derived from bigrams in populate_index and
+     * index_document) — confirm usage before removing. Byte-based
+     * indexing is exact for ASCII content.
+     *
+     * @param text The text to extract characters from
+     * @return A vector of single characters
+     */
+    private Invercargill.DataStructures.Vector<string> extract_unique_chars(string text) {
+        string normalized = _case_sensitive ? text : text.down();
+        var seen = new Invercargill.DataStructures.HashSet<string>();
+        var result = new Invercargill.DataStructures.Vector<string>();
+        
+        int i = 0;
+        while (i < normalized.length) {
+            string ch = normalized.substring(i, 1);
+            if (!seen.has(ch)) {
+                seen.add(ch);
+                result.add(ch);
+            }
+            i++;
+        }
+        
+        return result;
+    }
+    
+    // === Index Management ===
+    
+    /**
+     * Populates the index by scanning all existing documents of the
+     * configured type.
+     *
+     * Clears any existing index data, then for every document of the
+     * configured type: caches its normalized content and accumulates
+     * the trigram, bigram→trigram and unigram→bigram mappings, which
+     * are finally written in three batch calls.
+     *
+     * This should be called when an Index is first created. It is
+     * synchronous as it runs in the DBM thread context, and only has
+     * an effect when running against an embedded engine.
+     *
+     * @throws Core.EngineError if the index store is unavailable
+     */
+    public void populate_index() throws Core.EngineError {
+        ensure_config_loaded();
+        
+        var store = get_index_store();
+        if (store == null) {
+            throw new Core.EngineError.STORAGE_ERROR("IndexStore not available");
+        }
+        
+        // Clear existing index data (best-effort: the index is rebuilt
+        // from scratch below anyway)
+        try {
+            ((!) store).clear_index(_path);
+        } catch (Storage.StorageError e) {
+            warning("Failed to clear text index: %s", e.message);
+        }
+        
+        // Accumulators for the batch writes: ngram → document paths / ngrams
+        var trigrams_to_add = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+        var bigrams_to_add = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+        var unigrams_to_add = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+        
+        var embedded = _engine as Engine.EmbeddedEngine;
+        
+        if (embedded != null) {
+            var doc_paths = ((!) embedded).entity_store.get_documents_by_type(_type_label);
+            
+            foreach (var doc_path in doc_paths) {
+                Core.Entity? doc = ((!) embedded).get_entity_or_null_sync(Core.EntityPath.parse(doc_path));
+                if (doc == null) continue;
+                
+                string? content = get_field_value((!) doc);
+                if (content == null || ((!) content).length == 0) continue;
+                
+                string normalized = _case_sensitive ? (!) content : ((!) content).down();
+                string doc_path_str = ((!) doc).path.to_string();
+                
+                // Content cache backs the verification step of searches
+                try {
+                    ((!) store).store_document_content(_path, doc_path_str, normalized);
+                } catch (Storage.StorageError e) {
+                    // Don't abort the whole population for one document,
+                    // but don't swallow the failure silently either
+                    warning("Failed to cache content for %s: %s", doc_path_str, e.message);
+                }
+                
+                var trigrams = generate_trigrams(normalized);
+                foreach (var trigram in trigrams) {
+                    if (!trigrams_to_add.has(trigram)) {
+                        trigrams_to_add.set(trigram, new Invercargill.DataStructures.Vector<string>());
+                    }
+                    Invercargill.DataStructures.Vector<string> docs;
+                    trigrams_to_add.try_get(trigram, out docs);
+                    docs.add(doc_path_str);
+                    
+                    // Both bigrams of a trigram map back to it in the
+                    // reverse index
+                    string bi1 = trigram.substring(0, 2);
+                    string bi2 = trigram.substring(1, 2);
+                    
+                    if (!bigrams_to_add.has(bi1)) {
+                        bigrams_to_add.set(bi1, new Invercargill.DataStructures.Vector<string>());
+                    }
+                    Invercargill.DataStructures.Vector<string> t1;
+                    bigrams_to_add.try_get(bi1, out t1);
+                    t1.add(trigram);
+                    
+                    if (bi1 != bi2) {
+                        if (!bigrams_to_add.has(bi2)) {
+                            bigrams_to_add.set(bi2, new Invercargill.DataStructures.Vector<string>());
+                        }
+                        Invercargill.DataStructures.Vector<string> t2;
+                        bigrams_to_add.try_get(bi2, out t2);
+                        t2.add(trigram);
+                    }
+                }
+                
+                // Unigram reverse index: first byte of each bigram
+                var bigrams = generate_bigrams(normalized);
+                foreach (var bigram in bigrams) {
+                    string uni = bigram.substring(0, 1);
+                    if (!unigrams_to_add.has(uni)) {
+                        unigrams_to_add.set(uni, new Invercargill.DataStructures.Vector<string>());
+                    }
+                    Invercargill.DataStructures.Vector<string> b;
+                    unigrams_to_add.try_get(uni, out b);
+                    b.add(bigram);
+                }
+            }
+        }
+        
+        try {
+            if (trigrams_to_add.length > 0) {
+                ((!) store).add_trigrams_batch(_path, trigrams_to_add);
+            }
+            if (bigrams_to_add.length > 0) {
+                ((!) store).add_bigram_mappings_batch(_path, bigrams_to_add);
+            }
+            if (unigrams_to_add.length > 0) {
+                ((!) store).add_unigram_mappings_batch(_path, unigrams_to_add);
+            }
+        } catch (Storage.StorageError e) {
+            warning("Failed to batch save index items: %s", e.message);
+        }
+    }
+    
+    /**
+     * Adds a single document's content to the index.
+     *
+     * Caches the normalized content for verification, then records the
+     * document under each of its trigrams and refreshes the bigram and
+     * unigram reverse indexes.
+     *
+     * @param doc The document to index
+     * @throws Storage.StorageError if indexing fails
+     */
+    private void index_document(Core.Entity doc) throws Storage.StorageError {
+        ensure_config_loaded();
+        
+        var store = get_index_store();
+        if (store == null) {
+            return;
+        }
+        
+        // Nothing to index when the field is absent or empty
+        string? content = get_field_value(doc);
+        if (content == null || ((!) content).length == 0) {
+            return;
+        }
+        
+        string normalized = _case_sensitive ? (!) content : ((!) content).down();
+        string doc_path = doc.path.to_string();
+        
+        // Content cache backs the verification step of searches
+        ((!) store).store_document_content(_path, doc_path, normalized);
+        
+        // Forward index: trigram → document
+        var trigrams = generate_trigrams(normalized);
+        foreach (var trigram in trigrams) {
+            ((!) store).add_trigram(_path, trigram, doc_path);
+        }
+        
+        // Reverse index: bigram → trigrams containing it
+        foreach (var trigram in trigrams) {
+            string head = trigram.substring(0, 2);
+            string tail = trigram.substring(1, 2);
+            ((!) store).add_bigram_mapping(_path, head, trigram);
+            if (head != tail) {
+                ((!) store).add_bigram_mapping(_path, tail, trigram);
+            }
+        }
+        
+        // Reverse index: unigram → bigrams starting with it (bigrams
+        // are still needed to derive the unigrams)
+        foreach (var bigram in generate_bigrams(normalized)) {
+            ((!) store).add_unigram_mapping(_path, bigram.substring(0, 1), bigram);
+        }
+    }
+    
+    /**
+     * Removes a document from the index.
+     *
+     * Looks up the cached content to discover which trigram entries to
+     * delete, then drops the content cache itself. Reverse (bigram and
+     * unigram) indexes are left alone: they map n-grams to n-grams and
+     * carry no per-document references.
+     *
+     * @param doc_path The document path to remove
+     * @throws Storage.StorageError if removal fails
+     */
+    private void unindex_document(string doc_path) throws Storage.StorageError {
+        ensure_config_loaded();
+        
+        var store = get_index_store();
+        if (store == null) {
+            return;
+        }
+        
+        // Without cached content there is nothing recorded to undo
+        var content = ((!) store).get_document_content(_path, doc_path);
+        if (content == null) {
+            return;
+        }
+        
+        foreach (var trigram in generate_trigrams((!) content)) {
+            ((!) store).remove_trigram(_path, trigram, doc_path);
+        }
+        
+        ((!) store).remove_document_content(_path, doc_path);
+    }
+    
+    /**
+     * Re-indexes a document by removing its previous entries and then
+     * indexing its current content.
+     *
+     * @param doc The document to re-index
+     * @throws Storage.StorageError if re-indexing fails
+     */
+    private void reindex_document(Core.Entity doc) throws Storage.StorageError {
+        var doc_path = doc.path.to_string();
+        unindex_document(doc_path);
+        index_document(doc);
+    }
+    
+    /**
+     * Reads the configured property value from a document.
+     *
+     * Synchronous by design: it runs in the DBM thread context (hook
+     * evaluation), where sync property access is safe. Non-Document
+     * entities, missing properties, null values and load failures all
+     * yield null.
+     *
+     * @param doc The document to get the value from
+     * @return The field value, or null if not available
+     */
+    private string? get_field_value(Core.Entity doc) {
+        var document = doc as Document;
+        if (document == null) {
+            return null;
+        }
+        
+        try {
+            // Safe in the DBM thread context
+            var properties = ((!) document).get_properties_sync();
+            try {
+                var element = properties.get(_expression);
+                if (!element.is_null()) {
+                    return element.as<string>();
+                }
+            } catch (Invercargill.IndexError e) {
+                // Property doesn't exist on this document
+            }
+        } catch (Core.EntityError e) {
+            // Failed to load properties
+        }
+        return null;
+    }
+    
+    // === BatchedHookHandler Implementation ===
+    
+    /**
+     * Indicates this handler supports batch processing.
+     *
+     * Returning true lets the hook manager deliver consolidated events via
+     * on_batch_change() instead of one on_entity_change() call per entity.
+     */
+    public bool supports_batch {
+        get { return true; }
+    }
+    
+    /**
+     * Handles a batch of entity changes efficiently.
+     *
+     * Processes all changes in one pass, then performs grouped updates to
+     * the text index via the IndexStore batch APIs (O(1) HashSet operations
+     * per entry).
+     *
+     * Note: This runs synchronously in the DBM thread context.
+     *
+     * @param events The consolidated events for matching entities
+     */
+    public void on_batch_change(Invercargill.DataStructures.Vector<Engine.HookEvent> events) {
+        ensure_config_loaded();
+        
+        // Partition the events into index / unindex / reindex work lists.
+        var to_index = new Invercargill.DataStructures.Vector<Core.Entity>();
+        var to_unindex = new Invercargill.DataStructures.Vector<string>();
+        var to_reindex = new Invercargill.DataStructures.Vector<Core.Entity>();
+        
+        foreach (var evt in events) {
+            // Skip non-documents and documents of other types.
+            if (evt.entity_type != Core.EntityType.DOCUMENT) {
+                continue;
+            }
+            if (evt.type_label != _type_label) {
+                continue;
+            }
+            
+            switch (evt.change_type) {
+                case Engine.EntityChangeType.CREATED: {
+                    // Braced block: switch sections share one scope, so this
+                    // `entity` local must not collide with MODIFIED's below.
+                    var entity = evt.get_entity(_engine);
+                    if (entity != null) {
+                        to_index.add((!) entity);
+                    }
+                    break;
+                }
+                
+                case Engine.EntityChangeType.MODIFIED: {
+                    var entity = evt.get_entity(_engine);
+                    if (entity != null) {
+                        to_reindex.add((!) entity);
+                    }
+                    break;
+                }
+                
+                case Engine.EntityChangeType.DELETED:
+                    // The entity is gone; only its path is still meaningful.
+                    to_unindex.add(evt.entity_path.to_string());
+                    break;
+            }
+        }
+        
+        // Apply all changes
+        try {
+            var store = get_index_store();
+            if (store == null) return;
+            var s = (!) store;  // hoist the non-null cast once
+            
+            // Per-key buckets, flushed to the backend in one call each.
+            var trigrams_to_add = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+            var trigrams_to_remove = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+            var bigrams_to_add = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+            var unigrams_to_add = new Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>>();
+            
+            // Unindex first (deletes and reindexes)
+            var all_to_unindex = new Invercargill.DataStructures.Vector<string>();
+            foreach (var doc_path in to_unindex) all_to_unindex.add(doc_path);
+            foreach (var doc in to_reindex) all_to_unindex.add(doc.path.to_string());
+            
+            foreach (var doc_path in all_to_unindex) {
+                // Cached content tells us which trigrams this doc produced.
+                var content = s.get_document_content(_path, doc_path);
+                if (content != null) {
+                    var trigrams = generate_trigrams((!) content);
+                    foreach (var trigram in trigrams) {
+                        if (!trigrams_to_remove.has(trigram)) {
+                            trigrams_to_remove.set(trigram, new Invercargill.DataStructures.Vector<string>());
+                        }
+                        Invercargill.DataStructures.Vector<string> docs;
+                        trigrams_to_remove.try_get(trigram, out docs);
+                        docs.add(doc_path);
+                    }
+                    s.remove_document_content(_path, doc_path);
+                }
+            }
+            
+            // Index next (creates and reindexes)
+            var all_to_index = new Invercargill.DataStructures.Vector<Core.Entity>();
+            foreach (var doc in to_index) all_to_index.add(doc);
+            foreach (var doc in to_reindex) all_to_index.add(doc);
+            
+            foreach (var doc in all_to_index) {
+                string? content = get_field_value(doc);
+                if (content == null || ((!) content).length == 0) continue;
+                
+                string normalized = _case_sensitive ? (!) content : ((!) content).down();
+                string doc_path = doc.path.to_string();
+                
+                s.store_document_content(_path, doc_path, normalized);
+                
+                var trigrams = generate_trigrams(normalized);
+                foreach (var trigram in trigrams) {
+                    if (!trigrams_to_add.has(trigram)) {
+                        trigrams_to_add.set(trigram, new Invercargill.DataStructures.Vector<string>());
+                    }
+                    Invercargill.DataStructures.Vector<string> docs;
+                    trigrams_to_add.try_get(trigram, out docs);
+                    docs.add(doc_path);
+                    
+                    // Reverse-index entries: both bigrams contained in the
+                    // trigram map back to it (repeats are assumed to be
+                    // deduplicated by the HashSet-backed store — verify).
+                    string bi1 = trigram.substring(0, 2);
+                    string bi2 = trigram.substring(1, 2);
+                    
+                    if (!bigrams_to_add.has(bi1)) bigrams_to_add.set(bi1, new Invercargill.DataStructures.Vector<string>());
+                    Invercargill.DataStructures.Vector<string> t1;
+                    bigrams_to_add.try_get(bi1, out t1); t1.add(trigram);
+                    
+                    if (bi1 != bi2) {
+                        if (!bigrams_to_add.has(bi2)) bigrams_to_add.set(bi2, new Invercargill.DataStructures.Vector<string>());
+                        Invercargill.DataStructures.Vector<string> t2;
+                        bigrams_to_add.try_get(bi2, out t2); t2.add(trigram);
+                    }
+                }
+                
+                var bigrams = generate_bigrams(normalized);
+                foreach (var bigram in bigrams) {
+                    string uni = bigram.substring(0, 1);
+                    if (!unigrams_to_add.has(uni)) unigrams_to_add.set(uni, new Invercargill.DataStructures.Vector<string>());
+                    Invercargill.DataStructures.Vector<string> b;
+                    unigrams_to_add.try_get(uni, out b); b.add(bigram);
+                }
+            }
+            
+            // Apply batches to backend
+            if (trigrams_to_remove.length > 0) s.remove_trigrams_batch(_path, trigrams_to_remove);
+            if (trigrams_to_add.length > 0) s.add_trigrams_batch(_path, trigrams_to_add);
+            if (bigrams_to_add.length > 0) s.add_bigram_mappings_batch(_path, bigrams_to_add);
+            if (unigrams_to_add.length > 0) s.add_unigram_mappings_batch(_path, unigrams_to_add);
+            
+        } catch (Storage.StorageError e) {
+            warning("Failed to batch update index: %s", e.message);
+        }
+    }
+    
+    /**
+     * Handles batched property changes.
+     *
+     * Intentionally a no-op: any document whose indexed property changed
+     * within a batch is already re-indexed by on_batch_change(), which is
+     * called first for the same batch.
+     *
+     * @param document The document that changed
+     * @param changes Map of property name to old/new values
+     */
+    public void on_batch_property_change(
+        Core.Entity document,
+        Invercargill.DataStructures.Dictionary<string, Engine.PropertyChange> changes
+    ) {
+        // Intentionally empty — re-indexing is handled by on_batch_change,
+        // which is called first.
+    }
+    
+    // === EntityChangeHandler Implementation ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Handles entity creation, modification, and deletion events for
+     * non-batched processing (outside transactions).
+     * Note: runs synchronously in the DBM thread context.
+     */
+    public void on_entity_change(Core.Entity entity, Engine.EntityChangeType change_type) {
+        ensure_config_loaded();
+        
+        // Ignore anything that is not a document of our configured type.
+        if (entity.entity_type != Core.EntityType.DOCUMENT || entity.type_label != _type_label) {
+            return;
+        }
+        
+        try {
+            switch (change_type) {
+                case Engine.EntityChangeType.CREATED:
+                    index_document(entity);
+                    break;
+                case Engine.EntityChangeType.MODIFIED:
+                    reindex_document(entity);
+                    break;
+                case Engine.EntityChangeType.DELETED:
+                    unindex_document(entity.path.to_string());
+                    break;
+            }
+        } catch (Storage.StorageError e) {
+            warning("Failed to update index: %s", e.message);
+        }
+    }
+    
+    // === DocumentPropertyChangeHandler Implementation ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Handles document property change events by re-indexing the document
+     * whenever the indexed property changes.
+     * Note: runs synchronously in the DBM thread context.
+     */
+    public void on_document_property_change(
+        Core.Entity document,
+        string property_name,
+        Invercargill.Element? old_value,
+        Invercargill.Element? new_value
+    ) {
+        ensure_config_loaded();
+        
+        // Ignore anything that is not a document of our configured type.
+        if (document.entity_type != Core.EntityType.DOCUMENT) {
+            return;
+        }
+        // Ignore other document types and properties we do not index.
+        if (document.type_label != _type_label || property_name != _expression) {
+            return;
+        }
+        
+        try {
+            reindex_document(document);
+        } catch (Storage.StorageError e) {
+            warning("Failed to re-index document: %s", e.message);
+        }
+    }
+    
+    // === Hook Registration ===
+    
+    /**
+     * Registers this index with the hook manager so it receives both
+     * entity-change and property-change notifications.
+     */
+    public void register_hooks() {
+        var hook_manager = get_hook_manager();
+        if (hook_manager == null) {
+            return;
+        }
+        ((!) hook_manager).register_handler(this);
+        ((!) hook_manager).register_property_handler(this);
+    }
+    
+    /**
+     * Unregisters this index from the hook manager, detaching both the
+     * entity-change and property-change handlers.
+     */
+    public void unregister_hooks() {
+        var hook_manager = get_hook_manager();
+        if (hook_manager == null) {
+            return;
+        }
+        ((!) hook_manager).unregister_handler(this);
+        ((!) hook_manager).unregister_property_handler(this);
+    }
+    
+    // === Lifecycle (Async) ===
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Deletes this index. The order matters: hooks are unregistered first so
+     * no further change notifications arrive, the backing index data is then
+     * cleared (best-effort), and finally the entity metadata is removed.
+     */
+    public override async void delete_async() throws Core.EntityError {
+        // Unregister from hooks
+        unregister_hooks();
+        
+        // Clear index data (best-effort: a storage failure is logged, not
+        // propagated, so the entity deletion below still proceeds)
+        var store = get_index_store();
+        if (store != null) {
+            try {
+                ((!) store).clear_index(_path);
+            } catch (Storage.StorageError e) {
+                warning("Failed to clear text index on delete: %s", e.message);
+            }
+        }
+        
+        // Delete entity metadata
+        yield base.delete_async();
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * For an embedded engine this checks storage synchronously. For any
+     * other engine type there is no cheap synchronous check here, so the
+     * entity optimistically reports existence — NOTE(review): confirm this
+     * is acceptable for remote engines.
+     */
+    public override bool exists {
+        get {
+            var embedded = _engine as Engine.EmbeddedEngine;
+            if (embedded != null) {
+                return ((!) embedded).entity_exists_sync(_path);
+            }
+            // Non-embedded engines: assume the entity exists.
+            return true;
+        }
+    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Lazily loads this index's configuration from storage.
+     *
+     * Runs at most once: subsequent calls return immediately. A storage
+     * failure is logged and the current defaults are kept, but the config
+     * is still marked loaded so the lookup is not retried on every call.
+     */
+    private void ensure_config_loaded() {
+        if (_config_loaded) {
+            return;
+        }
+        _config_loaded = true;
+        
+        try {
+            var config = _engine.configuration.storage.get_category_config(_path);
+            if (config != null) {
+                _type_label = ((!) config).type_label;
+                _expression = ((!) config).expression;
+                // case_sensitive is not yet persisted in the config; default
+                // to false.
+                _case_sensitive = false;
+            }
+        } catch (Storage.StorageError e) {
+            warning("Failed to load index config: %s", e.message);
+        }
+    }
+    
+    /**
+     * Gets the IndexStore from the engine if available.
+     *
+     * Only an EmbeddedEngine exposes direct store access; for any other
+     * engine type this returns null and index maintenance becomes a no-op.
+     *
+     * @return The IndexStore, or null if not available
+     */
+    private Storage.HighLevel.IndexStore? get_index_store() {
+        // Consistent with the `exists` property: prefer an `as` cast with a
+        // null check over `is` + a C-style cast.
+        var embedded = _engine as Engine.EmbeddedEngine;
+        if (embedded != null) {
+            return ((!) embedded).index_store;
+        }
+        return null;
+    }
+    
+    /**
+     * Gets the HookManager from the engine configuration.
+     *
+     * @return The HookManager, or null if not available
+     */
+    private Engine.HookManager? get_hook_manager() {
+        return _engine.configuration.hook_manager;
+    }
+    
+    /**
+     * Returns a human-readable description of this index, including its
+     * path, configured type label, expression, and case sensitivity.
+     */
+    public new string to_string() {
+        var sensitivity = _case_sensitive.to_string();
+        return "Index(%s, type=%s, expr=%s, case_sensitive=%s)".printf(
+            _path.to_string(), _type_label, _expression, sensitivity);
+    }
+}
+
+/**
+ * Virtual container returned by an Index search.
+ *
+ * Holds the documents that matched a search pattern; this is the Container
+ * returned by Index.search() or Index.get_child_async(). The result is an
+ * immutable, virtual view: every create/delete operation rejects with
+ * INVALID_OPERATION.
+ */
+public class IndexResult : AbstractEntity {
+    
+    // The pattern the search was run with.
+    private string _search_pattern;
+    
+    // The matching documents, in the order the search produced them.
+    private Invercargill.DataStructures.Vector<Core.Entity> _documents;
+    
+    /**
+     * Creates a new IndexResult.
+     * 
+     * @param engine The engine that manages this entity
+     * @param path The path to this result
+     * @param search_pattern The search pattern that was evaluated
+     * @param documents The documents that matched
+     */
+    public IndexResult(
+        Core.Engine engine, 
+        Core.EntityPath path, 
+        string search_pattern,
+        Invercargill.DataStructures.Vector<Core.Entity> documents
+    ) {
+        base(engine, path);
+        _search_pattern = search_pattern;
+        _documents = documents;
+    }
+    
+    /**
+     * {@inheritDoc}
+     *
+     * Presents itself as a CONTAINER so the results can be navigated.
+     */
+    public override Core.EntityType entity_type {
+        get { return Core.EntityType.CONTAINER; }
+    }
+    
+    /**
+     * The search pattern that produced this result.
+     */
+    public string search_pattern { 
+        get { return _search_pattern; }
+    }
+    
+    /**
+     * The number of matching documents.
+     */
+    public int result_count {
+        get { return (int) _documents.peek_count(); }
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns the names of all matching documents (duplicate names collapse
+     * into the set).
+     */
+    public override async Invercargill.ReadOnlySet<string> get_child_names_async() throws Core.EntityError {
+        var names = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var match in _documents) {
+            names.add(match.name);
+        }
+        return names;
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns the first matching document with the given name, or null if
+     * none matches.
+     */
+    public override async Core.Entity? get_child_async(string name) throws Core.EntityError {
+        foreach (var match in _documents) {
+            if (match.name == name) {
+                return match;
+            }
+        }
+        return null;
+    }
+    
+    /**
+     * {@inheritDoc}
+     * 
+     * Returns all matching documents (eager loading).
+     */
+    public override async Core.Entity[] get_children_async() throws Core.EntityError {
+        var children = new Core.Entity[0];
+        foreach (var match in _documents) {
+            children += match;
+        }
+        return children;
+    }
+    
+    // === Read-Only Operations (Async) ===
+    // An index result is a read-only view; none of the mutating entity
+    // operations are supported.
+    
+    /**
+     * @throws Core.EntityError.INVALID_OPERATION always — results are read-only
+     */
+    public override async Core.Entity? create_container_async(string name) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Cannot create children in an index result");
+    }
+    
+    /**
+     * @throws Core.EntityError.INVALID_OPERATION always — results are read-only
+     */
+    public override async Core.Entity? create_document_async(string name, string type_label) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Cannot create children in an index result");
+    }
+    
+    /**
+     * @throws Core.EntityError.INVALID_OPERATION always — results are read-only
+     */
+    public override async Core.Entity? create_category_async(string name, string type_label, string expression) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Cannot create children in an index result");
+    }
+    
+    /**
+     * @throws Core.EntityError.INVALID_OPERATION always — results are read-only
+     */
+    public override async Core.Entity? create_catalogue_async(string name, string type_label, string expression) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Cannot create children in an index result");
+    }
+    
+    /**
+     * @throws Core.EntityError.INVALID_OPERATION always — results are read-only
+     */
+    public override async Core.Entity? create_index_async(string name, string type_label, string expression) throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Cannot create children in an index result");
+    }
+    
+    /**
+     * @throws Core.EntityError.INVALID_OPERATION always — results cannot be deleted
+     */
+    public override async void delete_async() throws Core.EntityError {
+        throw new Core.EntityError.INVALID_OPERATION("Cannot delete an index result");
+    }
+    
+    /**
+     * Virtual entities always report existence; nothing is stored for them.
+     */
+    public override bool exists {
+        get { return true; }
+    }
+    
+    /**
+     * Returns a human-readable description of this index result.
+     */
+    public new string to_string() {
+        return "IndexResult(%s, pattern='%s', %d results)".printf(
+            _path.to_string(), 
+            _search_pattern, 
+            (int) _documents.peek_count()
+        );
+    }
+}
+
+} // namespace Implexus.Entities

+ 138 - 0
src/Implexus.vala

@@ -0,0 +1,138 @@
+/**
+ * Implexus - Path-based document database library for Vala
+ * 
+ * Implexus provides a hierarchical document database with path-based addressing,
+ * supporting containers, documents, categories, and indexes. It offers both
+ * embedded and client-server modes of operation.
+ * 
+ * == Namespaces ==
+ * 
+ * - '''Implexus.Core''': Core interfaces and types (Engine, Entity, Path, Transaction)
+ * - '''Implexus.Entities''': Entity implementations (Container, Document, Category, Index)
+ * - '''Implexus.Engine''': Engine implementations (EmbeddedEngine, RemoteEngine, EngineFactory)
+ * - '''Implexus.Storage''': Storage layer (Storage, Dbm, BasicStorage)
+ * - '''Implexus.Protocol''': Client-server protocol (Message, Request, Response)
+ * - '''Implexus.Server''': Server implementation (Server, ClientConnection)
+ * 
+ * == Quick Start ==
+ * 
+ * The easiest way to get started is using the EngineFactory:
+ * 
+ * {{{
+ * using Implexus;
+ * using Implexus.Core;
+ * using Implexus.Engine;
+ * 
+ * public static int main(string[] args) {
+ *     try {
+ *         // Create an embedded engine (default mode)
+ *         var config = EngineConfiguration.embedded("./my_database");
+ *         var engine = EngineFactory.create(config);
+ *         
+ *         // Or create a remote engine
+ *         // var config = EngineConfiguration.remote("localhost", 9876);
+ *         // var engine = EngineFactory.create(config);
+ *         
+ *         // Work with entities - same API regardless of mode
+ *         var root = engine.get_root();
+ *         var users = root.create_container("users");
+ *         var john = users.create_document("john", "User");
+ *         john.set_entity_property("email", new Invercargill.NativeElement<string>("john@example.com"));
+ *         
+ *         // Query
+ *         foreach (var user in engine.query_by_type("User")) {
+ *             stdout.printf("Found user: %s\n", user.name);
+ *         }
+ *         
+ *         return 0;
+ *     } catch (Error e) {
+ *         stderr.printf("Error: %s\n", e.message);
+ *         return 1;
+ *     }
+ * }
+ * }}}
+ * 
+ * == Operation Modes ==
+ * 
+ * '''Embedded Mode:'''
+ * - Direct in-process database operations
+ * - No server required
+ * - Best for single-application scenarios
+ * - Use `EngineFactory.create_embedded()` or `EngineConfiguration.embedded()`
+ * 
+ * '''Remote Mode:'''
+ * - Connect to a remote implexusd server
+ * - Supports multiple concurrent clients
+ * - Best for distributed or multi-process scenarios
+ * - Use `EngineFactory.create_remote()` or `EngineConfiguration.remote()`
+ * 
+ * == Entity Types ==
+ *
+ * - '''Container''': Container for other entities (like a folder)
+ * - '''Document''': Leaf node with properties (like a file)
+ * - '''Category''': Dynamic collection based on type and expression
+ * - '''Index''': Similar to Category with additional indexing
+ * 
+ * == Architecture ==
+ * 
+ * The library is built around several key interfaces:
+ * 
+ * - '''Core.Engine''': Main API for database operations
+ * - '''Core.Entity''': Base interface for all entity types
+ * - '''Core.EntityPath''': Path-based addressing (e.g., /users/john)
+ * - '''Core.Transaction''': Atomic operations support
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus {
+
+/**
+ * Library version string.
+ */
+public const string VERSION = "0.1";
+
+/**
+ * Library version components (major.minor.patch; must stay in sync with
+ * the VERSION string above).
+ */
+public const int VERSION_MAJOR = 0;
+public const int VERSION_MINOR = 1;
+public const int VERSION_PATCH = 0;
+
+/**
+ * Default server port for remote connections.
+ */
+public const uint16 DEFAULT_PORT = 9876;
+
+/**
+ * Default storage path for embedded mode.
+ */
+public const string DEFAULT_STORAGE_PATH = "./data";
+
+/**
+ * Creates an embedded engine with default settings.
+ * 
+ * Convenience wrapper around EngineFactory.create_embedded(); the engine
+ * is expected to use DEFAULT_STORAGE_PATH ("./data") — verify against
+ * EngineFactory if that default ever changes.
+ * 
+ * @return A new embedded engine instance
+ * @throws Core.EngineError if engine creation fails
+ */
+public static Core.Engine create_engine() throws Core.EngineError {
+    return Engine.EngineFactory.create_embedded();
+}
+
+/**
+ * Creates an engine with the specified configuration.
+ * 
+ * Convenience wrapper that delegates to EngineFactory.create(); works for
+ * both embedded and remote configurations.
+ * 
+ * @param config The engine configuration
+ * @return A new engine instance
+ * @throws Core.EngineError if engine creation fails
+ */
+public static Core.Engine create_engine_with_config(Engine.EngineConfiguration config) throws Core.EngineError {
+    return Engine.EngineFactory.create(config);
+}
+
+} // namespace Implexus

+ 93 - 0
src/Migrations/BootstrapMigration.vala

@@ -0,0 +1,93 @@
+/**
+ * BootstrapMigration - Initial database setup migration
+ * 
+ * A special migration that runs before any other migrations.
+ * Creates the root container if needed and can be extended by
+ * applications for custom initialization.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Migrations {
+
+/**
+ * A bootstrap migration that runs before any other migrations.
+ *
+ * This migration is automatically applied when the database is first
+ * created. It can be extended by applications to perform initial setup
+ * such as creating required containers or seeding default data.
+ *
+ * The bootstrap migration always uses version "0000000000" to ensure
+ * it runs first.
+ *
+ * Example:
+ * {{{
+ * public class MyAppBootstrap : BootstrapMigration {
+ *     public override async void up_async(Core.Engine engine) throws MigrationError {
+ *         yield base.up_async(engine);
+ *         
+ *         try {
+ *             var root = yield engine.get_root_async();
+ *             // Create application-specific containers
+ *             yield root.create_container_async("config");
+ *             yield root.create_container_async("sessions");
+ *             
+ *             // Seed initial data
+ *             var config = yield root.get_child_async("config");
+ *             var settings = yield config.create_document_async("settings", "AppConfig");
+ *             yield settings.set_entity_property_async("version", new Invercargill.NativeElement<string>("1.0.0"));
+ *         } catch (Core.EngineError e) {
+ *             throw new MigrationError.EXECUTION_FAILED(
+ *                 "Bootstrap failed: %s".printf(e.message)
+ *             );
+ *         }
+ *     }
+ * }
+ * }}}
+ */
+public class BootstrapMigration : Object, Migration {
+    
+    /**
+     * Bootstrap version; fixed at "0000000000" so it always sorts first.
+     */
+    public string version { owned get { return "0000000000"; } }
+    
+    /**
+     * Human-readable description of this migration.
+     */
+    public string description { owned get { return "Initial database setup"; } }
+    
+    /**
+     * Performs initial database setup by ensuring the root container
+     * exists. Subclasses should chain up before doing their own work.
+     *
+     * @param engine The database engine to operate on
+     * @throws MigrationError if setup fails
+     */
+    public virtual async void up_async(Core.Engine engine) throws MigrationError {
+        try {
+            // Ensure root exists.
+            yield engine.get_root_async();
+        } catch (Core.EngineError e) {
+            throw new MigrationError.EXECUTION_FAILED(
+                "Failed to initialize root: %s".printf(e.message)
+            );
+        }
+    }
+    
+    /**
+     * Bootstrap migration cannot be reversed.
+     *
+     * @param engine The database engine to operate on
+     * @throws MigrationError.IRREVERSIBLE always
+     */
+    public virtual async void down_async(Core.Engine engine) throws MigrationError {
+        throw new MigrationError.IRREVERSIBLE(
+            "Bootstrap migration cannot be reversed"
+        );
+    }
+}
+
+} // namespace Implexus.Migrations

+ 100 - 0
src/Migrations/Migration.vala

@@ -0,0 +1,100 @@
+/**
+ * Migration - Interface for database migrations
+ * 
+ * Defines the contract that all migrations must implement, including
+ * version identification, description, and up/down methods for
+ * applying and reversing migrations.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Migrations {
+
+/**
+ * Interface for database migrations.
+ *
+ * Migrations are application-defined classes that modify the database
+ * schema or data in a controlled, versioned manner.
+ *
+ * Each migration must implement:
+ * - A unique identifier (version)
+ * - An up_async() method to apply the migration
+ * - A down_async() method to reverse the migration (optional for irreversible migrations)
+ *
+ * Example:
+ * {{{
+ * public class CreateUsersTable : Object, Migration {
+ *     public string version { owned get { return "2026031301"; } }
+ *     public string description { owned get { return "Create users container"; } }
+ *     
+ *     public async void up_async(Core.Engine engine) throws MigrationError {
+ *         try {
+ *             var root = yield engine.get_root_async();
+ *             yield root.create_container_async("users");
+ *         } catch (Core.EngineError e) {
+ *             throw new MigrationError.EXECUTION_FAILED(
+ *                 "Failed to create users container: %s".printf(e.message)
+ *             );
+ *         }
+ *     }
+ *     
+ *     public async void down_async(Core.Engine engine) throws MigrationError {
+ *         try {
+ *             var root = yield engine.get_root_async();
+ *             var users = yield root.get_child_async("users");
+ *             if (users != null) {
+ *                 yield ((!) users).delete_async();
+ *             }
+ *         } catch (Core.EngineError e) {
+ *             throw new MigrationError.EXECUTION_FAILED(
+ *                 "Failed to delete users container: %s".printf(e.message)
+ *             );
+ *         }
+ *     }
+ * }
+ * }}}
+ */
+public interface Migration : Object {
+    
+    /**
+     * Unique identifier for this migration.
+     *
+     * Best practice: Use a timestamp format like YYYYMMDDNN where NN is a
+     * sequence number within the day. Keeping all versions the same length
+     * ensures string comparison matches chronological order (presumably how
+     * the runner orders them — confirm against MigrationRunner).
+     *
+     * Examples: "2026031301", "2026031302", "2026031501"
+     */
+    public abstract string version { owned get; }
+    
+    /**
+     * Human-readable description of what this migration does.
+     */
+    public abstract string description { owned get; }
+    
+    /**
+     * Applies the migration.
+     *
+     * This method is called when migrating forward. All operations
+     * are executed within a single transaction.
+     *
+     * @param engine The database engine to operate on
+     * @throws MigrationError if the migration fails
+     */
+    public abstract async void up_async(Core.Engine engine) throws MigrationError;
+    
+    /**
+     * Reverses the migration.
+     *
+     * This method is called when rolling back. All operations
+     * are executed within a single transaction.
+     *
+     * Implementations may throw MigrationError.IRREVERSIBLE if the
+     * migration cannot be safely reversed.
+     *
+     * @param engine The database engine to operate on
+     * @throws MigrationError if the rollback fails
+     */
+    public abstract async void down_async(Core.Engine engine) throws MigrationError;
+}
+
+} // namespace Implexus.Migrations

+ 62 - 0
src/Migrations/MigrationError.vala

@@ -0,0 +1,62 @@
+/**
+ * MigrationError - Error domain for migration operations
+ * 
+ * Provides error types for the migration system including registration
+ * conflicts, execution failures, and storage issues.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Migrations {
+
/**
 * Error domain for migration operations.
 */
public errordomain MigrationError {
    /**
     * The migration was not found.
     *
     * Raised by the runner when a version is pending or applied but no
     * matching Migration object has been registered.
     */
    NOT_FOUND,
    
    /**
     * A migration with this version is already registered or applied.
     *
     * Raised by MigrationRunner.register_migration() on duplicate versions.
     */
    VERSION_CONFLICT,
    
    /**
     * The migration is already applied.
     *
     * Raised by run_one_async() when targeting an applied version.
     */
    ALREADY_APPLIED,
    
    /**
     * The migration cannot be reversed.
     *
     * Thrown by Migration.down_async() implementations that have no safe
     * rollback path.
     */
    IRREVERSIBLE,
    
    /**
     * The migration execution failed.
     */
    EXECUTION_FAILED,
    
    /**
     * Transaction error during migration.
     */
    TRANSACTION_ERROR,
    
    /**
     * Storage error during migration.
     *
     * Raised by MigrationStorage when the backing Dbm is unavailable or
     * a read/write fails.
     */
    STORAGE_ERROR,
    
    /**
     * Engine reference error.
     *
     * Raised when the runner's weak engine reference has been invalidated.
     */
    ENGINE_ERROR,
    
    /**
     * No migrations are available or applied.
     */
    NO_MIGRATIONS
}
+
+} // namespace Implexus.Migrations

+ 395 - 0
src/Migrations/MigrationRunner.vala

@@ -0,0 +1,395 @@
+/**
+ * MigrationRunner - Migration execution engine
+ * 
+ * Discovers and executes migrations in version order. Each migration
+ * runs in its own transaction for safety and atomicity.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Migrations {
+
/**
 * Delegate for migration progress notifications.
 *
 * Invoked once per migration, after its transaction has been committed.
 *
 * @param migration The migration that just completed
 * @param is_up true if the migration was applied, false if it was rolled back
 */
public delegate void MigrationProgressDelegate(Migration migration, bool is_up);
+
/**
 * Discovers and executes migrations in version order.
 *
 * The MigrationRunner is responsible for:
 * - Registering available migrations
 * - Determining which migrations need to run
 * - Executing migrations in the correct order within transactions
 * - Recording migration history
 *
 * Versions are compared lexicographically (strcmp), so registered versions
 * should be equal-length, zero-padded strings (e.g. YYYYMMDDNN).
 *
 * Example usage:
 * {{{
 * var runner = new MigrationRunner(engine);
 * 
 * // Register migrations
 * runner.register_migration(new CreateUsersTable());
 * runner.register_migration(new AddEmailIndex());
 * runner.register_migration(new SeedInitialData());
 * 
 * // Run all pending migrations
 * try {
 *     int count = yield runner.run_pending_async();
 *     print("Ran %d migrations\n", count);
 * } catch (MigrationError e) {
 *     stderr.printf("Migration failed: %s\n", e.message);
 * }
 * }}}
 */
public class MigrationRunner : Object {
    
    // === Private Fields ===
    
    // Weak reference: the engine is expected to outlive the runner; a strong
    // reference could create a reference cycle if the engine ever holds the
    // runner as a subsystem.
    private weak Core.Engine _engine;
    private MigrationStorage _storage;
    private Invercargill.DataStructures.Dictionary<string, Migration> _migrations;
    
    // === Constructors ===
    
    /**
     * Creates a new MigrationRunner for the given engine.
     *
     * @param engine The database engine to migrate
     */
    public MigrationRunner(Core.Engine engine) {
        _engine = engine;
        _storage = new MigrationStorage(engine);
        _migrations = new Invercargill.DataStructures.Dictionary<string, Migration>();
    }
    
    // === Migration Registration ===
    
    /**
     * Registers a migration to be available for execution.
     *
     * Migrations must be registered before calling run_pending_async() or rollback_to_async().
     *
     * @param migration The migration to register
     * @throws MigrationError.VERSION_CONFLICT if a migration with the same version exists
     */
    public void register_migration(Migration migration) throws MigrationError {
        var version = migration.version;
        if (_migrations.has(version)) {
            throw new MigrationError.VERSION_CONFLICT(
                "Migration version %s is already registered".printf(version)
            );
        }
        _migrations.set(version, migration);
    }
    
    /**
     * Registers multiple migrations at once.
     *
     * @param migrations Array of migrations to register
     * @throws MigrationError if any registration fails
     */
    public void register_migrations(Migration[] migrations) throws MigrationError {
        foreach (var migration in migrations) {
            register_migration(migration);
        }
    }
    
    // === Status Queries ===
    
    /**
     * Gets all migration versions that have been applied.
     *
     * @return Sorted list of applied migration versions
     */
    public Invercargill.Enumerable<string> get_applied_versions() {
        return _storage.get_applied_versions();
    }
    
    /**
     * Gets all migration versions that are registered but not yet applied.
     *
     * @return Sorted list of pending migration versions
     */
    public Invercargill.Enumerable<string> get_pending_versions() {
        var pending = new Invercargill.DataStructures.Vector<string>();
        var applied = _storage.get_applied_set();
        
        foreach (var version in sorted_registered_versions()) {
            if (!applied.contains(version)) {
                pending.add(version);
            }
        }
        
        return pending.as_enumerable();
    }
    
    /**
     * Gets all pending migrations (not yet applied).
     *
     * @return Vector of pending Migration objects in version order
     */
    public Invercargill.DataStructures.Vector<Migration> get_pending_migrations() {
        var pending = new Invercargill.DataStructures.Vector<Migration>();
        var applied = _storage.get_applied_set();
        
        foreach (var version in sorted_registered_versions()) {
            if (!applied.contains(version)) {
                var migration = _migrations.get(version);
                if (migration != null) {
                    pending.add((!) migration);
                }
            }
        }
        
        return pending;
    }
    
    /**
     * Checks if a specific migration version has been applied.
     *
     * @param version The migration version to check
     * @return true if the migration has been applied
     */
    public bool is_applied(string version) {
        return _storage.is_applied(version);
    }
    
    /**
     * Gets the count of pending migrations.
     *
     * @return Number of migrations waiting to be applied
     */
    public int get_pending_count() {
        int count = 0;
        var applied = _storage.get_applied_set();
        foreach (var version in _migrations.keys) {
            if (!applied.contains(version)) {
                count++;
            }
        }
        return count;
    }
    
    // === Execution ===
    
    /**
     * Runs all pending migrations in version order.
     *
     * Each migration runs in its own transaction. If a migration fails,
     * the process stops and the failing migration is rolled back.
     *
     * @param progress Optional callback for progress notifications
     * @return Number of migrations that were run
     * @throws MigrationError if any migration fails
     */
    public async int run_pending_async(MigrationProgressDelegate? progress = null) throws MigrationError {
        var pending = get_pending_versions();
        int count = 0;
        
        foreach (var version in pending) {
            yield run_single_async(get_registered(version), true, progress);
            count++;
        }
        
        return count;
    }
    
    /**
     * Runs a specific migration.
     *
     * @param version The version of the migration to run
     * @param progress Optional callback for progress notification
     * @throws MigrationError if the migration fails or is already applied
     */
    public async void run_one_async(string version, MigrationProgressDelegate? progress = null) throws MigrationError {
        if (_storage.is_applied(version)) {
            throw new MigrationError.ALREADY_APPLIED(
                "Migration %s is already applied".printf(version)
            );
        }
        
        yield run_single_async(get_registered(version), true, progress);
    }
    
    /**
     * Runs migrations up to a specific version.
     *
     * Runs all pending migrations up to and including the target version.
     *
     * @param version The target version to run to (inclusive)
     * @param progress Optional callback for progress notifications
     * @return Number of migrations that were run
     * @throws MigrationError if any migration fails
     */
    public async int run_to_version_async(string version, MigrationProgressDelegate? progress = null) throws MigrationError {
        var pending = get_pending_versions();
        int count = 0;
        
        foreach (var v in pending) {
            // Pending versions are sorted ascending; past the target, stop.
            if (strcmp(v, version) > 0) {
                break;
            }
            
            yield run_single_async(get_registered(v), true, progress);
            count++;
        }
        
        return count;
    }
    
    /**
     * Rolls back to a specific version.
     *
     * Runs the down() method of all migrations after the target version,
     * in reverse order.
     *
     * @param target_version The version to roll back to (exclusive - this version remains applied)
     * @param progress Optional callback for progress notifications
     * @return Number of migrations that were rolled back
     * @throws MigrationError if any rollback fails
     */
    public async int rollback_to_version_async(string target_version, MigrationProgressDelegate? progress = null) throws MigrationError {
        var applied = _storage.get_applied_versions();
        int count = 0;
        
        // Collect everything applied after the target...
        var to_rollback = new Invercargill.DataStructures.Vector<string>();
        foreach (var version in applied) {
            if (strcmp(version, target_version) > 0) {
                to_rollback.add(version);
            }
        }
        
        // ...and undo newest-first.
        to_rollback.sort((a, b) => strcmp(b, a));
        
        foreach (var version in to_rollback) {
            var migration = _migrations.get(version);
            if (migration == null) {
                throw new MigrationError.NOT_FOUND(
                    "Migration %s not found for rollback".printf(version)
                );
            }
            
            yield run_single_async((!) migration, false, progress);
            count++;
        }
        
        return count;
    }
    
    /**
     * Rolls back the most recently applied migration.
     *
     * @param progress Optional callback for progress notification
     * @throws MigrationError if no migrations are applied or rollback fails
     */
    public async void rollback_last_async(MigrationProgressDelegate? progress = null) throws MigrationError {
        var applied = _storage.get_applied_versions();
        
        string? last_version = null;
        foreach (var version in applied) {
            last_version = version; // Last one due to sorted order
        }
        
        if (last_version == null) {
            throw new MigrationError.NO_MIGRATIONS(
                "No migrations have been applied"
            );
        }
        
        var migration = _migrations.get((!) last_version);
        if (migration == null) {
            throw new MigrationError.NOT_FOUND(
                "Migration %s not found for rollback".printf((!) last_version)
            );
        }
        
        yield run_single_async((!) migration, false, progress);
    }
    
    // === Internal Methods ===
    
    /**
     * Returns all registered migration versions sorted ascending (strcmp).
     */
    private Invercargill.DataStructures.Vector<string> sorted_registered_versions() {
        var all_versions = new Invercargill.DataStructures.Vector<string>();
        foreach (var version in _migrations.keys) {
            all_versions.add(version);
        }
        all_versions.sort((a, b) => strcmp(a, b));
        return all_versions;
    }
    
    /**
     * Looks up a registered migration by version.
     *
     * @throws MigrationError.NOT_FOUND if no migration with this version is registered
     */
    private Migration get_registered(string version) throws MigrationError {
        var migration = _migrations.get(version);
        if (migration == null) {
            throw new MigrationError.NOT_FOUND(
                "Migration %s not found".printf(version)
            );
        }
        return (!) migration;
    }
    
    /**
     * Runs a single migration within a transaction.
     *
     * Commits on success; on failure rolls back and rethrows. A failure of
     * the rollback itself is logged rather than allowed to mask the original
     * error (previously it escaped to the outer catch and was misreported
     * as "Failed to begin transaction").
     */
    private async void run_single_async(Migration migration, bool is_up, MigrationProgressDelegate? progress) 
            throws MigrationError {
        
        var engine = _engine;
        if (engine == null) {
            throw new MigrationError.ENGINE_ERROR("Engine reference is invalid");
        }
        
        // Execute within transaction
        try {
            var tx = yield ((!) engine).begin_transaction_async();
            try {
                if (is_up) {
                    yield migration.up_async((!) engine);
                    // NOTE(review): record_migration writes directly to storage,
                    // not through tx — confirm history cannot diverge from data
                    // if commit_async fails after this point.
                    _storage.record_migration(migration.version, migration.description);
                } else {
                    yield migration.down_async((!) engine);
                    _storage.remove_migration(migration.version);
                }
                
                yield tx.commit_async();
                
                if (progress != null) {
                    progress(migration, is_up);
                }
            } catch (MigrationError e) {
                try {
                    yield tx.rollback_async();
                } catch (Core.EngineError rollback_err) {
                    warning("Rollback after failed migration %s also failed: %s",
                            migration.version, rollback_err.message);
                }
                throw e;
            } catch (Core.EngineError e) {
                try {
                    yield tx.rollback_async();
                } catch (Core.EngineError rollback_err) {
                    warning("Rollback after failed migration %s also failed: %s",
                            migration.version, rollback_err.message);
                }
                throw new MigrationError.EXECUTION_FAILED(
                    "Migration %s failed: %s".printf(migration.version, e.message)
                );
            }
        } catch (Core.EngineError e) {
            // Only begin_transaction_async can raise EngineError here now,
            // so this message is accurate.
            throw new MigrationError.TRANSACTION_ERROR(
                "Failed to begin transaction: %s".printf(e.message)
            );
        }
    }
}
+
+} // namespace Implexus.Migrations

+ 316 - 0
src/Migrations/MigrationStorage.vala

@@ -0,0 +1,316 @@
+/**
+ * MigrationStorage - Migration history persistence
+ * 
+ * Stores and retrieves migration history using the existing Dbm 
+ * infrastructure. Tracks which migrations have been applied and
+ * when they were applied.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Migrations {
+
/**
 * Stores and retrieves migration history.
 *
 * Migration history is persisted in the database using a dedicated
 * key prefix. This ensures migration state survives application restarts.
 *
 * Key format: migration:<version>
 * Value: Serialized (timestamp, description)
 *
 * A sorted index of applied versions is additionally kept under INDEX_KEY
 * so listing does not require a full key scan.
 */
public class MigrationStorage : Object {
    
    // === Constants ===
    
    private const string PREFIX = "migration:";
    // Shares PREFIX with record keys, so prefix scans must exclude it and
    // the version string "index" is reserved.
    private const string INDEX_KEY = "migration:index";
    
    // === Private Fields ===
    
    // Weak reference: the engine owns the storage configuration; avoid a cycle.
    private weak Core.Engine _engine;
    private Storage.Dbm? _dbm;
    
    // === Constructors ===
    
    /**
     * Creates a new MigrationStorage for the given engine.
     *
     * If the engine's storage is not a BasicStorage, _dbm stays null and
     * all write operations raise STORAGE_ERROR while reads report nothing
     * applied.
     *
     * @param engine The database engine
     */
    public MigrationStorage(Core.Engine engine) {
        _engine = engine;
        
        // Get Dbm from engine configuration if available
        var config = engine.configuration;
        var storage = config.storage;
        
        // Try to get Dbm from BasicStorage
        var basic_storage = (storage as Storage.BasicStorage);
        if (basic_storage != null) {
            _dbm = ((!) basic_storage).dbm;
        }
    }
    
    // === Recording Migrations ===
    
    /**
     * Records that a migration has been applied.
     *
     * @param version The migration version
     * @param description The migration description
     * @throws MigrationError if the record cannot be saved
     */
    public void record_migration(string version, string description) throws MigrationError {
        if (_dbm == null) {
            throw new MigrationError.STORAGE_ERROR("Database storage not available");
        }
        
        // Guard the reserved key: PREFIX + "index" would overwrite the index.
        if (version == "index") {
            throw new MigrationError.STORAGE_ERROR(
                "Migration version \"index\" is reserved for internal use"
            );
        }
        
        var dbm = (!) _dbm;
        string key = PREFIX + version;
        
        // Store migration record: (applied-at unix timestamp UTC, description)
        var writer = new Storage.ElementWriter();
        writer.write_element(new Invercargill.NativeElement<int64?>(new DateTime.now_utc().to_unix()));
        writer.write_element(new Invercargill.NativeElement<string>(description));
        
        try {
            dbm.set(key, writer.to_binary_data());
        } catch (Storage.StorageError e) {
            throw new MigrationError.STORAGE_ERROR(
                "Failed to record migration: %s".printf(e.message)
            );
        }
        
        // Update index
        update_index(dbm, version, true);
    }
    
    /**
     * Removes a migration record (for rollbacks).
     *
     * @param version The migration version to remove
     * @throws MigrationError if the record cannot be removed
     */
    public void remove_migration(string version) throws MigrationError {
        if (_dbm == null) {
            throw new MigrationError.STORAGE_ERROR("Database storage not available");
        }
        
        var dbm = (!) _dbm;
        string key = PREFIX + version;
        
        try {
            dbm.delete(key);
        } catch (Storage.StorageError e) {
            // Key may not exist, that's fine
        }
        
        // Update index
        update_index(dbm, version, false);
    }
    
    // === Querying Migrations ===
    
    /**
     * Gets all applied migration versions in sorted order.
     *
     * Reads the INDEX_KEY record when present; otherwise falls back to a
     * full key scan under PREFIX.
     *
     * @return Sorted enumerable of version strings
     */
    public Invercargill.Enumerable<string> get_applied_versions() {
        var versions = new Invercargill.DataStructures.Vector<string>();
        
        if (_dbm == null) {
            return versions.as_enumerable();
        }
        
        // Load from index
        var index_data = ((!) _dbm).get(INDEX_KEY);
        if (index_data == null) {
            // Fall back to scanning keys
            foreach (var key in ((!) _dbm).keys) {
                // Skip INDEX_KEY itself: it shares the prefix but is not a
                // migration record (previously reported phantom version "index").
                if (key.has_prefix(PREFIX) && key != INDEX_KEY) {
                    var version = key.substring(PREFIX.length);
                    versions.add(version);
                }
            }
            versions.sort((a, b) => strcmp(a, b));
            return versions.as_enumerable();
        }
        
        // Parse index
        var reader = new Storage.ElementReader((!) index_data);
        try {
            var element = reader.read_element();
            if (!element.is_null()) {
                var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
                foreach (var item in array) {
                    if (!item.is_null()) {
                        versions.add(item.as<string>());
                    }
                }
            }
        } catch (Invercargill.ElementError e) {
            // Corrupt index: report nothing rather than bad data
        }
        
        return versions.as_enumerable();
    }
    
    /**
     * Gets a set of applied migration versions for efficient lookup.
     *
     * @return Set of applied version strings
     */
    public Invercargill.DataStructures.HashSet<string> get_applied_set() {
        var set = new Invercargill.DataStructures.HashSet<string>();
        
        foreach (var version in get_applied_versions()) {
            set.add(version);
        }
        
        return set;
    }
    
    /**
     * Checks if a specific migration has been applied.
     *
     * @param version The migration version to check
     * @return true if the migration has been applied
     */
    public bool is_applied(string version) {
        if (_dbm == null) {
            return false;
        }
        
        string key = PREFIX + version;
        // The reserved version "index" collides with INDEX_KEY and is never
        // a real migration.
        if (key == INDEX_KEY) {
            return false;
        }
        return ((!) _dbm).has_key(key);
    }
    
    /**
     * Gets detailed information about an applied migration.
     *
     * @param version The migration version
     * @return Migration record, or null if not applied
     */
    public MigrationRecord? get_migration_record(string version) {
        if (_dbm == null) {
            return null;
        }
        
        string key = PREFIX + version;
        var data = ((!) _dbm).get(key);
        
        if (data == null) {
            return null;
        }
        
        // Record layout mirrors record_migration: timestamp, then description.
        var reader = new Storage.ElementReader((!) data);
        try {
            var timestamp_element = reader.read_element();
            var desc_element = reader.read_element();
            
            int64? timestamp = timestamp_element.as<int64?>();
            string description = desc_element.as<string>();
            
            return new MigrationRecord(
                version,
                description,
                timestamp != null ? new DateTime.from_unix_utc((!) timestamp) : null
            );
        } catch (Invercargill.ElementError e) {
            return null;
        }
    }
    
    // === Private Methods ===
    
    /**
     * Updates the migration index.
     *
     * Rebuilds the sorted version list under INDEX_KEY. The version being
     * updated is always filtered out of the loaded index first, then
     * re-added when add == true. (Previously the filter only applied when
     * adding, so rollbacks left a stale entry in the index.)
     */
    private void update_index(Storage.Dbm dbm, string version, bool add) {
        var versions = new Invercargill.DataStructures.Vector<string>();
        
        // Load existing index
        var index_data = dbm.get(INDEX_KEY);
        if (index_data != null) {
            var reader = new Storage.ElementReader((!) index_data);
            try {
                var element = reader.read_element();
                if (!element.is_null()) {
                    var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
                    foreach (var item in array) {
                        if (!item.is_null()) {
                            string existing = item.as<string>();
                            // Drop the version being updated in both the add
                            // and remove cases; it is re-added below if needed.
                            if (existing != version) {
                                versions.add(existing);
                            }
                        }
                    }
                }
            } catch (Invercargill.ElementError e) {
                // Start fresh
            }
        }
        
        if (add) {
            versions.add(version);
        }
        
        // Sort
        versions.sort((a, b) => strcmp(a, b));
        
        // Save index; an empty index is deleted rather than stored
        if (versions.length == 0) {
            try {
                dbm.delete(INDEX_KEY);
            } catch (Storage.StorageError e) {
                // Ignore
            }
            return;
        }
        
        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
        foreach (var v in versions) {
            array.add(new Invercargill.NativeElement<string>(v));
        }
        
        var writer = new Storage.ElementWriter();
        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
        
        try {
            dbm.set(INDEX_KEY, writer.to_binary_data());
        } catch (Storage.StorageError e) {
            // Ignore - not critical; the scan fallback still works
        }
    }
}
+
/**
 * Represents a record of an applied migration.
 *
 * Produced by MigrationStorage.get_migration_record() from the persisted
 * (timestamp, description) pair.
 */
public class MigrationRecord : Object {
    /**
     * The migration version.
     */
    public string version { get; construct set; }
    
    /**
     * The migration description.
     */
    public string description { get; construct set; }
    
    /**
     * When the migration was applied, or null if unknown.
     *
     * Stored and reconstructed as UTC (DateTime.from_unix_utc).
     */
    public DateTime? applied_at { get; construct set; }
    
    /**
     * Creates a new MigrationRecord.
     *
     * @param version The migration version
     * @param description The migration description
     * @param applied_at When the migration was applied (UTC), or null
     */
    public MigrationRecord(string version, string description, DateTime? applied_at) {
        Object(version: version, description: description, applied_at: applied_at);
    }
}
+
+} // namespace Implexus.Migrations

+ 8 - 0
src/Migrations/meson.build

@@ -0,0 +1,8 @@
# Migration system sources (alphabetical; files() order is not significant)
migration_sources = files(
    'BootstrapMigration.vala',
    'Migration.vala',
    'MigrationError.vala',
    'MigrationRunner.vala',
    'MigrationStorage.vala'
)

+ 1516 - 0
src/Protocol/Message.vala

@@ -0,0 +1,1516 @@
+/**
+ * Message - Protocol message types and serialization
+ * 
+ * Defines the binary message format for client/server communication.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Protocol {
+
/**
 * Message type codes for protocol messages.
 * 
 * Indicates the direction and purpose of a message. The numeric ranges
 * group related operations: 0x0x connection lifecycle, 0x1x entity access,
 * 0x2x creation, 0x3x properties, 0x4x deletion and children, 0x5x queries,
 * 0x6x transactions, 0x7x results.
 */
public enum MessageType {
    /** Welcome message sent from server to client on connection. */
    WELCOME = 0x00,
    /** Goodbye message sent when connection is closing. */
    GOODBYE = 0x01,
    /** Request to get an entity by path. */
    GET_ENTITY = 0x10,
    /** Response containing entity data. */
    ENTITY_RESPONSE = 0x11,
    /** Response indicating entity was not found. */
    ENTITY_NOT_FOUND = 0x12,
    /** Request to check if entity exists. */
    ENTITY_EXISTS = 0x13,
    /** Response containing a boolean value. */
    BOOLEAN_RESPONSE = 0x14,
    /** Request to create a container. */
    CREATE_CONTAINER = 0x20,
    /** Request to create a document. */
    CREATE_DOCUMENT = 0x21,
    /** Request to create a category. */
    CREATE_CATEGORY = 0x22,
    /** Request to create an index. */
    CREATE_INDEX = 0x23,
    /** Request to set a property. */
    SET_PROPERTY = 0x30,
    /** Request to get a property. */
    GET_PROPERTY = 0x31,
    /** Request to remove a property. */
    REMOVE_PROPERTY = 0x32,
    /** Property value response. */
    PROPERTY_RESPONSE = 0x33,
    /** Request to delete an entity. */
    DELETE_ENTITY = 0x40,
    /** Request to get children of a category. */
    GET_CHILDREN = 0x41,
    /** Request to get child names of a category. */
    GET_CHILD_NAMES = 0x42,
    /** Children list response. */
    CHILDREN_RESPONSE = 0x43,
    /** Request to query entities by type. */
    QUERY_BY_TYPE = 0x50,
    /** Request to query entities by expression. */
    QUERY_BY_EXPRESSION = 0x51,
    /** Query results response. */
    QUERY_RESPONSE = 0x52,
    /** Request to begin a transaction. */
    BEGIN_TRANSACTION = 0x60,
    /** Request to commit a transaction. */
    COMMIT_TRANSACTION = 0x61,
    /** Request to rollback a transaction. */
    ROLLBACK_TRANSACTION = 0x62,
    /** Error response from server. */
    ERROR = 0x70,
    /** Success response indicating operation completed. */
    SUCCESS = 0x7F;

    /**
     * Checks if this message type is a request (client → server).
     *
     * WELCOME and GOODBYE are lifecycle messages: neither request nor response.
     */
    public bool is_request() {
        switch (this) {
            case GET_ENTITY:
            case ENTITY_EXISTS:
            case CREATE_CONTAINER:
            case CREATE_DOCUMENT:
            case CREATE_CATEGORY:
            case CREATE_INDEX:
            case SET_PROPERTY:
            case GET_PROPERTY:
            case REMOVE_PROPERTY:
            case DELETE_ENTITY:
            case GET_CHILDREN:
            case GET_CHILD_NAMES:
            case QUERY_BY_TYPE:
            case QUERY_BY_EXPRESSION:
            case BEGIN_TRANSACTION:
            case COMMIT_TRANSACTION:
            case ROLLBACK_TRANSACTION:
                return true;
            default:
                return false;
        }
    }

    /**
     * Checks if this message type is a response (server → client).
     */
    public bool is_response() {
        switch (this) {
            case ENTITY_RESPONSE:
            case ENTITY_NOT_FOUND:
            case BOOLEAN_RESPONSE:
            case PROPERTY_RESPONSE:
            case CHILDREN_RESPONSE:
            case QUERY_RESPONSE:
            case ERROR:
            case SUCCESS:
                return true;
            default:
                return false;
        }
    }
}
+
/**
 * Magic bytes for protocol identification ("IMPX" in ASCII).
 */
public const uint8[] MAGIC_BYTES = { 0x49, 0x4D, 0x50, 0x58 };

/**
 * Size of the message header in bytes.
 *
 * Layout: 4 (magic) + 1 (type) + 4 (payload length) + 2 (request id) = 11.
 */
public const int HEADER_SIZE = 11;

/**
 * Maximum payload size (10 MB).
 *
 * Headers declaring a larger payload are rejected during deserialization
 * with ProtocolError.PAYLOAD_TOO_LARGE.
 */
public const uint32 MAX_PAYLOAD_SIZE = 10 * 1024 * 1024;
+
/**
 * Message header structure.
 * 
 * Every message begins with this fixed-size header:
 * - 4 bytes: Magic ("IMPX")
 * - 1 byte: Message type
 * - 4 bytes: Payload length (big-endian)
 * - 2 bytes: Request ID (big-endian)
 */
public class MessageHeader : Object {
    
    /**
     * Magic bytes for protocol identification.
     */
    public uint8[] magic { get; set; }
    
    /**
     * The message type.
     */
    public MessageType message_type { get; set; }
    
    /**
     * Length of the payload in bytes.
     */
    public uint32 payload_length { get; set; }
    
    /**
     * Request ID for matching requests with responses.
     */
    public uint16 request_id { get; set; }
    
    /**
     * Creates a new MessageHeader with default values.
     */
    public MessageHeader() {
        magic = new uint8[] { MAGIC_BYTES[0], MAGIC_BYTES[1], MAGIC_BYTES[2], MAGIC_BYTES[3] };
        message_type = MessageType.SUCCESS;
        payload_length = 0;
        request_id = 0;
    }
    
    /**
     * Serializes the header to binary format.
     * 
     * Note: the canonical MAGIC_BYTES are always written, regardless of
     * the magic property's current value.
     * 
     * @return The serialized header bytes
     */
    public uint8[] serialize() {
        var buffer = new uint8[HEADER_SIZE];
        
        // Magic bytes
        for (int i = 0; i < 4; i++) {
            buffer[i] = MAGIC_BYTES[i];
        }
        
        // Message type
        buffer[4] = (uint8) message_type;
        
        // Payload length, most significant byte first
        uint32 length = payload_length;
        buffer[5] = (uint8) ((length >> 24) & 0xFF);
        buffer[6] = (uint8) ((length >> 16) & 0xFF);
        buffer[7] = (uint8) ((length >> 8) & 0xFF);
        buffer[8] = (uint8) (length & 0xFF);
        
        // Request ID, most significant byte first
        uint16 id = request_id;
        buffer[9] = (uint8) ((id >> 8) & 0xFF);
        buffer[10] = (uint8) (id & 0xFF);
        
        return buffer;
    }
    
    /**
     * Deserializes a header from binary data.
     * 
     * @param data The binary data (must be at least HEADER_SIZE bytes)
     * @return The deserialized header
     * @throws ProtocolError if the data is invalid
     */
    public static MessageHeader deserialize(uint8[] data) throws ProtocolError {
        if (data.length < HEADER_SIZE) {
            throw new ProtocolError.INVALID_MESSAGE(
                "Header too short: expected %d bytes, got %d".printf(HEADER_SIZE, data.length)
            );
        }
        
        // Validate magic bytes
        bool magic_ok = true;
        for (int i = 0; i < 4; i++) {
            if (data[i] != MAGIC_BYTES[i]) {
                magic_ok = false;
                break;
            }
        }
        if (!magic_ok) {
            throw new ProtocolError.INVALID_MAGIC(
                "Invalid magic bytes: expected IMPX, got 0x%02X%02X%02X%02X".printf(
                    data[0], data[1], data[2], data[3]
                )
            );
        }
        
        var result = new MessageHeader();
        
        // Message type: any byte is accepted here; unknown values are left
        // for higher layers to reject.
        result.message_type = (MessageType) data[4];
        
        // Payload length, big-endian accumulate
        uint32 length = 0;
        for (int i = 5; i <= 8; i++) {
            length = (length << 8) | data[i];
        }
        result.payload_length = length;
        
        // Request ID, big-endian
        result.request_id = (uint16) ((data[9] << 8) | data[10]);
        
        // Validate payload length
        if (result.payload_length > MAX_PAYLOAD_SIZE) {
            throw new ProtocolError.PAYLOAD_TOO_LARGE(
                "Payload too large: %u bytes (max %u)".printf(
                    result.payload_length, MAX_PAYLOAD_SIZE
                )
            );
        }
        
        return result;
    }
}
+
+/**
+ * Base class for protocol messages.
+ * 
+ * Provides common functionality for all message types including
+ * serialization, deserialization, and request/response matching.
+ */
+public abstract class Message : Object {
+    
+    /**
+     * The message header containing type and metadata.
+     */
+    public MessageHeader header { get; set; }
+    
+    /**
+     * The message type, read from the header.
+     */
+    public MessageType message_type {
+        get { return header.message_type; }
+    }
+    
+    /**
+     * The request ID used to match requests with responses.
+     */
+    public uint16 request_id {
+        get { return header.request_id; }
+        set { header.request_id = value; }
+    }
+    
+    /**
+     * Creates a new Message of the given type with a fresh header.
+     * 
+     * @param type The message type
+     */
+    protected Message(MessageType type) {
+        header = new MessageHeader();
+        header.message_type = type;
+    }
+    
+    /**
+     * Serializes the message payload to binary format.
+     * 
+     * @return The serialized payload bytes, or null for an empty payload
+     */
+    public abstract uint8[]? serialize_payload();
+    
+    /**
+     * Deserializes the message payload from binary data.
+     * 
+     * @param data The payload binary data
+     * @throws ProtocolError if deserialization fails
+     */
+    public abstract void deserialize_payload(uint8[] data) throws ProtocolError;
+    
+    /**
+     * Serializes the complete message (header followed by payload).
+     * 
+     * Also updates header.payload_length to match the payload actually
+     * produced by serialize_payload().
+     * 
+     * @return The serialized message bytes
+     */
+    public uint8[] serialize() {
+        var payload = serialize_payload();
+        int payload_len = (payload != null) ? ((!) payload).length : 0;
+        header.payload_length = (uint32) payload_len;
+        
+        var head = header.serialize();
+        if (payload_len == 0) {
+            return head;
+        }
+        
+        // Concatenate header and payload into a single buffer.
+        var combined = new uint8[head.length + payload_len];
+        Memory.copy(combined, head, head.length);
+        Memory.copy((uint8*) combined + head.length, (!) payload, payload_len);
+        return combined;
+    }
+}
+
+// ============================================================================
+// Request Messages
+// ============================================================================
+
+/**
+ * Request to get an entity by path.
+ */
+public class GetEntityRequest : Message {
+    
+    /**
+     * The path of the entity to retrieve.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * Creates a new GetEntityRequest targeting the root path.
+     */
+    public GetEntityRequest() {
+        base(MessageType.GET_ENTITY);
+        path = new Core.EntityPath.root();
+    }
+    
+    /**
+     * Creates a new GetEntityRequest for the specified path.
+     * 
+     * @param path The entity path
+     */
+    public GetEntityRequest.for_path(Core.EntityPath path) {
+        base(MessageType.GET_ENTITY);
+        this.path = path;
+    }
+    
+    /**
+     * Writes the target path string as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the target path back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize GetEntityRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to check if an entity exists.
+ */
+public class EntityExistsRequest : Message {
+    
+    /**
+     * The path to check.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * Creates a new EntityExistsRequest targeting the root path.
+     */
+    public EntityExistsRequest() {
+        base(MessageType.ENTITY_EXISTS);
+        path = new Core.EntityPath.root();
+    }
+    
+    /**
+     * Creates a new EntityExistsRequest for the specified path.
+     */
+    public EntityExistsRequest.for_path(Core.EntityPath path) {
+        base(MessageType.ENTITY_EXISTS);
+        this.path = path;
+    }
+    
+    /**
+     * Writes the path to check as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the path to check back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize EntityExistsRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to create a container.
+ */
+public class CreateContainerRequest : Message {
+    
+    /**
+     * The path where the container should be created.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * Creates a new CreateContainerRequest targeting the root path.
+     */
+    public CreateContainerRequest() {
+        base(MessageType.CREATE_CONTAINER);
+        path = new Core.EntityPath.root();
+    }
+    
+    /**
+     * Creates a new CreateContainerRequest for the specified path.
+     */
+    public CreateContainerRequest.for_path(Core.EntityPath path) {
+        base(MessageType.CREATE_CONTAINER);
+        this.path = path;
+    }
+    
+    /**
+     * Writes the creation path as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the creation path back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize CreateContainerRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to create a document.
+ */
+public class CreateDocumentRequest : Message {
+    
+    /**
+     * The path where the document should be created.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * The type label for the document.
+     */
+    public string type_label { get; set; }
+    
+    /**
+     * Creates a new CreateDocumentRequest with the root path and an
+     * empty type label.
+     */
+    public CreateDocumentRequest() {
+        base(MessageType.CREATE_DOCUMENT);
+        path = new Core.EntityPath.root();
+        type_label = "";
+    }
+    
+    /**
+     * Creates a new CreateDocumentRequest for the specified path and type.
+     */
+    public CreateDocumentRequest.for_path_and_type(Core.EntityPath path, string type_label) {
+        base(MessageType.CREATE_DOCUMENT);
+        this.path = path;
+        this.type_label = type_label;
+    }
+    
+    /**
+     * Writes the creation path followed by the type label as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        w.write_string(type_label);
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the creation path and type label back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+            type_label = r.read_string();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize CreateDocumentRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to delete an entity.
+ */
+public class DeleteEntityRequest : Message {
+    
+    /**
+     * The path of the entity to delete.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * Creates a new DeleteEntityRequest targeting the root path.
+     */
+    public DeleteEntityRequest() {
+        base(MessageType.DELETE_ENTITY);
+        path = new Core.EntityPath.root();
+    }
+    
+    /**
+     * Creates a new DeleteEntityRequest for the specified path.
+     */
+    public DeleteEntityRequest.for_path(Core.EntityPath path) {
+        base(MessageType.DELETE_ENTITY);
+        this.path = path;
+    }
+    
+    /**
+     * Writes the deletion target path as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the deletion target path back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize DeleteEntityRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to set a property on an entity.
+ */
+public class SetPropertyRequest : Message {
+    
+    /**
+     * The path of the entity.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * The property name.
+     */
+    public string property_name { get; set; }
+    
+    /**
+     * The property value, or null.
+     */
+    public Invercargill.Element? value { get; set; }
+    
+    /**
+     * Creates a new SetPropertyRequest with the root path, an empty
+     * property name and a null value.
+     */
+    public SetPropertyRequest() {
+        base(MessageType.SET_PROPERTY);
+        path = new Core.EntityPath.root();
+        property_name = "";
+        value = null;
+    }
+    
+    /**
+     * Serializes the payload as: path (string), property name (string),
+     * then the value element (or null).
+     * 
+     * @return The serialized payload bytes
+     */
+    public override uint8[]? serialize_payload() {
+        var writer = new Storage.ElementWriter();
+        writer.write_string(path.to_string());
+        writer.write_string(property_name);
+        if (value != null) {
+            try {
+                writer.write_element((!) value);
+            } catch (Storage.StorageError e) {
+                // Degrade to null rather than aborting the request, but make
+                // the data loss visible instead of swallowing it silently.
+                // NOTE(review): if write_element can fail after partially
+                // writing, the following write_null may corrupt the payload —
+                // confirm ElementWriter failure is all-or-nothing.
+                warning("SetPropertyRequest: failed to serialize value for '%s', sending null: %s",
+                        property_name, e.message);
+                writer.write_null();
+            }
+        } else {
+            writer.write_null();
+        }
+        return writer.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Deserializes the payload written by serialize_payload().
+     * 
+     * @param data The payload binary data
+     * @throws ProtocolError if deserialization fails
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var reader = new Storage.ElementReader(
+                new Invercargill.DataStructures.ByteBuffer.from_byte_array(data)
+            );
+            var path_str = reader.read_string();
+            path = new Core.EntityPath(path_str);
+            property_name = reader.read_string();
+            value = reader.read_element();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize SetPropertyRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to get a property from an entity.
+ */
+public class GetPropertyRequest : Message {
+    
+    /**
+     * The path of the entity.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * The property name.
+     */
+    public string property_name { get; set; }
+    
+    /**
+     * Creates a new GetPropertyRequest with the root path and an empty
+     * property name.
+     */
+    public GetPropertyRequest() {
+        base(MessageType.GET_PROPERTY);
+        path = new Core.EntityPath.root();
+        property_name = "";
+    }
+    
+    /**
+     * Writes the entity path followed by the property name as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        w.write_string(property_name);
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the entity path and property name back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+            property_name = r.read_string();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize GetPropertyRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to get children of a container.
+ */
+public class GetChildrenRequest : Message {
+    
+    /**
+     * The path of the container.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * Creates a new GetChildrenRequest targeting the root path.
+     */
+    public GetChildrenRequest() {
+        base(MessageType.GET_CHILDREN);
+        path = new Core.EntityPath.root();
+    }
+    
+    /**
+     * Writes the container path as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(path.to_string());
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the container path back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            path = new Core.EntityPath(r.read_string());
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize GetChildrenRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to query entities by type.
+ */
+public class QueryByTypeRequest : Message {
+    
+    /**
+     * The type label to query.
+     */
+    public string type_label { get; set; }
+    
+    /**
+     * Creates a new QueryByTypeRequest with an empty type label.
+     */
+    public QueryByTypeRequest() {
+        base(MessageType.QUERY_BY_TYPE);
+        type_label = "";
+    }
+    
+    /**
+     * Writes the type label as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_string(type_label);
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the type label back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            type_label = r.read_string();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize QueryByTypeRequest: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Request to begin a transaction.
+ */
+public class BeginTransactionRequest : Message {
+    
+    /**
+     * Creates a new BeginTransactionRequest.
+     */
+    public BeginTransactionRequest() {
+        base(MessageType.BEGIN_TRANSACTION);
+    }
+    
+    /**
+     * Transaction control messages carry no payload.
+     */
+    public override uint8[]? serialize_payload() {
+        return null;
+    }
+    
+    /**
+     * Transaction control messages carry no payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        // Nothing to read.
+    }
+}
+
+/**
+ * Request to commit a transaction.
+ */
+public class CommitTransactionRequest : Message {
+    
+    /**
+     * Creates a new CommitTransactionRequest.
+     */
+    public CommitTransactionRequest() {
+        base(MessageType.COMMIT_TRANSACTION);
+    }
+    
+    /**
+     * Transaction control messages carry no payload.
+     */
+    public override uint8[]? serialize_payload() {
+        return null;
+    }
+    
+    /**
+     * Transaction control messages carry no payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        // Nothing to read.
+    }
+}
+
+/**
+ * Request to rollback a transaction.
+ */
+public class RollbackTransactionRequest : Message {
+    
+    /**
+     * Creates a new RollbackTransactionRequest.
+     */
+    public RollbackTransactionRequest() {
+        base(MessageType.ROLLBACK_TRANSACTION);
+    }
+    
+    /**
+     * Transaction control messages carry no payload.
+     */
+    public override uint8[]? serialize_payload() {
+        return null;
+    }
+    
+    /**
+     * Transaction control messages carry no payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        // Nothing to read.
+    }
+}
+
+// ============================================================================
+// Response Messages
+// ============================================================================
+
+/**
+ * Data about an entity for serialization.
+ * 
+ * Plain data holder used by EntityResponse and ChildrenResponse; it has
+ * no behavior of its own.
+ */
+public class EntityData : Object {
+    /**
+     * The entity type.
+     */
+    public Core.EntityType entity_type { get; set; }
+    
+    /**
+     * The entity path.
+     */
+    public Core.EntityPath path { get; set; }
+    
+    /**
+     * The type label (for documents). Null when absent.
+     */
+    public string? type_label { get; set; }
+    
+    /**
+     * The expression (for category/index). Null when absent.
+     */
+    public string? expression { get; set; }
+    
+    /**
+     * The properties (for documents). Null when absent.
+     */
+    public Invercargill.Properties? properties { get; set; }
+    
+    /**
+     * Creates a new EntityData defaulting to a CONTAINER at the root path.
+     * 
+     * NOTE(review): these defaults are only applied via this creation
+     * method, not when the object is built through g_object_new /
+     * Object.new — confirm no such construction path exists.
+     */
+    public EntityData() {
+        entity_type = Core.EntityType.CONTAINER;
+        path = new Core.EntityPath.root();
+    }
+}
+
+/**
+ * Response containing entity data.
+ */
+public class EntityResponse : Message {
+    
+    /**
+     * The entity data.
+     */
+    public EntityData entity_data { get; set; }
+    
+    /**
+     * Creates a new EntityResponse with empty default entity data.
+     */
+    public EntityResponse() {
+        base(MessageType.ENTITY_RESPONSE);
+        entity_data = new EntityData();
+    }
+    
+    /**
+     * Serializes the payload as: type code (int64), path (string),
+     * type label (string, "" when null), expression (string, "" when
+     * null), then the properties dictionary (or null when absent).
+     * 
+     * @return The serialized payload bytes
+     */
+    public override uint8[]? serialize_payload() {
+        var writer = new Storage.ElementWriter();
+        
+        // Entity type
+        writer.write_int64((int64) entity_data.entity_type);
+        
+        // Path
+        writer.write_string(entity_data.path.to_string());
+        
+        // Type label ("" encodes null)
+        writer.write_string(entity_data.type_label ?? "");
+        
+        // Expression ("" encodes null)
+        writer.write_string(entity_data.expression ?? "");
+        
+        // Properties
+        if (entity_data.properties != null) {
+            writer.write_dictionary((!) entity_data.properties);
+        } else {
+            writer.write_null();
+        }
+        
+        return writer.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Deserializes the payload written by serialize_payload().
+     * 
+     * Empty type label / expression strings are mapped back to null, so a
+     * genuinely empty (non-null) label cannot be round-tripped.
+     * 
+     * @param data The payload binary data
+     * @throws ProtocolError if deserialization fails
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var reader = new Storage.ElementReader(
+                new Invercargill.DataStructures.ByteBuffer.from_byte_array(data)
+            );
+            
+            entity_data = new EntityData();
+            
+            var type_code = reader.read_int64();
+            entity_data.entity_type = (Core.EntityType) type_code;
+            
+            var path_str = reader.read_string();
+            entity_data.path = new Core.EntityPath(path_str);
+            
+            var type_label = reader.read_string();
+            entity_data.type_label = type_label.length > 0 ? type_label : null;
+            
+            var expression = reader.read_string();
+            entity_data.expression = expression.length > 0 ? expression : null;
+            
+            // Read properties if present
+            if (reader.has_more) {
+                var element = reader.read_element();
+                if (!element.is_null()) {
+                    try {
+                        entity_data.properties = element.as<Invercargill.Properties>();
+                    } catch (Invercargill.ElementError e) {
+                        // Malformed payload: keep properties null, but surface
+                        // the problem instead of dropping it silently.
+                        warning("EntityResponse: properties element is not a dictionary: %s",
+                                e.message);
+                    }
+                }
+            }
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize EntityResponse: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Response containing a boolean value.
+ */
+public class BooleanResponse : Message {
+    
+    /**
+     * The boolean value.
+     */
+    public bool value { get; set; }
+    
+    /**
+     * Creates a new BooleanResponse with a false value.
+     */
+    public BooleanResponse() {
+        base(MessageType.BOOLEAN_RESPONSE);
+        value = false;
+    }
+    
+    /**
+     * Writes the boolean value as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_bool(value);
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the boolean value back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            value = r.read_bool();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize BooleanResponse: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Response containing a property value.
+ */
+public class PropertyResponse : Message {
+    
+    /**
+     * The property value, or null.
+     */
+    public Invercargill.Element? value { get; set; }
+    
+    /**
+     * Creates a new PropertyResponse with a null value.
+     */
+    public PropertyResponse() {
+        base(MessageType.PROPERTY_RESPONSE);
+        value = null;
+    }
+    
+    /**
+     * Writes the value element (or null) as the payload.
+     * 
+     * @return The serialized payload bytes
+     */
+    public override uint8[]? serialize_payload() {
+        var writer = new Storage.ElementWriter();
+        if (value != null) {
+            try {
+                writer.write_element((!) value);
+            } catch (Storage.StorageError e) {
+                // Degrade to null rather than aborting the response, but make
+                // the data loss visible instead of swallowing it silently.
+                warning("PropertyResponse: failed to serialize value, sending null: %s",
+                        e.message);
+                writer.write_null();
+            }
+        } else {
+            writer.write_null();
+        }
+        return writer.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the value element back out of the payload.
+     * 
+     * @param data The payload binary data
+     * @throws ProtocolError if deserialization fails
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var reader = new Storage.ElementReader(
+                new Invercargill.DataStructures.ByteBuffer.from_byte_array(data)
+            );
+            value = reader.read_element();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize PropertyResponse: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Response containing children list.
+ */
+public class ChildrenResponse : Message {
+    
+    /**
+     * The list of child entity data.
+     */
+    public Invercargill.DataStructures.Vector<EntityData> children { get; set; }
+    
+    /**
+     * Creates a new ChildrenResponse with an empty child list.
+     */
+    public ChildrenResponse() {
+        base(MessageType.CHILDREN_RESPONSE);
+        children = new Invercargill.DataStructures.Vector<EntityData>();
+    }
+    
+    /**
+     * Writes the child count followed by, for each child: type code,
+     * path, type label ("" when null) and expression ("" when null).
+     * Child properties are not included.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_int64((int64) children.count());
+        foreach (var entry in children) {
+            w.write_int64((int64) entry.entity_type);
+            w.write_string(entry.path.to_string());
+            w.write_string(entry.type_label ?? "");
+            w.write_string(entry.expression ?? "");
+        }
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the child list back out of the payload, mapping empty type
+     * label / expression strings back to null.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            
+            children = new Invercargill.DataStructures.Vector<EntityData>();
+            
+            var total = r.read_int64();
+            for (int64 i = 0; i < total; i++) {
+                var entry = new EntityData();
+                entry.entity_type = (Core.EntityType) r.read_int64();
+                entry.path = new Core.EntityPath(r.read_string());
+                var label = r.read_string();
+                entry.type_label = (label.length > 0) ? label : null;
+                var expr = r.read_string();
+                entry.expression = (expr.length > 0) ? expr : null;
+                children.add(entry);
+            }
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize ChildrenResponse: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Response containing query results.
+ */
+public class QueryResponse : Message {
+    
+    /**
+     * The list of matching entity paths.
+     */
+    public Invercargill.DataStructures.Vector<string> paths { get; set; }
+    
+    /**
+     * Creates a new QueryResponse with an empty result list.
+     */
+    public QueryResponse() {
+        base(MessageType.QUERY_RESPONSE);
+        paths = new Invercargill.DataStructures.Vector<string>();
+    }
+    
+    /**
+     * Writes the result count followed by each matching path string.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_int64((int64) paths.count());
+        foreach (var entry in paths) {
+            w.write_string(entry);
+        }
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the result list back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            
+            paths = new Invercargill.DataStructures.Vector<string>();
+            
+            var total = r.read_int64();
+            for (int64 i = 0; i < total; i++) {
+                paths.add(r.read_string());
+            }
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize QueryResponse: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Error response from the server.
+ */
+public class ErrorResponse : Message {
+    
+    /**
+     * The error code.
+     */
+    public int error_code { get; set; }
+    
+    /**
+     * The error message.
+     */
+    public string error_message { get; set; }
+    
+    /**
+     * Creates a new ErrorResponse with code 0 and an empty message.
+     */
+    public ErrorResponse() {
+        base(MessageType.ERROR);
+        error_code = 0;
+        error_message = "";
+    }
+    
+    /**
+     * Creates a new ErrorResponse with the specified error.
+     */
+    public ErrorResponse.with_error(int code, string message) {
+        base(MessageType.ERROR);
+        error_code = code;
+        error_message = message;
+    }
+    
+    /**
+     * Writes the error code followed by the error message as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_int64((int64) error_code);
+        w.write_string(error_message);
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the error code and message back out of the payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            error_code = (int) r.read_int64();
+            error_message = r.read_string();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize ErrorResponse: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Success response indicating operation completed.
+ */
+public class SuccessResponse : Message {
+    
+    /**
+     * Creates a new SuccessResponse.
+     */
+    public SuccessResponse() {
+        base(MessageType.SUCCESS);
+    }
+    
+    /**
+     * Success responses carry no payload.
+     */
+    public override uint8[]? serialize_payload() {
+        return null;
+    }
+    
+    /**
+     * Success responses carry no payload.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        // Nothing to read.
+    }
+}
+
+/**
+ * Welcome message sent from server to client on connection.
+ */
+public class WelcomeMessage : Message {
+    
+    /**
+     * The server protocol version.
+     */
+    public uint8 server_version { get; set; }
+    
+    /**
+     * Creates a new WelcomeMessage advertising protocol version 1.
+     */
+    public WelcomeMessage() {
+        base(MessageType.WELCOME);
+        server_version = 1;
+    }
+    
+    /**
+     * Writes the server version (as an int64 element) as the payload.
+     */
+    public override uint8[]? serialize_payload() {
+        var w = new Storage.ElementWriter();
+        w.write_int64((int64) server_version);
+        return w.to_binary_data().to_bytes().get_data();
+    }
+    
+    /**
+     * Reads the server version back out of the payload. The version is
+     * encoded as an int64 element and truncated to a byte on read.
+     */
+    public override void deserialize_payload(uint8[] data) throws ProtocolError {
+        try {
+            var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            var r = new Storage.ElementReader(buffer);
+            server_version = (uint8) r.read_int64();
+        } catch (Storage.StorageError e) {
+            throw new ProtocolError.SERIALIZATION_ERROR(
+                "Failed to deserialize WelcomeMessage: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Factory for creating messages from message type.
+ * 
+ * Used when decoding incoming frames: the header's type byte selects a
+ * concrete Message subclass here, and the caller is then expected to
+ * populate it via Message.deserialize_payload() (presumably from
+ * MessageReader — confirm).
+ */
+public class MessageFactory : Object {
+    
+    /**
+     * Creates a message instance for the given message type.
+     * 
+     * The returned message has default field values; no payload is read
+     * here.
+     * 
+     * @param type The message type
+     * @return A new message instance
+     * @throws ProtocolError if the message type is unknown
+     */
+    public static Message create_message(MessageType type) throws ProtocolError {
+        switch (type) {
+            // Requests
+            case MessageType.GET_ENTITY:
+                return new GetEntityRequest();
+            case MessageType.ENTITY_EXISTS:
+                return new EntityExistsRequest();
+            case MessageType.CREATE_CONTAINER:
+                return new CreateContainerRequest();
+            case MessageType.CREATE_DOCUMENT:
+                return new CreateDocumentRequest();
+            case MessageType.DELETE_ENTITY:
+                return new DeleteEntityRequest();
+            case MessageType.SET_PROPERTY:
+                return new SetPropertyRequest();
+            case MessageType.GET_PROPERTY:
+                return new GetPropertyRequest();
+            case MessageType.GET_CHILDREN:
+                return new GetChildrenRequest();
+            case MessageType.QUERY_BY_TYPE:
+                return new QueryByTypeRequest();
+            case MessageType.BEGIN_TRANSACTION:
+                return new BeginTransactionRequest();
+            case MessageType.COMMIT_TRANSACTION:
+                return new CommitTransactionRequest();
+            case MessageType.ROLLBACK_TRANSACTION:
+                return new RollbackTransactionRequest();
+            
+            // Responses
+            case MessageType.ENTITY_RESPONSE:
+                return new EntityResponse();
+            case MessageType.BOOLEAN_RESPONSE:
+                return new BooleanResponse();
+            case MessageType.PROPERTY_RESPONSE:
+                return new PropertyResponse();
+            case MessageType.CHILDREN_RESPONSE:
+                return new ChildrenResponse();
+            case MessageType.QUERY_RESPONSE:
+                return new QueryResponse();
+            case MessageType.ERROR:
+                return new ErrorResponse();
+            case MessageType.SUCCESS:
+                return new SuccessResponse();
+            case MessageType.WELCOME:
+                return new WelcomeMessage();
+            
+            // Any other type byte is a protocol violation.
+            default:
+                throw new ProtocolError.UNKNOWN_MESSAGE_TYPE(
+                    "Unknown message type: 0x%02X".printf((int) type)
+                );
+        }
+    }
+}
+
+} // namespace Implexus.Protocol

+ 296 - 0
src/Protocol/MessageReader.vala

@@ -0,0 +1,296 @@
+/**
+ * MessageReader - Message reading from input stream
+ * 
+ * Provides functionality for reading protocol messages from
+ * an InputStream with framing and length prefixes.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Protocol {
+
+/**
+ * Reads protocol messages from an InputStream.
+ * 
+ * Handles the binary message format including:
+ * - Header parsing and validation
+ * - Payload reading
+ * - Message deserialization
+ * 
+ * Example usage:
+ * {{{
+ * var reader = new MessageReader(input_stream);
+ * try {
+ *     var message = reader.read_message();
+ *     if (message is EntityResponse) {
+ *         var response = (EntityResponse) message;
+ *         // process response
+ *     }
+ * } catch (ProtocolError e) {
+ *     warning("Failed to read message: %s", e.message);
+ * }
+ * }}}
+ */
+public class MessageReader : Object {
+    
+    /**
+     * The underlying input stream.
+     */
+    private InputStream _stream;
+    
+    /**
+     * Reusable buffer for the fixed-size message header.
+     */
+    private uint8[] _header_buffer;
+    
+    /**
+     * Whether the reader is closed.
+     */
+    private bool _closed = false;
+    
+    /**
+     * Creates a new MessageReader for the given stream.
+     * 
+     * @param stream The input stream to read from
+     */
+    public MessageReader(InputStream stream) {
+        _stream = stream;
+        _header_buffer = new uint8[HEADER_SIZE];
+    }
+    
+    /**
+     * Reads a single message from the stream.
+     * 
+     * This method blocks until a complete message is available
+     * or the connection is closed.
+     * 
+     * @return The message read, or null if the connection was closed
+     * @throws ProtocolError if reading or deserialization fails
+     */
+    public Message? read_message() throws ProtocolError {
+        if (_closed) {
+            throw new ProtocolError.CONNECTION_CLOSED("Reader is closed");
+        }
+        
+        try {
+            // Read the fixed-size header. read_all() reports failures
+            // via IOError, so its boolean result carries no extra
+            // information and is deliberately not stored.
+            size_t bytes_read = 0;
+            _stream.read_all(_header_buffer, out bytes_read);
+            
+            if (bytes_read == 0) {
+                // Clean end-of-stream: the peer closed the connection.
+                return null;
+            }
+            
+            if (bytes_read < HEADER_SIZE) {
+                throw new ProtocolError.INVALID_MESSAGE(
+                    "Incomplete header: expected %d bytes, got %zu".printf(HEADER_SIZE, bytes_read)
+                );
+            }
+            
+            // Parse header
+            var header = MessageHeader.deserialize(_header_buffer);
+            
+            // Read the payload, if the header announces one.
+            uint8[]? payload = null;
+            if (header.payload_length > 0) {
+                payload = new uint8[header.payload_length];
+                _stream.read_all(payload, out bytes_read);
+                
+                if (bytes_read < header.payload_length) {
+                    throw new ProtocolError.INVALID_MESSAGE(
+                        "Incomplete payload: expected %u bytes, got %zu".printf(
+                            header.payload_length, bytes_read
+                        )
+                    );
+                }
+            }
+            
+            return assemble_message(header, payload);
+            
+        } catch (IOError e) {
+            if (e is IOError.CLOSED) {
+                _closed = true;
+                return null;
+            }
+            throw new ProtocolError.IO_ERROR("Read error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Reads a message asynchronously.
+     * 
+     * @param priority The I/O priority
+     * @param cancellable Optional cancellation token
+     * @return The message read, or null if the connection was closed
+     * @throws ProtocolError if reading or deserialization fails
+     */
+    public async Message? read_message_async(
+        int priority = GLib.Priority.DEFAULT,
+        Cancellable? cancellable = null
+    ) throws ProtocolError {
+        if (_closed) {
+            throw new ProtocolError.CONNECTION_CLOSED("Reader is closed");
+        }
+        
+        try {
+            // Read the fixed-size header asynchronously; failures are
+            // reported via IOError, so the boolean result is ignored.
+            size_t bytes_read = 0;
+            yield _stream.read_all_async(
+                _header_buffer, 
+                priority, 
+                cancellable, 
+                out bytes_read
+            );
+            
+            if (bytes_read == 0) {
+                // Clean end-of-stream: the peer closed the connection.
+                return null;
+            }
+            
+            if (bytes_read < HEADER_SIZE) {
+                throw new ProtocolError.INVALID_MESSAGE(
+                    "Incomplete header: expected %d bytes, got %zu".printf(HEADER_SIZE, bytes_read)
+                );
+            }
+            
+            // Parse header
+            var header = MessageHeader.deserialize(_header_buffer);
+            
+            // Read the payload, if the header announces one.
+            uint8[]? payload = null;
+            if (header.payload_length > 0) {
+                payload = new uint8[header.payload_length];
+                yield _stream.read_all_async(
+                    payload, 
+                    priority, 
+                    cancellable, 
+                    out bytes_read
+                );
+                
+                if (bytes_read < header.payload_length) {
+                    throw new ProtocolError.INVALID_MESSAGE(
+                        "Incomplete payload: expected %u bytes, got %zu".printf(
+                            header.payload_length, bytes_read
+                        )
+                    );
+                }
+            }
+            
+            return assemble_message(header, payload);
+            
+        } catch (IOError e) {
+            if (e is IOError.CLOSED) {
+                _closed = true;
+                return null;
+            }
+            throw new ProtocolError.IO_ERROR("Async read error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Builds a Message from a parsed header and optional payload.
+     * 
+     * Shared by the synchronous and asynchronous read paths so the
+     * deserialization logic lives in exactly one place.
+     * 
+     * @param header The already-parsed message header
+     * @param payload The raw payload bytes, or null when there is none
+     * @return The fully deserialized message
+     * @throws ProtocolError if the type is unknown or deserialization fails
+     */
+    private Message assemble_message(MessageHeader header, uint8[]? payload) throws ProtocolError {
+        var message = MessageFactory.create_message(header.message_type);
+        message.header = header;
+        
+        if (payload != null && payload.length > 0) {
+            message.deserialize_payload((!) payload);
+        }
+        
+        return message;
+    }
+    
+    /**
+     * Validates that a read message is a response.
+     * 
+     * Shared by the synchronous and asynchronous response readers.
+     * 
+     * @param message The message returned by a read, possibly null
+     * @return The non-null response message
+     * @throws ProtocolError if the connection closed or the message is
+     *         not a response (WELCOME is accepted as well)
+     */
+    private Message ensure_response(Message? message) throws ProtocolError {
+        if (message == null) {
+            throw new ProtocolError.CONNECTION_CLOSED("Connection closed while waiting for response");
+        }
+        
+        var msg = (!) message;
+        if (!msg.message_type.is_response() && msg.message_type != MessageType.WELCOME) {
+            throw new ProtocolError.INVALID_MESSAGE(
+                "Expected response message, got 0x%02X".printf((int) msg.message_type)
+            );
+        }
+        
+        return msg;
+    }
+    
+    /**
+     * Reads a response message from the stream.
+     * 
+     * This is a convenience method that ensures the message read
+     * is a response type.
+     * 
+     * @return The response message
+     * @throws ProtocolError if reading fails or the message is not a response
+     */
+    public Message read_response() throws ProtocolError {
+        return ensure_response(read_message());
+    }
+    
+    /**
+     * Reads a response message asynchronously.
+     * 
+     * @param priority The I/O priority
+     * @param cancellable Optional cancellation token
+     * @return The response message
+     * @throws ProtocolError if reading fails or the message is not a response
+     */
+    public async Message read_response_async(
+        int priority = GLib.Priority.DEFAULT,
+        Cancellable? cancellable = null
+    ) throws ProtocolError {
+        return ensure_response(yield read_message_async(priority, cancellable));
+    }
+    
+    /**
+     * Reads a response and matches it to a request ID.
+     * 
+     * @param expected_request_id The expected request ID
+     * @return The response message with matching request ID
+     * @throws ProtocolError if reading fails or request ID doesn't match
+     */
+    public Message read_response_for_request(uint16 expected_request_id) throws ProtocolError {
+        var message = read_response();
+        
+        if (message.request_id != expected_request_id) {
+            throw new ProtocolError.INVALID_MESSAGE(
+                "Response request ID mismatch: expected %u, got %u".printf(
+                    expected_request_id, message.request_id
+                )
+            );
+        }
+        
+        return message;
+    }
+    
+    /**
+     * Closes the reader.
+     * 
+     * After closing, all read operations will throw CONNECTION_CLOSED.
+     */
+    public void close() {
+        _closed = true;
+    }
+    
+    /**
+     * Whether the reader is closed.
+     */
+    public bool is_closed {
+        get { return _closed; }
+    }
+}
+
+} // namespace Implexus.Protocol

+ 276 - 0
src/Protocol/MessageWriter.vala

@@ -0,0 +1,276 @@
+/**
+ * MessageWriter - Message writing to output stream
+ * 
+ * Provides functionality for writing protocol messages to
+ * an OutputStream with framing and length prefixes.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Protocol {
+
+/**
+ * Writes protocol messages to an OutputStream.
+ * 
+ * Handles the binary message format including:
+ * - Header serialization
+ * - Payload writing
+ * - Request ID assignment
+ * 
+ * Example usage:
+ * {{{
+ * var writer = new MessageWriter(output_stream);
+ * try {
+ *     var request = new GetEntityRequest.for_path(path);
+ *     var request_id = writer.write_request(request);
+ *     // wait for response with matching request_id
+ * } catch (ProtocolError e) {
+ *     warning("Failed to write message: %s", e.message);
+ * }
+ * }}}
+ */
+public class MessageWriter : Object {
+    
+    /**
+     * Destination stream for serialized messages.
+     */
+    private OutputStream _stream;
+    
+    /**
+     * Counter handing out request IDs; starts at 1 and increments
+     * for every request written.
+     */
+    private uint16 _next_request_id;
+    
+    /**
+     * Set once the writer has been closed.
+     */
+    private bool _closed = false;
+    
+    /**
+     * Creates a new MessageWriter for the given stream.
+     * 
+     * @param stream The output stream to write to
+     */
+    public MessageWriter(OutputStream stream) {
+        _stream = stream;
+        _next_request_id = 1;
+    }
+    
+    /**
+     * Writes a message to the stream.
+     * 
+     * The message is serialized as one frame, written in full and
+     * flushed so the peer sees it immediately.
+     * 
+     * @param message The message to write
+     * @throws ProtocolError if writing fails
+     */
+    public void write_message(Message message) throws ProtocolError {
+        if (_closed) {
+            throw new ProtocolError.CONNECTION_CLOSED("Writer is closed");
+        }
+        
+        try {
+            var frame = message.serialize();
+            
+            size_t written = 0;
+            _stream.write_all(frame, out written);
+            _stream.flush();
+        } catch (IOError e) {
+            // Remember when the stream is gone so later calls fail fast.
+            if (e is IOError.CLOSED) {
+                _closed = true;
+            }
+            throw new ProtocolError.IO_ERROR("Write error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Writes a message asynchronously.
+     * 
+     * @param message The message to write
+     * @param priority The I/O priority
+     * @param cancellable Optional cancellation token
+     * @throws ProtocolError if writing fails
+     */
+    public async void write_message_async(
+        Message message,
+        int priority = GLib.Priority.DEFAULT,
+        Cancellable? cancellable = null
+    ) throws ProtocolError {
+        if (_closed) {
+            throw new ProtocolError.CONNECTION_CLOSED("Writer is closed");
+        }
+        
+        try {
+            var frame = message.serialize();
+            
+            size_t written = 0;
+            yield _stream.write_all_async(frame, priority, cancellable, out written);
+            yield _stream.flush_async(priority, cancellable);
+        } catch (IOError e) {
+            if (e is IOError.CLOSED) {
+                _closed = true;
+            }
+            throw new ProtocolError.IO_ERROR("Async write error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Writes a request message and assigns a request ID.
+     * 
+     * A fresh ID is taken from the internal counter and stamped onto
+     * the message before it is written.
+     * 
+     * @param request The request message to write
+     * @return The assigned request ID
+     * @throws ProtocolError if writing fails
+     */
+    public uint16 write_request(Message request) throws ProtocolError {
+        uint16 id = _next_request_id++;
+        request.request_id = id;
+        write_message(request);
+        return id;
+    }
+    
+    /**
+     * Writes a request message asynchronously.
+     * 
+     * @param request The request message to write
+     * @param priority The I/O priority
+     * @param cancellable Optional cancellation token
+     * @return The assigned request ID
+     * @throws ProtocolError if writing fails
+     */
+    public async uint16 write_request_async(
+        Message request,
+        int priority = GLib.Priority.DEFAULT,
+        Cancellable? cancellable = null
+    ) throws ProtocolError {
+        uint16 id = _next_request_id++;
+        request.request_id = id;
+        yield write_message_async(request, priority, cancellable);
+        return id;
+    }
+    
+    /**
+     * Writes a response message with a specific request ID.
+     * 
+     * @param response The response message to write
+     * @param request_id The request ID to match
+     * @throws ProtocolError if writing fails
+     */
+    public void write_response(Message response, uint16 request_id) throws ProtocolError {
+        response.request_id = request_id;
+        write_message(response);
+    }
+    
+    /**
+     * Writes a response message asynchronously.
+     * 
+     * @param response The response message to write
+     * @param request_id The request ID to match
+     * @param priority The I/O priority
+     * @param cancellable Optional cancellation token
+     * @throws ProtocolError if writing fails
+     */
+    public async void write_response_async(
+        Message response,
+        uint16 request_id,
+        int priority = GLib.Priority.DEFAULT,
+        Cancellable? cancellable = null
+    ) throws ProtocolError {
+        response.request_id = request_id;
+        yield write_message_async(response, priority, cancellable);
+    }
+    
+    /**
+     * Writes an error response.
+     * 
+     * @param request_id The request ID to respond to
+     * @param error_code The error code
+     * @param error_message The error message
+     * @throws ProtocolError if writing fails
+     */
+    public void write_error(uint16 request_id, int error_code, string error_message) throws ProtocolError {
+        var reply = new ErrorResponse.with_error(error_code, error_message);
+        write_response(reply, request_id);
+    }
+    
+    /**
+     * Writes a success response.
+     * 
+     * @param request_id The request ID to respond to
+     * @throws ProtocolError if writing fails
+     */
+    public void write_success(uint16 request_id) throws ProtocolError {
+        var reply = new SuccessResponse();
+        write_response(reply, request_id);
+    }
+    
+    /**
+     * Flushes the underlying stream.
+     * 
+     * @throws ProtocolError if flushing fails
+     */
+    public void flush() throws ProtocolError {
+        if (_closed) {
+            throw new ProtocolError.CONNECTION_CLOSED("Writer is closed");
+        }
+        
+        try {
+            _stream.flush();
+        } catch (IOError e) {
+            throw new ProtocolError.IO_ERROR("Flush error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Flushes the underlying stream asynchronously.
+     * 
+     * @param priority The I/O priority
+     * @param cancellable Optional cancellation token
+     * @throws ProtocolError if flushing fails
+     */
+    public async void flush_async(
+        int priority = GLib.Priority.DEFAULT,
+        Cancellable? cancellable = null
+    ) throws ProtocolError {
+        if (_closed) {
+            throw new ProtocolError.CONNECTION_CLOSED("Writer is closed");
+        }
+        
+        try {
+            yield _stream.flush_async(priority, cancellable);
+        } catch (IOError e) {
+            throw new ProtocolError.IO_ERROR("Async flush error: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Closes the writer.
+     * 
+     * After closing, all write operations will throw CONNECTION_CLOSED.
+     */
+    public void close() {
+        _closed = true;
+    }
+    
+    /**
+     * Whether the writer is closed.
+     */
+    public bool is_closed {
+        get { return _closed; }
+    }
+    
+    /**
+     * The next request ID that will be assigned.
+     */
+    public uint16 next_request_id {
+        get { return _next_request_id; }
+    }
+}
+
+} // namespace Implexus.Protocol

+ 70 - 0
src/Protocol/ProtocolError.vala

@@ -0,0 +1,70 @@
+/**
+ * ProtocolError - Error domain for client/server protocol
+ * 
+ * Defines error codes for protocol-level failures in the
+ * client/server communication layer.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Protocol {
+
+/**
+ * Error domain for protocol operations.
+ * 
+ * These errors occur during message serialization, deserialization,
+ * and network communication between client and server.
+ * 
+ * NOTE(review): GError codes derive from declaration order, so new
+ * values should be appended rather than inserted to keep existing
+ * codes stable — confirm whether these codes ever cross the wire.
+ */
+public errordomain ProtocolError {
+    /**
+     * The message format is invalid or corrupted (bad framing,
+     * truncated header/payload, or undeserializable content).
+     */
+    INVALID_MESSAGE,
+    
+    /**
+     * The message type code is unknown to this implementation.
+     */
+    UNKNOWN_MESSAGE_TYPE,
+    
+    /**
+     * An I/O error occurred during communication.
+     */
+    IO_ERROR,
+    
+    /**
+     * The operation timed out.
+     */
+    TIMEOUT,
+    
+    /**
+     * The connection was closed unexpectedly, or an operation was
+     * attempted on an already-closed reader/writer.
+     */
+    CONNECTION_CLOSED,
+    
+    /**
+     * Failed to connect to the server.
+     */
+    CONNECTION_FAILED,
+    
+    /**
+     * The magic bytes in the message header are invalid.
+     */
+    INVALID_MAGIC,
+    
+    /**
+     * The message payload is too large.
+     */
+    PAYLOAD_TOO_LARGE,
+    
+    /**
+     * Serialization or deserialization failed.
+     */
+    SERIALIZATION_ERROR,
+    
+    /**
+     * The server returned an error response.
+     */
+    SERVER_ERROR
+}
+
+} // namespace Implexus.Protocol

+ 457 - 0
src/Server/ClientHandler.vala

@@ -0,0 +1,457 @@
+/**
+ * ClientHandler - Client connection handler for Implexus server
+ * 
+ * Handles individual client connections, processing requests
+ * and generating responses.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Server {
+
+/**
+ * Handles client connections and processes requests.
+ * 
+ * Each ClientHandler instance manages a single client connection,
+ * reading requests, dispatching them to the engine, and sending
+ * responses back to the client.
+ * 
+ * Example usage (internal to Server):
+ * {{{
+ * var handler = new ClientHandler(engine, reader, writer);
+ * var response = yield handler.process_request_async(request);
+ * writer.write_message(response);
+ * }}}
+ */
+public class ClientHandler : Object {
+    
+    // === Private Fields ===
+    
+    /**
+     * The engine that handles requests.
+     */
+    private Core.Engine _engine;
+    
+    /**
+     * The message reader for this client.
+     */
+    private Protocol.MessageReader _reader;
+    
+    /**
+     * The message writer for this client.
+     */
+    private Protocol.MessageWriter _writer;
+    
+    /**
+     * Whether the handler is connected.
+     */
+    private bool _connected = true;
+    
+    /**
+     * The current transaction (if any).
+     */
+    private Core.Transaction? _current_transaction = null;
+    
+    // === Properties ===
+    
+    /**
+     * Whether the handler is connected.
+     */
+    public bool is_connected {
+        get { return _connected; }
+    }
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new ClientHandler.
+     * 
+     * The handler stores references to the engine and to the
+     * reader/writer pair; it does not read or write anything at
+     * construction time.
+     * 
+     * @param engine The engine to handle requests
+     * @param reader The message reader
+     * @param writer The message writer
+     */
+    public ClientHandler(
+        Core.Engine engine,
+        Protocol.MessageReader reader,
+        Protocol.MessageWriter writer
+    ) {
+        _engine = engine;
+        _reader = reader;
+        _writer = writer;
+    }
+    
+    // === Public Methods ===
+    
+    /**
+     * Processes a request message and returns a response.
+     * 
+     * Dispatches on the message type to the matching handler method.
+     * This method never propagates an error to the caller: engine
+     * errors become protocol ERROR responses carrying the engine
+     * error code, and any other error is reported with code -1.
+     * 
+     * @param request The request message to process
+     * @return The response message
+     */
+    public async Protocol.Message process_request_async(Protocol.Message request) {
+        try {
+            switch (request.message_type) {
+                // Entity operations
+                case Protocol.MessageType.GET_ENTITY:
+                    return yield handle_get_entity((Protocol.GetEntityRequest) request);
+                
+                case Protocol.MessageType.ENTITY_EXISTS:
+                    return yield handle_entity_exists((Protocol.EntityExistsRequest) request);
+                
+                case Protocol.MessageType.CREATE_CONTAINER:
+                    return yield handle_create_container((Protocol.CreateContainerRequest) request);
+                
+                case Protocol.MessageType.CREATE_DOCUMENT:
+                    return yield handle_create_document((Protocol.CreateDocumentRequest) request);
+                
+                case Protocol.MessageType.DELETE_ENTITY:
+                    return yield handle_delete_entity((Protocol.DeleteEntityRequest) request);
+                
+                // Property operations
+                case Protocol.MessageType.SET_PROPERTY:
+                    return yield handle_set_property((Protocol.SetPropertyRequest) request);
+                
+                case Protocol.MessageType.GET_PROPERTY:
+                    return yield handle_get_property((Protocol.GetPropertyRequest) request);
+                
+                // Children operations
+                case Protocol.MessageType.GET_CHILDREN:
+                    return yield handle_get_children((Protocol.GetChildrenRequest) request);
+                
+                // Query operations
+                case Protocol.MessageType.QUERY_BY_TYPE:
+                    return yield handle_query_by_type((Protocol.QueryByTypeRequest) request);
+                
+                // Transaction operations (no payload, so no downcast needed)
+                case Protocol.MessageType.BEGIN_TRANSACTION:
+                    return yield handle_begin_transaction(request);
+                
+                case Protocol.MessageType.COMMIT_TRANSACTION:
+                    return yield handle_commit_transaction(request);
+                
+                case Protocol.MessageType.ROLLBACK_TRANSACTION:
+                    return yield handle_rollback_transaction(request);
+                
+                default:
+                    return create_error_response(
+                        request.request_id,
+                        9, // PROTOCOL_ERROR
+                        "Unknown request type: 0x%02X".printf((int) request.message_type)
+                    );
+            }
+        } catch (Core.EngineError e) {
+            // Forward the engine's own error code to the client.
+            return create_error_response(request.request_id, (int) e.code, e.message);
+        } catch (Error e) {
+            // Unexpected failure: report with the generic code -1.
+            return create_error_response(request.request_id, -1, e.message);
+        }
+    }
+    
+    /**
+     * Disconnects the client handler.
+     * 
+     * Marks the handler as disconnected, closes the reader and writer,
+     * and rolls back any transaction the client left open.
+     */
+    public async void disconnect_async() {
+        _connected = false;
+        _reader.close();
+        _writer.close();
+        
+        // Roll back any pending transaction (best effort — failures
+        // during teardown are deliberately ignored).
+        if (_current_transaction != null && ((!) _current_transaction).active) {
+            try {
+                yield ((!) _current_transaction).rollback_async();
+            } catch (Error e) {
+                // Ignore rollback errors
+            }
+        }
+        
+        // Drop the reference so the handler is not left pointing at a
+        // dead transaction (handle_commit_transaction clears it the
+        // same way after a commit).
+        _current_transaction = null;
+    }
+    
+    // === Private Request Handlers ===
+    
+    /**
+     * Handles GET_ENTITY: looks up an entity and returns its data, or
+     * an ENTITY_NOT_FOUND error response when it does not exist.
+     */
+    private async Protocol.Message handle_get_entity(Protocol.GetEntityRequest request) throws Core.EngineError, Core.EntityError {
+        var found = yield _engine.get_entity_async(request.path);
+        
+        if (found != null) {
+            var response = new Protocol.EntityResponse();
+            response.request_id = request.request_id;
+            response.entity_data = yield entity_to_data_async((!) found);
+            return response;
+        }
+        
+        return create_error_response(
+            request.request_id,
+            0, // ENTITY_NOT_FOUND
+            "Entity not found: %s".printf(request.path.to_string())
+        );
+    }
+    
+    /**
+     * Handles ENTITY_EXISTS: answers with a boolean response.
+     */
+    private async Protocol.Message handle_entity_exists(Protocol.EntityExistsRequest request) throws Core.EngineError {
+        var exists = yield _engine.entity_exists_async(request.path);
+        
+        var response = new Protocol.BooleanResponse();
+        response.request_id = request.request_id;
+        response.value = exists;
+        return response;
+    }
+    
+    /**
+     * Handles CREATE_CONTAINER: creates a container under an existing
+     * parent and returns the new entity's data.
+     */
+    private async Protocol.Message handle_create_container(Protocol.CreateContainerRequest request) throws Core.EngineError, Core.EntityError {
+        // The root always exists; re-creating it is an invalid path.
+        if (request.path.is_root) {
+            throw new Core.EngineError.INVALID_PATH("Cannot create root container");
+        }
+        
+        // The parent must already exist before a child can be created.
+        var parent_path = request.path.parent;
+        var parent = yield _engine.get_entity_async(parent_path);
+        if (parent == null) {
+            throw new Core.EngineError.ENTITY_NOT_FOUND(
+                "Parent not found: %s".printf(parent_path.to_string())
+            );
+        }
+        
+        var created = yield ((!) parent).create_container_async(request.path.name);
+        
+        var response = new Protocol.EntityResponse();
+        response.request_id = request.request_id;
+        response.entity_data = yield entity_to_data_async((!) created);
+        return response;
+    }
+    
+    /**
+     * Handles CREATE_DOCUMENT: creates a document (with the requested
+     * type label) under an existing parent and returns its data.
+     */
+    private async Protocol.Message handle_create_document(Protocol.CreateDocumentRequest request) throws Core.EngineError, Core.EntityError {
+        // Documents always live under a parent; the root is off-limits.
+        if (request.path.is_root) {
+            throw new Core.EngineError.INVALID_PATH("Cannot create document at root");
+        }
+        
+        // The parent must already exist before a child can be created.
+        var parent_path = request.path.parent;
+        var parent = yield _engine.get_entity_async(parent_path);
+        if (parent == null) {
+            throw new Core.EngineError.ENTITY_NOT_FOUND(
+                "Parent not found: %s".printf(parent_path.to_string())
+            );
+        }
+        
+        var created = yield ((!) parent).create_document_async(request.path.name, request.type_label);
+        
+        var response = new Protocol.EntityResponse();
+        response.request_id = request.request_id;
+        response.entity_data = yield entity_to_data_async((!) created);
+        return response;
+    }
+    
+    /**
+     * Handles DELETE_ENTITY: deletes the entity when present. Deleting
+     * a missing entity is treated as success (idempotent delete).
+     */
+    private async Protocol.Message handle_delete_entity(Protocol.DeleteEntityRequest request) throws Core.EngineError, Core.EntityError {
+        var target = yield _engine.get_entity_async(request.path);
+        
+        // Only delete when something is actually there; a missing
+        // entity already satisfies the request.
+        if (target != null) {
+            yield ((!) target).delete_async();
+        }
+        
+        var response = new Protocol.SuccessResponse();
+        response.request_id = request.request_id;
+        return response;
+    }
+    
+    /**
+     * Handles SET_PROPERTY: sets the property when a value is given,
+     * removes it when the value is null.
+     */
+    private async Protocol.Message handle_set_property(Protocol.SetPropertyRequest request) throws Core.EngineError, Core.EntityError {
+        var target = yield _engine.get_entity_async(request.path);
+        if (target == null) {
+            throw new Core.EngineError.ENTITY_NOT_FOUND(
+                "Entity not found: %s".printf(request.path.to_string())
+            );
+        }
+        
+        var entity = (!) target;
+        if (request.value == null) {
+            // A null value means "remove the property".
+            yield entity.remove_property_async(request.property_name);
+        } else {
+            yield entity.set_entity_property_async(request.property_name, (!) request.value);
+        }
+        
+        var response = new Protocol.SuccessResponse();
+        response.request_id = request.request_id;
+        return response;
+    }
+    
+    /**
+     * Handles GET_PROPERTY: reads a single property value from an
+     * existing entity and returns it in a property response.
+     */
+    private async Protocol.Message handle_get_property(Protocol.GetPropertyRequest request) throws Core.EngineError, Core.EntityError {
+        var target = yield _engine.get_entity_async(request.path);
+        if (target == null) {
+            throw new Core.EngineError.ENTITY_NOT_FOUND(
+                "Entity not found: %s".printf(request.path.to_string())
+            );
+        }
+        
+        var response = new Protocol.PropertyResponse();
+        response.request_id = request.request_id;
+        response.value = yield ((!) target).get_entity_property_async(request.property_name);
+        return response;
+    }
+    
+    /**
+     * Handles GET_CHILDREN: serializes every child of an existing
+     * entity into a children response.
+     */
+    private async Protocol.Message handle_get_children(Protocol.GetChildrenRequest request) throws Core.EngineError, Core.EntityError {
+        var target = yield _engine.get_entity_async(request.path);
+        if (target == null) {
+            throw new Core.EngineError.ENTITY_NOT_FOUND(
+                "Entity not found: %s".printf(request.path.to_string())
+            );
+        }
+        
+        var response = new Protocol.ChildrenResponse();
+        response.request_id = request.request_id;
+        
+        var children = yield ((!) target).get_children_async();
+        foreach (var child in children) {
+            response.children.add(yield entity_to_data_async(child));
+        }
+        
+        return response;
+    }
+    
+    /**
+     * Handles QUERY_BY_TYPE: runs a type-label query and returns the
+     * matching entity paths (only paths travel over the wire).
+     */
+    private async Protocol.Message handle_query_by_type(Protocol.QueryByTypeRequest request) throws Core.EngineError {
+        var response = new Protocol.QueryResponse();
+        response.request_id = request.request_id;
+        
+        var matches = yield _engine.query_by_type_async(request.type_label);
+        foreach (var hit in matches) {
+            response.paths.add(hit.path.to_string());
+        }
+        
+        return response;
+    }
+    
+    /**
+     * Handles BEGIN_TRANSACTION request.
+     *
+     * Only one transaction may be active per connection; a second begin
+     * while one is active is rejected with TRANSACTION_ERROR.
+     */
+    private async Protocol.Message handle_begin_transaction(Protocol.Message request) throws Core.EngineError {
+        var current = _current_transaction;
+        if (current != null && ((!) current).active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("A transaction is already active");
+        }
+        
+        _current_transaction = yield _engine.begin_transaction_async();
+        
+        var response = new Protocol.SuccessResponse();
+        response.request_id = request.request_id;
+        return response;
+    }
+    
+    /**
+     * Handles COMMIT_TRANSACTION request.
+     *
+     * Commits the connection's active transaction and clears it. If the
+     * commit throws, the transaction reference is intentionally kept so the
+     * client can still roll back.
+     */
+    private async Protocol.Message handle_commit_transaction(Protocol.Message request) throws Core.EngineError {
+        var tx = _current_transaction;
+        if (tx == null || !((!) tx).active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("No active transaction");
+        }
+        
+        yield ((!) tx).commit_async();
+        _current_transaction = null;
+        
+        var response = new Protocol.SuccessResponse();
+        response.request_id = request.request_id;
+        return response;
+    }
+    
+    /**
+     * Handles ROLLBACK_TRANSACTION request.
+     *
+     * Rolls back the connection's active transaction and clears it.
+     */
+    private async Protocol.Message handle_rollback_transaction(Protocol.Message request) throws Core.EngineError {
+        var tx = _current_transaction;
+        if (tx == null || !((!) tx).active) {
+            throw new Core.EngineError.TRANSACTION_ERROR("No active transaction");
+        }
+        
+        yield ((!) tx).rollback_async();
+        _current_transaction = null;
+        
+        var response = new Protocol.SuccessResponse();
+        response.request_id = request.request_id;
+        return response;
+    }
+    
+    // === Helper Methods ===
+    
+    /**
+     * Converts an entity to EntityData for serialization.
+     *
+     * Copies the identifying fields (type, path, label, expression) and,
+     * for DOCUMENT entities only, also fetches and copies the property set.
+     * Async because fetching document properties may hit storage.
+     *
+     * @param entity The entity to convert
+     * @return The entity data
+     */
+    private async Protocol.EntityData entity_to_data_async(Core.Entity entity) throws Core.EntityError {
+        var data = new Protocol.EntityData();
+        data.entity_type = entity.entity_type;
+        data.path = entity.path;
+        data.type_label = entity.type_label;
+        data.expression = entity.configured_expression;
+        
+        // Only documents carry a property payload.
+        if (entity.entity_type != Core.EntityType.DOCUMENT) {
+            return data;
+        }
+        
+        var props = new Invercargill.DataStructures.PropertyDictionary();
+        var stored = yield entity.get_properties_async();
+        foreach (var kvp in stored) {
+            try {
+                props.set(kvp.key, kvp.value);
+            } catch (Invercargill.IndexError e) {
+                // Best effort: a property that cannot be stored in the
+                // dictionary is skipped rather than failing the conversion.
+            }
+        }
+        data.properties = props;
+        
+        return data;
+    }
+    
+    /**
+     * Creates an error response.
+     * 
+     * @param request_id The request ID echoed back so the client can
+     *                   correlate the error with its request
+     * @param error_code The protocol error code
+     * @param error_message The human-readable error message
+     * @return The error response
+     */
+    private Protocol.ErrorResponse create_error_response(
+        uint16 request_id,
+        int error_code,
+        string error_message
+    ) {
+        var result = new Protocol.ErrorResponse.with_error(error_code, error_message);
+        result.request_id = request_id;
+        return result;
+    }
+}
+
+} // namespace Implexus.Server

+ 419 - 0
src/Server/Server.vala

@@ -0,0 +1,419 @@
+/**
+ * Server - TCP server implementation for Implexus daemon
+ * 
+ * Provides a TCP server that accepts client connections and
+ * dispatches commands to an embedded engine.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Server {
+
+/**
+ * Error domain for server operations.
+ */
+public errordomain ServerError {
+    /**
+     * Failed to start the server (e.g. the port is already in use).
+     */
+    STARTUP_FAILED,
+    
+    /**
+     * Failed to stop the server.
+     */
+    SHUTDOWN_FAILED,
+    
+    /**
+     * Invalid configuration.
+     */
+    INVALID_CONFIGURATION,
+    
+    /**
+     * Maximum connections reached.
+     * NOTE(review): the Server implementation in this file rejects excess
+     * connections via the error_occurred signal rather than raising this.
+     */
+    MAX_CONNECTIONS_REACHED
+}
+
+/**
+ * Server configuration options.
+ */
+public class ServerConfiguration : Object {
+    
+    /**
+     * The TCP port the server listens on.
+     */
+    public uint16 port { get; set; default = 9090; }
+    
+    /**
+     * Maximum number of concurrent connections.
+     */
+    public int max_connections { get; set; default = 100; }
+    
+    /**
+     * Connection timeout in seconds (0 = no timeout).
+     * NOTE(review): not consumed by the Server implementation visible in
+     * this commit — confirm it is enforced elsewhere.
+     */
+    public int timeout_seconds { get; set; default = 30; }
+    
+    /**
+     * Whether to enable TLS.
+     */
+    public bool enable_tls { get; set; default = false; }
+    
+    /**
+     * Path to TLS certificate file, or null when TLS is disabled.
+     */
+    public string? tls_cert_path { get; set; default = null; }
+    
+    /**
+     * Path to TLS key file, or null when TLS is disabled.
+     */
+    public string? tls_key_path { get; set; default = null; }
+    
+    /**
+     * Creates a new ServerConfiguration with default values.
+     */
+    public ServerConfiguration() {
+    }
+    
+    /**
+     * Creates a new ServerConfiguration for the specified port.
+     * 
+     * @param port The port to listen on
+     */
+    public ServerConfiguration.with_port(uint16 port) {
+        this.port = port;
+    }
+}
+
+/**
+ * TCP server for Implexus daemon.
+ * 
+ * The Server listens for client connections and dispatches
+ * commands to an embedded engine. It handles multiple concurrent
+ * clients using threads.
+ * 
+ * Example usage:
+ * {{{
+ * var storage = new Storage.BasicStorage.with_directory("/path/to/data");
+ * var config = new Core.StorageConfiguration(storage);
+ * var engine = new EmbeddedEngine(config);
+ * 
+ * var server = new Server.with_configuration(engine, new ServerConfiguration.with_port(9090));
+ * server.client_connected.connect(() => print("Client connected\n"));
+ * server.start();
+ * 
+ * // Run until interrupted
+ * var loop = new MainLoop();
+ * loop.run();
+ * 
+ * server.stop();
+ * }}}
+ */
+public class Server : Object {
+    
+    // === Private Fields ===
+    
+    /**
+     * The embedded engine that handles requests.
+     */
+    private Core.Engine _engine;
+    
+    /**
+     * The socket service for accepting connections; null until start().
+     */
+    private SocketService? _socket_service;
+    
+    /**
+     * The server configuration.
+     */
+    private ServerConfiguration _configuration;
+    
+    /**
+     * Whether the server is running.
+     * NOTE(review): read and written from multiple threads without
+     * synchronization; relies on word-sized bool updates being atomic.
+     */
+    private bool _running = false;
+    
+    /**
+     * Active client handlers.
+     * NOTE(review): added/removed by per-client coroutines and iterated by
+     * stop() with no locking — confirm Vector is safe for this access
+     * pattern, or guard it like _connection_count.
+     */
+    private Invercargill.DataStructures.Vector<ClientHandler> _clients;
+    
+    /**
+     * Number of active connections. Guarded by _connection_lock.
+     */
+    private int _connection_count = 0;
+    
+    /**
+     * Lock for connection count.
+     */
+    private Mutex _connection_lock = Mutex();
+    
+    // === Signals ===
+    
+    /**
+     * Signal emitted when a client connects.
+     */
+    public signal void client_connected();
+    
+    /**
+     * Signal emitted when a client disconnects.
+     */
+    public signal void client_disconnected();
+    
+    /**
+     * Signal emitted when an error occurs.
+     */
+    public signal void error_occurred(string message);
+    
+    /**
+     * Signal emitted when the server starts.
+     */
+    public signal void started();
+    
+    /**
+     * Signal emitted when the server stops.
+     */
+    public signal void stopped();
+    
+    // === Properties ===
+    
+    /**
+     * Whether the server is running.
+     */
+    public bool is_running {
+        get { return _running; }
+    }
+    
+    /**
+     * The server configuration.
+     */
+    public ServerConfiguration configuration {
+        get { return _configuration; }
+    }
+    
+    /**
+     * The number of active connections.
+     */
+    public int connection_count {
+        get { return _connection_count; }
+    }
+    
+    /**
+     * The port the server is listening on.
+     */
+    public uint16 port {
+        get { return _configuration.port; }
+    }
+    
+    // === Constructors ===
+    
+    /**
+     * Creates a new Server with the given engine and default configuration.
+     * 
+     * @param engine The engine to handle requests
+     */
+    public Server(Core.Engine engine) {
+        _engine = engine;
+        _configuration = new ServerConfiguration();
+        _clients = new Invercargill.DataStructures.Vector<ClientHandler>();
+        // NOTE(review): redundant — the field initializer above already
+        // initialized _connection_lock.
+        _connection_lock = new Mutex();
+    }
+    
+    /**
+     * Creates a new Server with the given engine and configuration.
+     * 
+     * @param engine The engine to handle requests
+     * @param config The server configuration
+     */
+    public Server.with_configuration(Core.Engine engine, ServerConfiguration config) {
+        _engine = engine;
+        _configuration = config;
+        _clients = new Invercargill.DataStructures.Vector<ClientHandler>();
+        // NOTE(review): redundant — see field initializer.
+        _connection_lock = new Mutex();
+    }
+    
+    // === Public Methods ===
+    
+    /**
+     * Starts the server.
+     * 
+     * Binds the configured port, begins accepting connections and emits
+     * started(). Calling start() on an already-running server is a no-op.
+     * 
+     * @throws ServerError if the server fails to start
+     */
+    public void start() throws ServerError {
+        if (_running) {
+            return;
+        }
+        
+        try {
+            _socket_service = new SocketService();
+            
+            // Bind to port
+            _socket_service.add_inet_port(_configuration.port, null);
+            
+            // Connect incoming signal
+            _socket_service.incoming.connect(on_incoming);
+            
+            // Start the service
+            _socket_service.start();
+            _running = true;
+            
+            started();
+            
+            // NOTE(review): uint16 with %d is safe (varargs promote to int)
+            // but %u would be clearer; consider message()/debug() instead of
+            // print() for daemon logging.
+            print("Implexus server listening on port %d\n", _configuration.port);
+            
+        } catch (Error e) {
+            throw new ServerError.STARTUP_FAILED(
+                "Failed to start server on port %d: %s".printf(_configuration.port, e.message)
+            );
+        }
+    }
+    
+    /**
+     * Stops the server.
+     *
+     * This method gracefully disconnects all clients and stops
+     * accepting new connections. Calling stop() when not running is a no-op.
+     */
+    public void stop() {
+        if (!_running) {
+            return;
+        }
+        
+        _running = false;
+        
+        // Close all client connections synchronously
+        // (using async wrapper since we're in a sync context)
+        // NOTE(review): each nested MainLoop blocks stop() until that
+        // client's disconnect completes; if stop() is called from the main
+        // loop thread, the nested loop must still be able to dispatch the
+        // async continuation — confirm the intended calling context.
+        foreach (var client in _clients) {
+            // Sync wrapper for disconnect_async
+            var loop = new MainLoop();
+            client.disconnect_async.begin((obj, res) => {
+                client.disconnect_async.end(res);
+                loop.quit();
+            });
+            loop.run();
+        }
+        _clients.clear();
+        
+        // Stop the socket service
+        if (_socket_service != null) {
+            _socket_service.stop();
+            _socket_service.close();
+            _socket_service = null;
+        }
+        
+        stopped();
+        
+        print("Implexus server stopped\n");
+    }
+    
+    /**
+     * Waits for the server to stop.
+     *
+     * This method blocks until stop() is called by another thread.
+     * NOTE(review): implemented as a 100ms polling loop; a Cond wait or a
+     * MainLoop would avoid the polling latency and periodic wakeups.
+     */
+    public void wait_for_stop() {
+        while (_running) {
+            Thread.usleep(100000); // Sleep 100ms
+        }
+    }
+    
+    // === Private Methods ===
+    
+    /**
+     * Handles an incoming connection.
+     * 
+     * Rejects the connection when max_connections is reached; otherwise
+     * increments the connection count and starts the per-client coroutine.
+     * 
+     * @param connection The socket connection
+     * @param source_object The source object (unused)
+     * @return true to keep the service running
+     */
+    private bool on_incoming(SocketConnection connection, Object? source_object) {
+        // Check max connections
+        _connection_lock.lock();
+        if (_connection_count >= _configuration.max_connections) {
+            _connection_lock.unlock();
+            error_occurred("Maximum connections reached, rejecting new connection");
+            try {
+                connection.close();
+            } catch (Error e) {
+                // Ignore close errors
+            }
+            return true;
+        }
+        _connection_count++;
+        _connection_lock.unlock();
+        
+        client_connected();
+        
+        // Start the client coroutine from a short-lived thread.
+        // NOTE(review): .begin() only runs the coroutine up to its first
+        // yield on this thread; all later continuations are dispatched on
+        // the global default MainContext (no per-thread MainContext is
+        // created or pushed here) and this thread exits immediately. A
+        // running main loop is therefore required to service clients.
+        new Thread<void*>("client-handler", () => {
+            handle_client_async.begin(connection);
+            return null;
+        });
+        
+        return true;
+    }
+    
+    /**
+     * Handles a client connection asynchronously.
+     * 
+     * Sends the welcome message, then loops reading requests and writing
+     * responses until the connection closes or the server stops, and
+     * finally cleans up the handler list and connection count.
+     * 
+     * @param connection The socket connection
+     */
+    private async void handle_client_async(SocketConnection connection) {
+        var input = connection.get_input_stream();
+        var output = connection.get_output_stream();
+        
+        var reader = new Protocol.MessageReader(input);
+        var writer = new Protocol.MessageWriter(output);
+        
+        // Create client handler
+        var handler = new ClientHandler(_engine, reader, writer);
+        _clients.add(handler);
+        
+        try {
+            // Send welcome message
+            var welcome = new Protocol.WelcomeMessage();
+            writer.write_message(welcome);
+            
+            // Process requests
+            // NOTE(review): read_message() is a synchronous call made from an
+            // async method — while waiting for client data it blocks whatever
+            // context is dispatching this coroutine.
+            while (_running) {
+                var message = reader.read_message();
+                if (message == null) {
+                    // Connection closed
+                    break;
+                }
+                
+                // Process the request asynchronously
+                var response = yield handler.process_request_async((!) message);
+                
+                // Send response
+                writer.write_message(response);
+            }
+            
+        } catch (Protocol.ProtocolError e) {
+            if (_running) {
+                error_occurred("Protocol error: %s".printf(e.message));
+            }
+        } catch (Error e) {
+            if (_running) {
+                error_occurred("Connection error: %s".printf(e.message));
+            }
+        }
+        
+        // Cleanup
+        _clients.remove(handler);
+        
+        _connection_lock.lock();
+        _connection_count--;
+        _connection_lock.unlock();
+        
+        client_disconnected();
+        
+        try {
+            connection.close();
+        } catch (Error e) {
+            // Ignore close errors
+        }
+    }
+}
+
+} // namespace Implexus.Server

+ 243 - 0
src/Storage/AsyncDbmQueue.vala

@@ -0,0 +1,243 @@
+/**
+ * AsyncDbmQueue - Queue system for async DBM operations.
+ *
+ * For DBMs without concurrent read support:
+ * - All operations go through a single worker thread
+ * - Read operations are prioritized over writes
+ *
+ * For DBMs with concurrent read support (LMDB):
+ * - Write operations go through the worker thread
+ * - Read operations spawn their own threads
+ */
+namespace Implexus.Storage {
+
+/**
+ * Queue system for async DBM operations.
+ *
+ * This class manages asynchronous execution of database operations,
+ * handling both concurrent-read-capable and single-threaded DBM backends.
+ */
+public class AsyncDbmQueue : GLib.Object {
+    // NOTE(review): weak — the queue does not keep the Dbm alive. If the
+    // Dbm is finalized first, _dbm silently becomes null and
+    // execute_read_async will crash dereferencing it. Confirm ownership.
+    private weak Dbm _dbm;
+    private AsyncQueue<DbmOperation> _read_queue;
+    private AsyncQueue<DbmOperation> _write_queue;
+    private Thread<void>? _worker_thread = null;
+    // NOTE(review): read/written from multiple threads without
+    // synchronization; relies on word-sized bool updates being atomic.
+    private bool _running = false;
+    private Mutex _shutdown_mutex;
+    private Cond _shutdown_cond;
+    
+    /**
+     * Creates a new AsyncDbmQueue for the given DBM.
+     *
+     * @param dbm The DBM instance to wrap (not owned; see _dbm note above)
+     */
+    public AsyncDbmQueue(Dbm dbm) {
+        _dbm = dbm;
+        _read_queue = new AsyncQueue<DbmOperation>();
+        _write_queue = new AsyncQueue<DbmOperation>();
+        _shutdown_mutex = Mutex();
+        _shutdown_cond = Cond();
+    }
+    
+    /**
+     * Starts the worker thread.
+     * Must be called before executing any operations.
+     * On thread-creation failure a warning is logged and the queue stays
+     * stopped (is_running remains false).
+     */
+    public void start() {
+        if (_running) {
+            return;
+        }
+        
+        _running = true;
+        
+        try {
+            _worker_thread = new Thread<void>.try("dbm-worker", _worker);
+        } catch (ThreadError e) {
+            warning("Failed to create worker thread: %s", e.message);
+            _running = false;
+        }
+    }
+    
+    /**
+     * Stops the worker thread and waits for it to finish.
+     * 
+     * A pre-completed dummy operation is pushed to the read queue so a
+     * worker blocked in timed_pop wakes up promptly.
+     * 
+     * @param timeout_ms Maximum time to wait for shutdown (0 = wait forever)
+     * @return true if shutdown completed, false if timed out
+     */
+    public bool shutdown(int timeout_ms = 5000) {
+        if (!_running) {
+            return true;
+        }
+        
+        _running = false;
+        
+        // Push a dummy operation to wake up the worker
+        var dummy = new DbmOperation(DbmOperationType.READ, () => { return false; });
+        dummy.completed = true;
+        _read_queue.push(dummy);
+        
+        // Wait for worker to finish
+        _shutdown_mutex.lock();
+        int64 end_time = get_monotonic_time() + (timeout_ms * 1000);
+        
+        while (_worker_thread != null) {
+            if (timeout_ms > 0) {
+                if (!_shutdown_cond.wait_until(_shutdown_mutex, end_time)) {
+                    _shutdown_mutex.unlock();
+                    return false; // Timed out
+                }
+            } else {
+                _shutdown_cond.wait(_shutdown_mutex);
+            }
+        }
+        _shutdown_mutex.unlock();
+        
+        return true;
+    }
+    
+    /**
+     * Checks if the queue is currently running.
+     */
+    public bool is_running {
+        get { return _running; }
+    }
+    
+    /**
+     * Worker thread main loop.
+     * 
+     * Processes operations from the queue, prioritizing reads over writes.
+     *
+     * NOTE(review): this loop never performs any DBM work itself — it only
+     * schedules op.callback on the main context. The callback is therefore
+     * expected to both execute the operation and resume the waiting async
+     * method; with the no-op callbacks produced by queue_read()/queue_write()
+     * nothing happens and callers of execute_*_async never resume. Confirm
+     * how callbacks are wired before relying on this class.
+     */
+    private void _worker() {
+        while (_running) {
+            // Try to get a read operation first (prioritize reads)
+            DbmOperation? op = _read_queue.try_pop();
+            
+            // If no read operation, try write
+            if (op == null) {
+                op = _write_queue.try_pop();
+            }
+            
+            // If still no operation, block on read queue with timeout
+            if (op == null) {
+                // timed_pop uses TimeVal (deprecated but required by AsyncQueue)
+                TimeVal end_time = TimeVal();
+                end_time.get_current_time();
+                // NOTE(review): TimeVal.add() takes MICROSECONDS — this adds
+                // 1µs, not 1 second, so the pop times out almost immediately
+                // and the loop busy-polls. Should be add(1000000).
+                end_time.add(1); // Add 1 second
+                op = _read_queue.timed_pop(ref end_time);
+                if (op == null) {
+                    continue;
+                }
+            }
+            
+            // Skip completed operations (used for shutdown signaling)
+            if (op.completed) {
+                continue;
+            }
+            
+            // Signal completion via Idle to return to main context
+            Idle.add(() => {
+                op.callback();
+                return Source.REMOVE;
+            });
+        }
+        
+        // Signal shutdown complete
+        _shutdown_mutex.lock();
+        _worker_thread = null;
+        _shutdown_cond.broadcast();
+        _shutdown_mutex.unlock();
+    }
+    
+    /**
+     * Executes a read operation asynchronously.
+     * 
+     * For concurrent-read DBMs (LMDB), spawns a new thread.
+     * For single-threaded DBMs (GDBM, Filesystem), queues and prioritizes.
+     *
+     * @param op The operation to execute
+     */
+    public async void execute_read_async(owned DbmOperation op) throws Error {
+        if (!_running) {
+            throw new StorageError.IO_ERROR("AsyncDbmQueue not started");
+        }
+        
+        if (_dbm.supports_concurrent_reads) {
+            // For concurrent-read DBMs, execute directly in new thread
+            // NOTE(review): `error` is never assigned anywhere in this path,
+            // and the spawned thread performs no work on `op` — it only
+            // schedules the resume, so the actual read never runs here.
+            Error? error = null;
+            
+            try {
+                new Thread<void>.try(null, () => {
+                    // The operation should be executed here
+                    // Result/error will be set by the caller
+                    Idle.add(() => {
+                        execute_read_async.callback();
+                        return Source.REMOVE;
+                    });
+                });
+            } catch (ThreadError e) {
+                throw new StorageError.IO_ERROR("Failed to create read thread: %s".printf(e.message));
+            }
+            
+            yield;
+            
+            if (error != null) {
+                throw error;
+            }
+        } else {
+            // Queue the operation for the worker thread
+            // NOTE(review): nothing wires this method's continuation
+            // (execute_read_async.callback) into op.callback, so unless the
+            // caller did that explicitly, this `yield` never resumes.
+            _read_queue.push((owned)op);
+            yield;
+            
+            // NOTE(review): `(owned)op` above transferred ownership and set
+            // the local `op` to null — this dereference will fail. Keep an
+            // unowned reference to the operation before pushing.
+            if (op.error != null) {
+                throw op.error;
+            }
+        }
+    }
+    
+    /**
+     * Executes a write operation asynchronously.
+     * 
+     * Write operations always go through the queue to ensure
+     * serialization and data consistency.
+     *
+     * NOTE(review): shares both defects of the queued-read path — the
+     * continuation is never wired into op.callback (yield may never
+     * resume) and `op` is null after the ownership transfer below.
+     *
+     * @param op The operation to execute
+     */
+    public async void execute_write_async(owned DbmOperation op) throws Error {
+        if (!_running) {
+            throw new StorageError.IO_ERROR("AsyncDbmQueue not started");
+        }
+        
+        // Queue the operation for the worker thread
+        _write_queue.push((owned)op);
+        yield;
+        
+        if (op.error != null) {
+            throw op.error;
+        }
+    }
+    
+    /**
+     * Creates a read operation object.
+     * 
+     * NOTE(review): despite the name, this does not enqueue anything — it
+     * only constructs a DbmOperation with a no-op callback. The caller is
+     * expected to configure result/error/callback and then pass the
+     * operation to execute_read_async().
+     *
+     * @return The new operation
+     */
+    public DbmOperation queue_read() {
+        var op = new DbmOperation(DbmOperationType.READ, () => { return false; });
+        return op;
+    }
+    
+    /**
+     * Creates a write operation object (not enqueued; see queue_read note).
+     *
+     * @return The new operation
+     */
+    public DbmOperation queue_write() {
+        var op = new DbmOperation(DbmOperationType.WRITE, () => { return false; });
+        return op;
+    }
+}
+
+} // namespace Implexus.Storage

+ 68 - 0
src/Storage/Dbm.vala

@@ -0,0 +1,68 @@
+/**
+ * Dbm - Database Manager interface for key-value storage
+ * 
+ * Provides low-level key-value storage abstraction with support
+ * for transactions, iteration, and batch operations.
+ */
+namespace Implexus.Storage {
+
+/**
+ * Interface for low-level key-value storage.
+ *
+ * Implementations map string keys to binary values and may optionally
+ * support transactions and concurrent reads.
+ */
+public interface Dbm : Object {
+    
+    /**
+     * Whether this DBM implementation supports concurrent read operations.
+     * 
+     * If true, read operations can spawn new threads while writes go through
+     * the async queue. LMDB returns true (MVCC), GDBM and Filesystem return false.
+     */
+    public abstract bool supports_concurrent_reads { get; }
+    
+    /**
+     * Checks if a key exists in the database.
+     *
+     * @param key The key to look up
+     * @return true if the key is present
+     */
+    public abstract bool has_key(string key);
+    
+    /**
+     * Gets the value for a key.
+     *
+     * @param key The key to look up
+     * @return The stored value, or null if the key does not exist
+     */
+    public abstract Invercargill.BinaryData? @get(string key);
+    
+    /**
+     * Sets a key-value pair.
+     *
+     * @param key The key to write
+     * @param value The value to store
+     * @throws StorageError if the write fails
+     */
+    public abstract void @set(string key, Invercargill.BinaryData value) throws StorageError;
+    
+    /**
+     * Deletes a key from the database.
+     *
+     * @param key The key to remove
+     * @throws StorageError if the deletion fails
+     */
+    public abstract void delete(string key) throws StorageError;
+    
+    /**
+     * Gets all keys in the database.
+     */
+    public abstract Invercargill.Enumerable<string> keys { owned get; }
+    
+    /**
+     * Begins a new transaction.
+     *
+     * @throws StorageError if a transaction cannot be started
+     */
+    public abstract void begin_transaction() throws StorageError;
+    
+    /**
+     * Commits the current transaction.
+     *
+     * @throws StorageError if the commit fails
+     */
+    public abstract void commit_transaction() throws StorageError;
+    
+    /**
+     * Rolls back the current transaction.
+     * Declared non-throwing so it can always be used in cleanup paths.
+     */
+    public abstract void rollback_transaction();
+    
+    /**
+     * Indicates whether a transaction is currently active.
+     */
+    public abstract bool in_transaction { get; }
+}
+
+} // namespace Implexus.Storage

+ 64 - 0
src/Storage/DbmOperation.vala

@@ -0,0 +1,64 @@
+/**
+ * DbmOperation - Represents a single operation in the AsyncDbmQueue.
+ *
+ * This class encapsulates a database operation (read or write) along with
+ * its callback for async continuation and result/error storage.
+ */
+namespace Implexus.Storage {
+
+/**
+ * Type of database operation.
+ *
+ * Used by AsyncDbmQueue for scheduling: the worker drains read
+ * operations before write operations.
+ */
+public enum DbmOperationType {
+    /** Read operation - prioritized over writes */
+    READ,
+    /** Write operation - processed in order */
+    WRITE
+}
+
+/**
+ * Represents a single operation in the AsyncDbmQueue.
+ *
+ * Stores the operation type, callback for async continuation,
+ * and provides storage for the result or error.
+ */
+public class DbmOperation : GLib.Object {
+    /**
+     * The type of this operation (READ or WRITE).
+     */
+    public DbmOperationType op_type { get; construct; }
+    
+    /**
+     * The callback to resume the async method when complete.
+     * AsyncDbmQueue invokes it via Idle.add, i.e. on the default main context.
+     */
+    public SourceFunc callback;
+    
+    /**
+     * The result of the operation (if successful).
+     * Stored as a generic pointer since Vala generics have limitations.
+     * NOTE(review): void* carries no ownership — the producer must keep the
+     * referenced value alive until the consumer has read it.
+     */
+    public void* result { get; set; }
+    
+    /**
+     * The error that occurred during execution (if any).
+     */
+    public Error? error { get; set; }
+    
+    /**
+     * Whether the operation has been completed.
+     * Also used as a sentinel: AsyncDbmQueue.shutdown() pushes a
+     * pre-completed dummy operation to wake its worker thread.
+     */
+    public bool completed { get; set; default = false; }
+    
+    /**
+     * Creates a new DbmOperation.
+     *
+     * @param type The type of operation (READ or WRITE)
+     * @param cb The callback to resume the async method
+     */
+    public DbmOperation(DbmOperationType type, owned SourceFunc cb) {
+        Object(op_type: type);
+        this.callback = (owned)cb;
+    }
+}
+
+} // namespace Implexus.Storage

+ 524 - 0
src/Storage/ElementSerializer.vala

@@ -0,0 +1,524 @@
+/**
+ * Element binary serialization for Implexus storage layer.
+ *
+ * Provides serialization and deserialization of Invercargill.Element
+ * values to/from binary format for persistent storage.
+ */
+
+namespace Implexus.Storage {
+
+    /**
+     * On-disk type tags used by the binary element format.
+     *
+     * The numeric values are part of the serialized format and must
+     * never be renumbered.
+     */
+    internal enum ElementTypeCode {
+        NULL = 0x00,
+        BOOL = 0x01,
+        INT64 = 0x02,
+        UINT64 = 0x03,
+        DOUBLE = 0x04,
+        STRING = 0x05,
+        BINARY = 0x06,
+        ARRAY = 0x07,
+        DICTIONARY = 0x08;
+
+        /**
+         * Maps an Element's runtime GLib.Type to its serialization tag.
+         *
+         * Unrecognized types map to NULL so unknown values degrade to a
+         * stored null rather than failing.
+         */
+        public static ElementTypeCode from_type(GLib.Type type) {
+            if (type == typeof(bool)) {
+                return BOOL;
+            }
+            if (type == typeof(int64)) {
+                return INT64;
+            }
+            if (type == typeof(uint64)) {
+                return UINT64;
+            }
+            if (type == typeof(double)) {
+                return DOUBLE;
+            }
+            if (type == typeof(string)) {
+                return STRING;
+            }
+            if (type == typeof(Invercargill.BinaryData)) {
+                return BINARY;
+            }
+            if (type == typeof(Invercargill.Enumerable<Invercargill.Element>)) {
+                return ARRAY;
+            }
+            if (type == typeof(Invercargill.Properties)) {
+                return DICTIONARY;
+            }
+            return NULL;
+        }
+    }
+
+    /**
+     * Writes Element values to binary format.
+     *
+     * Each value is encoded as a one-byte ElementTypeCode tag followed by a
+     * type-specific payload; multi-byte integers are big-endian. The value is
+     * extracted from the Element *before* the tag byte is appended, so a
+     * failed extraction leaves the buffer untouched. (Previously the tag was
+     * appended first, and an extraction failure left a dangling tag byte —
+     * and an extra NULL byte from the array/dictionary fallback — corrupting
+     * everything written after it.)
+     */
+    public class ElementWriter : Object {
+        private Invercargill.ByteComposition _buffer;
+
+        /**
+         * Creates a new ElementWriter with an empty buffer.
+         */
+        public ElementWriter() {
+            _buffer = new Invercargill.ByteComposition();
+        }
+
+        /**
+         * Extracts a typed value from an element, translating extraction
+         * failures into StorageError.CORRUPT_DATA.
+         *
+         * @param element The element to read
+         * @param type_name Human-readable type name used in the error message
+         * @throws StorageError if the value cannot be read as T
+         */
+        private T extract<T>(Invercargill.Element element, string type_name) throws StorageError {
+            try {
+                return element.as<T>();
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to get %s value: %s".printf(type_name, e.message));
+            }
+        }
+
+        /**
+         * Writes an element (tag byte plus payload) to the buffer.
+         *
+         * The write is atomic with respect to the buffer: if the value cannot
+         * be extracted, nothing is appended and StorageError is thrown.
+         * Elements of unknown type are serialized as NULL.
+         *
+         * @param element The element to write
+         * @throws StorageError if the element's value cannot be read
+         */
+        public void write_element(Invercargill.Element element) throws StorageError {
+            // Null elements get a bare NULL tag with no payload.
+            if (element.is_null()) {
+                write_null();
+                return;
+            }
+
+            // Extract first, then emit tag + payload, so failures are atomic.
+            var type = element.type();
+            if (type == typeof(bool)) {
+                bool? val = extract<bool?>(element, "bool");
+                _buffer.append_uint8(ElementTypeCode.BOOL);
+                write_bool(val == null ? false : (!) val);
+            } else if (type == typeof(int64)) {
+                int64? val = extract<int64?>(element, "int64");
+                _buffer.append_uint8(ElementTypeCode.INT64);
+                write_int64(val == null ? 0 : (!) val);
+            } else if (type == typeof(uint64)) {
+                uint64? val = extract<uint64?>(element, "uint64");
+                _buffer.append_uint8(ElementTypeCode.UINT64);
+                write_uint64(val == null ? 0 : (!) val);
+            } else if (type == typeof(double)) {
+                double? val = extract<double?>(element, "double");
+                _buffer.append_uint8(ElementTypeCode.DOUBLE);
+                write_double(val == null ? 0.0 : (!) val);
+            } else if (type == typeof(string)) {
+                string val = extract<string>(element, "string");
+                _buffer.append_uint8(ElementTypeCode.STRING);
+                write_string(val);
+            } else if (type == typeof(Invercargill.BinaryData)) {
+                var val = extract<Invercargill.BinaryData>(element, "binary");
+                _buffer.append_uint8(ElementTypeCode.BINARY);
+                write_binary(val);
+            } else if (type == typeof(Invercargill.Enumerable<Invercargill.Element>)) {
+                var val = extract<Invercargill.Enumerable<Invercargill.Element>>(element, "array");
+                _buffer.append_uint8(ElementTypeCode.ARRAY);
+                write_array(val);
+            } else if (type == typeof(Invercargill.Properties)) {
+                var val = extract<Invercargill.Properties>(element, "dictionary");
+                _buffer.append_uint8(ElementTypeCode.DICTIONARY);
+                write_dictionary(val);
+            } else {
+                // Unknown type - serialize as null rather than failing.
+                write_null();
+            }
+        }
+
+        /**
+         * Writes a null value (bare NULL tag).
+         */
+        public void write_null() {
+            _buffer.append_uint8(ElementTypeCode.NULL);
+        }
+
+        /**
+         * Writes a boolean value as a single byte (1 = true, 0 = false).
+         */
+        public void write_bool(bool value) {
+            _buffer.append_uint8(value ? (uint8) 1 : (uint8) 0);
+        }
+
+        /**
+         * Writes an int64 value (big-endian, two's complement).
+         */
+        public void write_int64(int64 value) {
+            write_uint64((uint64) value);
+        }
+
+        /**
+         * Writes a uint64 value (big-endian).
+         */
+        public void write_uint64(uint64 value) {
+            // Most-significant byte first.
+            for (int shift = 56; shift >= 0; shift -= 8) {
+                _buffer.append_uint8((uint8) ((value >> shift) & 0xFF));
+            }
+        }
+
+        /**
+         * Writes a double value (IEEE-754 bits, big-endian).
+         */
+        public void write_double(double value) {
+            // Reinterpret the double's bit pattern as an integer.
+            int64 bits = (int64) *(uint64*) &value;
+            write_int64(bits);
+        }
+
+        /**
+         * Writes a length prefix (int64, big-endian).
+         */
+        public void write_length(int64 length) {
+            write_int64(length);
+        }
+
+        /**
+         * Writes a length-prefixed string.
+         *
+         * Note: string.length is the UTF-8 *byte* count, which is exactly
+         * what the reader expects.
+         */
+        public void write_string(string value) {
+            int64 len = value.length;
+            write_length(len);
+            _buffer.append_string(value);
+        }
+
+        /**
+         * Writes length-prefixed binary data.
+         */
+        public void write_binary(Invercargill.BinaryData value) {
+            var bytes = value.to_bytes();
+            int64 len = (int64) bytes.length;
+            write_length(len);
+            _buffer.append_bytes(bytes);
+        }
+
+        /**
+         * Writes a count-prefixed array of elements.
+         *
+         * Elements whose value cannot be extracted are written as NULL;
+         * this is safe because write_element appends nothing on failure.
+         */
+        public void write_array(Invercargill.Enumerable<Invercargill.Element> collection) {
+            uint count = collection.count();
+            write_length((int64) count);
+            foreach (var item in collection) {
+                try {
+                    write_element(item);
+                } catch (StorageError e) {
+                    // Skip invalid elements - write null
+                    write_null();
+                }
+            }
+        }
+
+        /**
+         * Writes a count-prefixed dictionary of key/element pairs.
+         *
+         * Values whose element cannot be extracted are written as NULL;
+         * this is safe because write_element appends nothing on failure.
+         */
+        public void write_dictionary(Invercargill.Properties props) {
+            uint count = props.count();
+            write_length((int64) count);
+            foreach (var kvp in props) {
+                write_string(kvp.key);
+                try {
+                    write_element(kvp.value);
+                } catch (StorageError e) {
+                    // Write null for invalid elements
+                    write_null();
+                }
+            }
+        }
+
+        /**
+         * Gets the written data as BinaryData.
+         *
+         * @return The binary data accumulated so far
+         */
+        public Invercargill.BinaryData to_binary_data() {
+            return _buffer;
+        }
+
+        /**
+         * Clears the buffer for reuse.
+         */
+        public void clear() {
+            _buffer.clear();
+        }
+    }
+
+    /**
+     * Reads Element values from binary format.
+     *
+     * All bounds checks are written as comparisons against the remaining
+     * byte count rather than "_position + length > _size", which can wrap
+     * around in unsigned arithmetic when a corrupt stream supplies a huge
+     * length. Length prefixes read from the stream are validated against
+     * the remaining data before being narrowed to uint.
+     */
+    public class ElementReader : Object {
+        private Invercargill.BinaryData _data;
+        private Invercargill.ReadOnlyAddressable<uint8> _buffer;
+        private uint _position;
+        private uint _size;
+
+        /**
+         * Creates a new ElementReader with the given data.
+         *
+         * @param data The binary data to read
+         */
+        public ElementReader(Invercargill.BinaryData data) {
+            _data = data;
+            _buffer = data.to_byte_buffer();
+            _position = 0;
+            _size = data.count();
+        }
+
+        /**
+         * Current read position in the data.
+         */
+        public uint position {
+            get { return _position; }
+        }
+
+        /**
+         * Total size of the data in bytes.
+         */
+        public uint size {
+            get { return _size; }
+        }
+
+        /**
+         * Whether there is more data to read.
+         */
+        public bool has_more {
+            get { return _position < _size; }
+        }
+
+        /**
+         * Number of unread bytes remaining.
+         * Safe because _position <= _size is an invariant of this class.
+         */
+        private uint remaining() {
+            return _size - _position;
+        }
+
+        /**
+         * Reads a single byte and advances the position.
+         */
+        private uint8 read_byte() throws StorageError {
+            if (_position >= _size) {
+                throw new StorageError.CORRUPT_DATA("Unexpected end of data");
+            }
+            try {
+                var byte_val = _buffer.get(_position);
+                _position++;
+                return byte_val;
+            } catch (Invercargill.IndexError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read byte: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * Reads a run of bytes and advances the position.
+         *
+         * @param length Number of bytes to read
+         * @return The bytes read
+         * @throws StorageError if fewer than length bytes remain
+         */
+        private uint8[] read_bytes(uint length) throws StorageError {
+            // Overflow-safe bounds check (no unsigned addition).
+            if (length > remaining()) {
+                throw new StorageError.CORRUPT_DATA("Unexpected end of data");
+            }
+
+            var bytes = new uint8[length];
+            var slice = _data.slice(_position, _position + length);
+            var slice_bytes = slice.to_bytes();
+            Memory.copy(bytes, slice_bytes.get_data(), length);
+            _position += length;
+            return bytes;
+        }
+
+        /**
+         * Reads a tagged element from the data.
+         *
+         * @return The element read
+         * @throws StorageError on truncated data or an unknown type tag
+         */
+        public Invercargill.Element read_element() throws StorageError {
+            if (_position >= _size) {
+                throw new StorageError.CORRUPT_DATA("Unexpected end of data");
+            }
+
+            var type_code = (ElementTypeCode) read_byte();
+
+            switch (type_code) {
+                case ElementTypeCode.NULL:
+                    return new Invercargill.NullElement();
+                case ElementTypeCode.BOOL:
+                    return new Invercargill.NativeElement<bool?>(read_bool());
+                case ElementTypeCode.INT64:
+                    return new Invercargill.NativeElement<int64?>(read_int64());
+                case ElementTypeCode.UINT64:
+                    return new Invercargill.NativeElement<uint64?>(read_uint64());
+                case ElementTypeCode.DOUBLE:
+                    return new Invercargill.NativeElement<double?>(read_double());
+                case ElementTypeCode.STRING:
+                    return new Invercargill.NativeElement<string>(read_string());
+                case ElementTypeCode.BINARY:
+                    return new Invercargill.NativeElement<Invercargill.BinaryData>(read_binary());
+                case ElementTypeCode.ARRAY:
+                    return read_array();
+                case ElementTypeCode.DICTIONARY:
+                    return read_dictionary();
+                default:
+                    throw new StorageError.CORRUPT_DATA("Unknown type code: %d".printf((int) type_code));
+            }
+        }
+
+        /**
+         * Reads a boolean value (any nonzero byte is true).
+         */
+        public bool read_bool() throws StorageError {
+            return read_byte() != 0;
+        }
+
+        /**
+         * Assembles 8 big-endian bytes into a uint64.
+         *
+         * @param what Type name used in error messages ("int64"/"uint64")
+         */
+        private uint64 read_raw_u64(string what) throws StorageError {
+            if (remaining() < 8) {
+                throw new StorageError.CORRUPT_DATA("Unexpected end of data reading %s".printf(what));
+            }
+            try {
+                uint64 result = 0;
+                for (int i = 0; i < 8; i++) {
+                    result = (result << 8) | (uint64) _buffer.get(_position + i);
+                }
+                _position += 8;
+                return result;
+            } catch (Invercargill.IndexError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read %s: %s".printf(what, e.message));
+            }
+        }
+
+        /**
+         * Reads an int64 value (big-endian, two's complement).
+         */
+        public int64 read_int64() throws StorageError {
+            return (int64) read_raw_u64("int64");
+        }
+
+        /**
+         * Reads a uint64 value (big-endian).
+         */
+        public uint64 read_uint64() throws StorageError {
+            return read_raw_u64("uint64");
+        }
+
+        /**
+         * Reads a double value (IEEE-754 bits, big-endian).
+         */
+        public double read_double() throws StorageError {
+            int64 bits = read_int64();
+            return *(double*) &bits;
+        }
+
+        /**
+         * Reads a length prefix.
+         */
+        public int64 read_length() throws StorageError {
+            return read_int64();
+        }
+
+        /**
+         * Reads a length-prefixed string.
+         *
+         * The length is validated against the remaining data *before* it
+         * is narrowed to uint, so a corrupt 64-bit length cannot truncate
+         * to a small value and silently misparse the stream.
+         */
+        public string read_string() throws StorageError {
+            int64 len = read_length();
+            if (len < 0) {
+                throw new StorageError.CORRUPT_DATA("Negative string length");
+            }
+            if (len > (int64) remaining()) {
+                throw new StorageError.CORRUPT_DATA("String length exceeds remaining data");
+            }
+            var bytes = read_bytes((uint) len);
+            // Rebuild a null-terminated string from the raw UTF-8 bytes.
+            var str_builder = new StringBuilder.sized((size_t)(len + 1));
+            for (int64 i = 0; i < len; i++) {
+                str_builder.append_c((char) bytes[i]);
+            }
+            return str_builder.str;
+        }
+
+        /**
+         * Reads length-prefixed binary data.
+         */
+        public Invercargill.BinaryData read_binary() throws StorageError {
+            int64 len = read_length();
+            if (len < 0) {
+                throw new StorageError.CORRUPT_DATA("Negative binary length");
+            }
+            if (len > (int64) remaining()) {
+                throw new StorageError.CORRUPT_DATA("Binary length exceeds remaining data");
+            }
+            var bytes = read_bytes((uint) len);
+            return new Invercargill.DataStructures.ByteBuffer.from_byte_array(bytes);
+        }
+
+        /**
+         * Reads a count-prefixed array of elements.
+         *
+         * The count is sanity-checked against the remaining bytes (every
+         * serialized element occupies at least one byte) to reject
+         * allocation bombs from corrupt data early.
+         */
+        public Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>> read_array() throws StorageError {
+            int64 count = read_length();
+            if (count < 0) {
+                throw new StorageError.CORRUPT_DATA("Negative array count");
+            }
+            if (count > (int64) remaining()) {
+                throw new StorageError.CORRUPT_DATA("Array count exceeds remaining data");
+            }
+
+            var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+            for (int64 i = 0; i < count; i++) {
+                var element = read_element();
+                array.add(element);
+            }
+
+            return new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array);
+        }
+
+        /**
+         * Reads a count-prefixed dictionary of key/element pairs.
+         */
+        public Invercargill.NativeElement<Invercargill.Properties> read_dictionary() throws StorageError {
+            int64 count = read_length();
+            if (count < 0) {
+                throw new StorageError.CORRUPT_DATA("Negative dictionary count");
+            }
+            if (count > (int64) remaining()) {
+                throw new StorageError.CORRUPT_DATA("Dictionary count exceeds remaining data");
+            }
+
+            var props = new Invercargill.DataStructures.PropertyDictionary();
+            for (int64 i = 0; i < count; i++) {
+                string key = read_string();
+                var value = read_element();
+                try {
+                    props.set(key, value);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.CORRUPT_DATA("Failed to set dictionary key: %s".printf(e.message));
+                }
+            }
+
+            return new Invercargill.NativeElement<Invercargill.Properties>(props);
+        }
+    }
+}

+ 320 - 0
src/Storage/FilesystemDbm.vala

@@ -0,0 +1,320 @@
+/**
+ * Filesystem-based Dbm implementation.
+ *
+ * This implementation stores each key-value pair in a separate file
+ * within a directory. This provides crash recovery through atomic
+ * file operations.
+ */
+
+namespace Implexus.Storage {
+
+    /**
+     * Filesystem-based Dbm implementation.
+     *
+     * Stores each key-value pair in a separate file for crash recovery.
+     * Filenames are hex-encoded keys for safety; writes go to a ".tmp"
+     * sibling first and are then atomically renamed into place.
+     *
+     * Transactions are buffered in memory and applied on commit. Commit
+     * is not atomic across keys: if it fails part-way through, some keys
+     * are updated and others are not, and the instance remains in the
+     * transaction state so the caller can roll back the buffer.
+     */
+    public class FilesystemDbm : Object, Dbm {
+        private string _data_dir;
+        private bool _in_transaction_value = false;
+        // Pending writes made inside the current transaction.
+        private Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData> _transaction_buffer;
+        // Keys deleted inside the current transaction (bool value unused).
+        private Invercargill.DataStructures.Dictionary<string, bool> _transaction_deletes;
+
+        /**
+         * Creates a new FilesystemDbm storing data in the given directory.
+         *
+         * @param data_dir Directory to store data files in
+         */
+        public FilesystemDbm(string data_dir) {
+            _data_dir = data_dir;
+            _transaction_buffer = new Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData>();
+            _transaction_deletes = new Invercargill.DataStructures.Dictionary<string, bool>();
+
+            // Ensure directory exists; a failure here surfaces later as
+            // read/write errors on individual keys.
+            DirUtils.create_with_parents(data_dir, 0755);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool supports_concurrent_reads {
+            get { return false; }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool in_transaction {
+            get { return _in_transaction_value; }
+        }
+
+        /**
+         * Gets the value for a key.
+         *
+         * Inside a transaction, buffered deletes and writes shadow the
+         * on-disk state; otherwise the backing file is read directly.
+         *
+         * @param key The key to look up
+         * @return The value, or null if not found or unreadable
+         */
+        public Invercargill.BinaryData? @get(string key) {
+            // Check transaction buffer first
+            if (_in_transaction_value) {
+                if (_transaction_deletes.has(key)) {
+                    return null;
+                }
+                if (_transaction_buffer.has(key)) {
+                    try {
+                        return _transaction_buffer.get(key);
+                    } catch (Invercargill.IndexError e) {
+                        return null;
+                    }
+                }
+            }
+
+            string file_path = get_file_path(key);
+            if (!FileUtils.test(file_path, FileTest.EXISTS)) {
+                return null;
+            }
+
+            try {
+                uint8[] contents;
+                FileUtils.get_data(file_path, out contents);
+                return new Invercargill.DataStructures.ByteBuffer.from_byte_array(contents);
+            } catch (FileError e) {
+                // Treat an unreadable file the same as a missing key.
+                return null;
+            }
+        }
+
+        /**
+         * Sets a key-value pair.
+         *
+         * Inside a transaction the write is buffered (and any pending
+         * delete for the key is cancelled); otherwise it is written to
+         * disk immediately.
+         *
+         * @param key The key
+         * @param value The value
+         * @throws StorageError if the write cannot be buffered or persisted
+         */
+        public void @set(string key, Invercargill.BinaryData value) throws StorageError {
+            if (_in_transaction_value) {
+                try {
+                    _transaction_buffer.set(key, value);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.IO_ERROR("Failed to buffer transaction set: %s".printf(e.message));
+                }
+                _transaction_deletes.remove(key);
+            } else {
+                write_key(key, value);
+            }
+        }
+
+        /**
+         * Deletes a key.
+         *
+         * @param key The key to delete
+         * @throws StorageError if the backing file cannot be removed
+         */
+        public void delete(string key) throws StorageError {
+            if (_in_transaction_value) {
+                try {
+                    _transaction_buffer.remove(key);
+                } catch (Invercargill.IndexError e) {
+                    // Key not in buffer, that's fine
+                }
+                try {
+                    _transaction_deletes.set(key, true);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.IO_ERROR("Failed to buffer transaction delete: %s".printf(e.message));
+                }
+            } else {
+                unlink_key(key);
+            }
+        }
+
+        /**
+         * Checks if a key exists.
+         *
+         * @param key The key to check
+         * @return True if the key exists
+         */
+        public bool has_key(string key) {
+            if (_in_transaction_value) {
+                if (_transaction_deletes.has(key)) {
+                    return false;
+                }
+                if (_transaction_buffer.has(key)) {
+                    return true;
+                }
+            }
+
+            string file_path = get_file_path(key);
+            return FileUtils.test(file_path, FileTest.EXISTS);
+        }
+
+        /**
+         * Gets all keys in the database.
+         *
+         * Leftover ".tmp" files from interrupted writes are skipped —
+         * they are not committed values and their names are not valid
+         * hex-encoded keys.
+         *
+         * @return Enumerable of all keys
+         */
+        public Invercargill.Enumerable<string> keys {
+            owned get {
+                var result = new Invercargill.DataStructures.HashSet<string>();
+
+                try {
+                    Dir dir = Dir.open(_data_dir, 0);
+                    string? name;
+                    while ((name = dir.read_name()) != null) {
+                        // Skip temp files left behind by crashed writes.
+                        if (name.has_suffix(".tmp")) {
+                            continue;
+                        }
+                        string key = decode_key(name);
+                        if (_in_transaction_value && _transaction_deletes.has(key)) {
+                            continue;
+                        }
+                        result.add(key);
+                    }
+                } catch (FileError e) {
+                    // Directory unreadable - return whatever we have (empty).
+                }
+
+                // Add keys from transaction buffer
+                if (_in_transaction_value) {
+                    foreach (var key in _transaction_buffer.keys) {
+                        if (!result.has(key)) {
+                            result.add(key);
+                        }
+                    }
+                }
+
+                return result;
+            }
+        }
+
+        /**
+         * Begins a transaction.
+         *
+         * @throws StorageError if a transaction is already active
+         */
+        public void begin_transaction() throws StorageError {
+            if (_in_transaction_value) {
+                throw new StorageError.IO_ERROR("Already in transaction");
+            }
+            _in_transaction_value = true;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Commits the current transaction.
+         *
+         * Deletes are applied before writes; a key that was deleted and
+         * then re-set only appears in the write buffer, because set()
+         * removes it from the delete map. On failure the transaction
+         * state is left intact so the caller may roll back.
+         *
+         * @throws StorageError if any delete or write fails
+         */
+        public void commit_transaction() throws StorageError {
+            if (!_in_transaction_value) {
+                throw new StorageError.IO_ERROR("Not in transaction");
+            }
+
+            try {
+                // Apply all deletes
+                foreach (var key in _transaction_deletes.keys) {
+                    unlink_key(key);
+                }
+
+                // Apply all sets
+                foreach (var key in _transaction_buffer.keys) {
+                    var value = _transaction_buffer.get(key);
+                    write_key(key, value);
+                }
+
+                _in_transaction_value = false;
+                _transaction_buffer.clear();
+                _transaction_deletes.clear();
+            } catch (Invercargill.IndexError e) {
+                throw new StorageError.IO_ERROR("Transaction commit failed: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * Rolls back the current transaction, discarding buffered changes.
+         */
+        public void rollback_transaction() {
+            _in_transaction_value = false;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Gets the on-disk file path for a key.
+         */
+        private string get_file_path(string key) {
+            return Path.build_filename(_data_dir, encode_key(key));
+        }
+
+        /**
+         * Removes the backing file for a key, if present.
+         *
+         * GLib.FileUtils.unlink reports failure via its return value (it
+         * does not throw), so the result is checked explicitly — the
+         * previous code wrapped it in a dead catch block and silently
+         * ignored failed deletions.
+         */
+        private void unlink_key(string key) throws StorageError {
+            string file_path = get_file_path(key);
+            if (FileUtils.test(file_path, FileTest.EXISTS)) {
+                if (FileUtils.unlink(file_path) != 0) {
+                    throw new StorageError.IO_ERROR("Failed to delete key: %s".printf(file_path));
+                }
+            }
+        }
+
+        /**
+         * Encodes a key to a safe filename using hex encoding of its
+         * raw bytes (key.length is the byte count).
+         * Empty key is encoded as "__empty__" to avoid an empty filename.
+         */
+        private string encode_key(string key) {
+            if (key.length == 0) {
+                return "__empty__";
+            }
+            var builder = new StringBuilder();
+            for (int i = 0; i < key.length; i++) {
+                builder.append("%02x".printf((uint8) key[i]));
+            }
+            return builder.str;
+        }
+
+        /**
+         * Decodes a key from a hex filename.
+         * "__empty__" is decoded as the empty string. Only filenames
+         * produced by encode_key are expected here (keys skips ".tmp"
+         * leftovers); other names would decode to garbage.
+         */
+        private string decode_key(string encoded) {
+            if (encoded == "__empty__") {
+                return "";
+            }
+            var builder = new StringBuilder();
+            for (int i = 0; i < encoded.length; i += 2) {
+                string hex = encoded.substring(i, 2);
+                builder.append_c((char) hex_to_int(hex));
+            }
+            return builder.str;
+        }
+
+        /**
+         * Converts a hex string to an integer.
+         * Non-hex characters are silently skipped; callers only pass
+         * two-character slices of our own hex encodings.
+         */
+        private int hex_to_int(string hex) {
+            int result = 0;
+            for (int i = 0; i < hex.length; i++) {
+                char c = hex[i];
+                result *= 16;
+                if (c >= '0' && c <= '9') {
+                    result += c - '0';
+                } else if (c >= 'a' && c <= 'f') {
+                    result += c - 'a' + 10;
+                } else if (c >= 'A' && c <= 'F') {
+                    result += c - 'A' + 10;
+                }
+            }
+            return result;
+        }
+
+        /**
+         * Writes a key-value pair to disk atomically (temp file + rename).
+         */
+        private void write_key(string key, Invercargill.BinaryData value) throws StorageError {
+            string file_path = get_file_path(key);
+            string temp_path = file_path + ".tmp";
+
+            try {
+                // Convert BinaryData to bytes and stage in the temp file.
+                var bytes = value.to_bytes();
+                FileUtils.set_data(temp_path, bytes.get_data());
+            } catch (FileError e) {
+                throw new StorageError.IO_ERROR("Failed to write key: %s".printf(e.message));
+            }
+
+            // GLib.FileUtils.rename reports failure via its return value
+            // (it does not throw); the previous code ignored the result,
+            // so a failed rename silently dropped the write. Clean up the
+            // temp file on failure.
+            if (FileUtils.rename(temp_path, file_path) != 0) {
+                FileUtils.unlink(temp_path);
+                throw new StorageError.IO_ERROR("Failed to write key: rename failed for %s".printf(file_path));
+            }
+        }
+    }
+}

+ 337 - 0
src/Storage/Gdbm/GdbmDbm.vala

@@ -0,0 +1,337 @@
+/**
+ * GdbmDbm - GDBM-based implementation of the Dbm interface.
+ *
+ * This implementation uses the GNU dbm library for persistent
+ * key-value storage with efficient disk-based operations.
+ */
+
+namespace Implexus.Storage {
+
+    /**
+     * GDBM-based Dbm implementation.
+     *
+     * Provides persistent key-value storage using the GNU dbm library.
+     * Supports transactions through an in-memory buffer: writes and
+     * deletes are staged in dictionaries and only applied to the GDBM
+     * file on commit.
+     */
+    public class GdbmDbm : Object, Dbm {
+        private Gdbm.Database? _dbf = null;
+        private string _path;
+        private bool _read_only;
+        private bool _in_transaction_value = false;
+        // Staged writes; a key present here shadows the on-disk value.
+        private Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData> _transaction_buffer;
+        // Staged deletes; a key present here reads as absent. Kept disjoint
+        // from _transaction_buffer: set() and delete() each remove the key
+        // from the opposite structure.
+        private Invercargill.DataStructures.Dictionary<string, bool> _transaction_deletes;
+
+        /**
+         * Creates a new GdbmDbm instance.
+         *
+         * The database is not opened until open() is called.
+         */
+        public GdbmDbm() {
+            _transaction_buffer = new Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData>();
+            _transaction_deletes = new Invercargill.DataStructures.Dictionary<string, bool>();
+        }
+
+        /**
+         * Opens the database file.
+         *
+         * @param path Path to the database file
+         * @param read_only If true, opens in read-only mode
+         * @throws StorageError if the database is already open or cannot be opened
+         */
+        public void open(string path, bool read_only = false) throws StorageError {
+            if (_dbf != null) {
+                throw new StorageError.IO_ERROR("Database already open");
+            }
+
+            _path = path;
+            _read_only = read_only;
+
+            int flags;
+            if (read_only) {
+                flags = (int) Gdbm.OpenFlag.READER;
+            } else {
+                // WRCREAT creates if not exists, opens for writing if exists
+                flags = (int) Gdbm.OpenFlag.WRCREAT;
+            }
+
+            _dbf = Gdbm.Database.open(path, 0, flags, 0644, null);
+
+            if (_dbf == null) {
+                throw new StorageError.IO_ERROR("Failed to open database: %s".printf(path));
+            }
+        }
+
+        /**
+         * Closes the database.
+         *
+         * Any pending transaction is rolled back (its staged changes are
+         * discarded) before the underlying GDBM handle is released.
+         *
+         * @throws StorageError if the database is not open
+         */
+        public void close() throws StorageError {
+            if (_dbf == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            // Rollback any pending transaction
+            if (_in_transaction_value) {
+                rollback_transaction();
+            }
+
+            // Delete the Database object, which calls gdbm_close via free_function
+            _dbf = null;
+        }
+
+        /**
+         * Indicates whether the database is currently open.
+         */
+        public bool is_open {
+            get { return _dbf != null; }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool supports_concurrent_reads {
+            get { return false; }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool in_transaction {
+            get { return _in_transaction_value; }
+        }
+
+        /**
+         * Checks if a key exists in the database.
+         *
+         * Read-your-writes: staged transaction state takes precedence
+         * over the on-disk contents.
+         *
+         * @param key The key to check
+         * @return True if the key exists
+         */
+        public bool has_key(string key) {
+            // Check transaction state first
+            if (_in_transaction_value) {
+                if (_transaction_deletes.has(key)) {
+                    return false;
+                }
+                if (_transaction_buffer.has(key)) {
+                    return true;
+                }
+            }
+
+            if (_dbf == null) {
+                return false;
+            }
+
+            return ((!) _dbf).exists(key);
+        }
+
+        /**
+         * Gets the value for a key.
+         *
+         * Read-your-writes: staged transaction state takes precedence
+         * over the on-disk contents.
+         *
+         * @param key The key to look up
+         * @return The value, or null if not found
+         */
+        public Invercargill.BinaryData? @get(string key) {
+            // Check transaction state first
+            if (_in_transaction_value) {
+                if (_transaction_deletes.has(key)) {
+                    return null;
+                }
+                if (_transaction_buffer.has(key)) {
+                    try {
+                        return _transaction_buffer.get(key);
+                    } catch (Invercargill.IndexError e) {
+                        return null;
+                    }
+                }
+            }
+
+            if (_dbf == null) {
+                return null;
+            }
+
+            GLib.Bytes? data = ((!) _dbf).fetch(key);
+            if (data == null) {
+                return null;
+            }
+
+            return new Invercargill.DataStructures.ByteBuffer.from_byte_array(((!) data).get_data());
+        }
+
+        /**
+         * Sets a key-value pair.
+         *
+         * Inside a transaction the write is staged in memory; otherwise
+         * it is applied to the GDBM file immediately.
+         *
+         * @param key The key
+         * @param value The value
+         * @throws StorageError if the write operation fails
+         */
+        public void @set(string key, Invercargill.BinaryData value) throws StorageError {
+            if (_dbf == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            if (_read_only) {
+                throw new StorageError.IO_ERROR("Database opened in read-only mode");
+            }
+
+            if (_in_transaction_value) {
+                try {
+                    _transaction_buffer.set(key, value);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.IO_ERROR("Failed to buffer transaction set: %s".printf(e.message));
+                }
+                // A staged set supersedes any earlier staged delete.
+                _transaction_deletes.remove(key);
+            } else {
+                write_key(key, value);
+            }
+        }
+
+        /**
+         * Deletes a key from the database.
+         *
+         * Inside a transaction the delete is staged in memory; otherwise
+         * it is applied to the GDBM file immediately. Deleting a key that
+         * does not exist is not an error.
+         *
+         * @param key The key to delete
+         * @throws StorageError if the delete operation fails
+         */
+        public void delete(string key) throws StorageError {
+            if (_dbf == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            if (_read_only) {
+                throw new StorageError.IO_ERROR("Database opened in read-only mode");
+            }
+
+            if (_in_transaction_value) {
+                try {
+                    // A staged delete supersedes any earlier staged set.
+                    _transaction_buffer.remove(key);
+                } catch (Invercargill.IndexError e) {
+                    // Key not in buffer, that's fine
+                }
+                try {
+                    _transaction_deletes.set(key, true);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.IO_ERROR("Failed to buffer transaction delete: %s".printf(e.message));
+                }
+            } else {
+                int result = ((!) _dbf).delete(key);
+                if (result != 0) {
+                    // Key may not exist, which is fine for delete
+                    // But check for actual errors
+                    var error = ((!) _dbf).last_errno();
+                    if (error != Gdbm.ErrorCode.NO_ERROR && error != Gdbm.ErrorCode.ITEM_NOT_FOUND) {
+                        throw new StorageError.IO_ERROR("Failed to delete key: %s".printf(((!) _dbf).db_strerror()));
+                    }
+                }
+            }
+        }
+
+        /**
+         * Gets all keys in the database.
+         *
+         * Includes keys staged by the current transaction and excludes
+         * keys staged for deletion.
+         *
+         * @return Enumerable of all keys
+         */
+        public Invercargill.Enumerable<string> keys {
+            owned get {
+                var result = new Invercargill.DataStructures.HashSet<string>();
+
+                if (_dbf != null) {
+                    // Iterate through all keys in GDBM
+                    string? key = ((!) _dbf).first_key();
+                    while (key != null) {
+                        // Check if key is pending deletion in transaction
+                        if (_in_transaction_value && _transaction_deletes.has((!) key)) {
+                            // Skip this key
+                        } else {
+                            result.add((!) key);
+                        }
+                        key = ((!) _dbf).next_key((!) key);
+                    }
+                }
+
+                // Add keys from transaction buffer
+                if (_in_transaction_value) {
+                    foreach (var key in _transaction_buffer.keys) {
+                        if (!result.has(key)) {
+                            result.add(key);
+                        }
+                    }
+                }
+
+                return result;
+            }
+        }
+
+        /**
+         * Begins a transaction.
+         *
+         * @throws StorageError if already in a transaction or the database
+         *         is not open
+         */
+        public void begin_transaction() throws StorageError {
+            if (_in_transaction_value) {
+                throw new StorageError.IO_ERROR("Already in transaction");
+            }
+            // Guard against staging changes that could never be committed.
+            if (_dbf == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+            _in_transaction_value = true;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Commits the current transaction.
+         *
+         * Staged deletes are applied first, then staged writes (the two
+         * sets are disjoint, so the order does not change the outcome),
+         * then the database is synced to disk. On failure the transaction
+         * remains open with its staged changes intact, so the caller may
+         * retry or roll back.
+         *
+         * @throws StorageError if not in a transaction or if commit fails
+         */
+        public void commit_transaction() throws StorageError {
+            if (!_in_transaction_value) {
+                throw new StorageError.IO_ERROR("Not in transaction");
+            }
+
+            if (_dbf == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            try {
+                // Apply all deletes, checking errors the same way delete() does.
+                foreach (var key in _transaction_deletes.keys) {
+                    if (((!) _dbf).delete(key) != 0) {
+                        // Deleting a key that was never stored is fine;
+                        // anything else is a real failure.
+                        var error = ((!) _dbf).last_errno();
+                        if (error != Gdbm.ErrorCode.NO_ERROR && error != Gdbm.ErrorCode.ITEM_NOT_FOUND) {
+                            throw new StorageError.IO_ERROR("Failed to delete key during commit: %s".printf(((!) _dbf).db_strerror()));
+                        }
+                    }
+                }
+
+                // Apply all sets
+                foreach (var key in _transaction_buffer.keys) {
+                    var value = _transaction_buffer.get(key);
+                    write_key(key, value);
+                }
+
+                // Sync to disk
+                ((!) _dbf).sync();
+
+                _in_transaction_value = false;
+                _transaction_buffer.clear();
+                _transaction_deletes.clear();
+            } catch (Invercargill.IndexError e) {
+                throw new StorageError.IO_ERROR("Transaction commit failed: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * Rolls back the current transaction, discarding staged changes.
+         *
+         * Safe to call when no transaction is active (it is a no-op then),
+         * which close() relies on.
+         */
+        public void rollback_transaction() {
+            _in_transaction_value = false;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Writes a key-value pair directly to the GDBM file.
+         *
+         * Callers must ensure the database is open.
+         */
+        private void write_key(string key, Invercargill.BinaryData value) throws StorageError {
+            var bytes = value.to_bytes();
+            var gbytes = new GLib.Bytes(bytes.get_data());
+
+            int result = ((!) _dbf).store(key, gbytes, Gdbm.StoreFlag.REPLACE);
+            if (result != 0) {
+                throw new StorageError.IO_ERROR("Failed to store key: %s".printf(((!) _dbf).db_strerror()));
+            }
+        }
+    }
+}

+ 83 - 0
src/Storage/Gdbm/gdbm_wrapper.c

@@ -0,0 +1,83 @@
+/**
+ * GDBM wrapper functions for Vala bindings.
+ * 
+ * These wrapper functions handle the datum struct conversion between
+ * C and Vala types.
+ */
+
+#include "gdbm_wrapper.h"
+#include <string.h>
+
+/* Test membership of a NUL-terminated string key. */
+int gdbm_exists_wrapper(GDBM_FILE dbf, const char *key) {
+    datum key_datum = { .dptr = (char *) key, .dsize = (int) strlen(key) };
+    return gdbm_exists(dbf, key_datum);
+}
+
+/* Fetch a value as a GBytes; returns NULL when the key is absent. */
+GBytes* gdbm_fetch_wrapper(GDBM_FILE dbf, const char *key) {
+    datum key_datum = { .dptr = (char *) key, .dsize = (int) strlen(key) };
+    datum value = gdbm_fetch(dbf, key_datum);
+
+    if (value.dptr == NULL) {
+        return NULL;
+    }
+
+    /* Copy into a GBytes, then release the buffer malloc'd by GDBM. */
+    GBytes *bytes = g_bytes_new(value.dptr, value.dsize);
+    free(value.dptr);
+    return bytes;
+}
+
+/* Store GBytes content under a string key; flag is passed to gdbm_store. */
+int gdbm_store_wrapper(GDBM_FILE dbf, const char *key, GBytes *content, int flag) {
+    datum key_datum = { .dptr = (char *) key, .dsize = (int) strlen(key) };
+
+    gsize size = 0;
+    gconstpointer data = g_bytes_get_data(content, &size);
+    datum value = { .dptr = (char *) data, .dsize = (int) size };
+
+    return gdbm_store(dbf, key_datum, value, flag);
+}
+
+/* Delete the entry for a NUL-terminated string key. */
+int gdbm_delete_wrapper(GDBM_FILE dbf, const char *key) {
+    datum key_datum = { .dptr = (char *) key, .dsize = (int) strlen(key) };
+    return gdbm_delete(dbf, key_datum);
+}
+
+/* First key of the iteration order, or NULL for an empty database. */
+char* gdbm_firstkey_wrapper(GDBM_FILE dbf) {
+    datum first = gdbm_firstkey(dbf);
+    if (first.dptr == NULL) {
+        return NULL;
+    }
+
+    /* NUL-terminate a copy for Vala, then free GDBM's malloc'd buffer. */
+    char *copy = g_strndup(first.dptr, first.dsize);
+    free(first.dptr);
+    return copy;
+}
+
+/* Key following prev_key in the iteration order, or NULL at the end. */
+char* gdbm_nextkey_wrapper(GDBM_FILE dbf, const char *prev_key) {
+    datum prev_datum = { .dptr = (char *) prev_key, .dsize = (int) strlen(prev_key) };
+
+    datum next = gdbm_nextkey(dbf, prev_datum);
+    if (next.dptr == NULL) {
+        return NULL;
+    }
+
+    /* NUL-terminate a copy for Vala, then free GDBM's malloc'd buffer. */
+    char *copy = g_strndup(next.dptr, next.dsize);
+    free(next.dptr);
+    return copy;
+}

+ 54 - 0
src/Storage/Gdbm/gdbm_wrapper.h

@@ -0,0 +1,54 @@
+/**
+ * GDBM wrapper functions for Vala bindings.
+ *
+ * GDBM's native API passes keys and values as `datum` structs (pointer +
+ * length), which Vala cannot express directly. These wrappers convert
+ * between NUL-terminated strings / GBytes on the Vala side and datum on
+ * the C side. Keys are measured with strlen(), so keys containing
+ * embedded NUL bytes are not supported.
+ */
+
+#ifndef GDBM_WRAPPER_H
+#define GDBM_WRAPPER_H
+
+#include <glib.h>
+#include <gdbm.h>
+
+G_BEGIN_DECLS
+
+/**
+ * Wrapper for gdbm_exists that takes a string key.
+ * Returns non-zero if the key is present.
+ */
+int gdbm_exists_wrapper(GDBM_FILE dbf, const char *key);
+
+/**
+ * Wrapper for gdbm_fetch that returns a GBytes object.
+ * Returns NULL if key not found.
+ * Caller must unref the result.
+ */
+GBytes* gdbm_fetch_wrapper(GDBM_FILE dbf, const char *key);
+
+/**
+ * Wrapper for gdbm_store that takes a string key and GBytes content.
+ * flag is forwarded verbatim to gdbm_store (e.g. GDBM_REPLACE);
+ * returns gdbm_store's result (0 on success).
+ */
+int gdbm_store_wrapper(GDBM_FILE dbf, const char *key, GBytes *content, int flag);
+
+/**
+ * Wrapper for gdbm_delete that takes a string key.
+ * Returns gdbm_delete's result (0 on success).
+ */
+int gdbm_delete_wrapper(GDBM_FILE dbf, const char *key);
+
+/**
+ * Wrapper for gdbm_firstkey that returns a string.
+ * Returns NULL if database is empty.
+ * Caller must free the result.
+ */
+char* gdbm_firstkey_wrapper(GDBM_FILE dbf);
+
+/**
+ * Wrapper for gdbm_nextkey that returns a string.
+ * Returns NULL if no more keys.
+ * Caller must free the result.
+ */
+char* gdbm_nextkey_wrapper(GDBM_FILE dbf, const char *prev_key);
+
+G_END_DECLS
+
+#endif /* GDBM_WRAPPER_H */

+ 178 - 0
src/Storage/HighLevel/CatalogueStore.vala

@@ -0,0 +1,178 @@
+/**
+ * CatalogueStore - High-level facade for catalogue operations
+ *
+ * Wraps CatalogueConfigStorage and CatalogueIndexStorage behind a
+ * single catalogue-oriented API.
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.HighLevel {
+
+/**
+ * High-level facade for catalogue operations.
+ *
+ * Every method delegates to one of the underlying low-level storage
+ * components; this class keeps no state of its own beyond those delegates.
+ */
+public class CatalogueStore : Object {
+    
+    private LowLevel.CatalogueConfigStorage _config_storage;
+    private LowLevel.CatalogueIndexStorage _index_storage;
+    
+    /**
+     * Creates a new CatalogueStore backed by the given Dbm.
+     *
+     * @param dbm The Dbm backend shared by both storage components
+     */
+    public CatalogueStore(Dbm dbm) {
+        _config_storage = new LowLevel.CatalogueConfigStorage(dbm);
+        _index_storage = new LowLevel.CatalogueIndexStorage(dbm);
+    }
+    
+    // === Configuration Operations ===
+    
+    /**
+     * Persists the configuration for a catalogue.
+     *
+     * @param path The catalogue path
+     * @param type_label The type label for documents to catalogue
+     * @param expression The expression that derives each document's grouping key
+     * @throws StorageError if the operation fails
+     */
+    public void store_config(Core.EntityPath path, string type_label, string expression) throws StorageError {
+        _config_storage.store(path, type_label, expression);
+    }
+    
+    /**
+     * Loads the configuration for a catalogue.
+     *
+     * @param path The catalogue path
+     * @return The configuration, or null when none is stored
+     * @throws StorageError if the stored data is corrupt
+     */
+    public LowLevel.CatalogueConfig? load_config(Core.EntityPath path) throws StorageError {
+        return _config_storage.load(path);
+    }
+    
+    // === Single Group Operations ===
+    
+    /**
+     * Adds one document path to a catalogue group.
+     *
+     * Membership checking is O(1) (HashSet) inside the index storage.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @param key The group key (e.g., "john" for author grouping)
+     * @param doc_path The document path to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_to_group(Core.EntityPath catalogue_path, string key, string doc_path) throws StorageError {
+        var catalogue_key = catalogue_path.to_string();
+        _index_storage.add_to_group(catalogue_key, key, doc_path);
+    }
+    
+    /**
+     * Removes one document path from a catalogue group.
+     *
+     * Membership checking is O(1) (HashSet) inside the index storage.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @param key The group key
+     * @param doc_path The document path to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_from_group(Core.EntityPath catalogue_path, string key, string doc_path) throws StorageError {
+        var catalogue_key = catalogue_path.to_string();
+        _index_storage.remove_from_group(catalogue_key, key, doc_path);
+    }
+    
+    /**
+     * Lists the member document paths of one catalogue group.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @param key The group key
+     * @return An enumerable of document paths
+     */
+    public Invercargill.Enumerable<string> get_group_members(Core.EntityPath catalogue_path, string key) {
+        return _index_storage.get_group_members(catalogue_path.to_string(), key);
+    }
+    
+    /**
+     * Lists every group key known to a catalogue.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @return An enumerable of group keys
+     */
+    public Invercargill.Enumerable<string> get_group_keys(Core.EntityPath catalogue_path) {
+        return _index_storage.get_keys(catalogue_path.to_string());
+    }
+    
+    // === Batch Group Operations ===
+    
+    /**
+     * Adds many document paths to a catalogue group in one operation.
+     *
+     * Change tracking and membership checks are O(1) per path internally.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @param key The group key
+     * @param doc_paths The document paths to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_to_group_batch(Core.EntityPath catalogue_path, string key, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        var catalogue_key = catalogue_path.to_string();
+        _index_storage.add_to_group_batch(catalogue_key, key, doc_paths);
+    }
+    
+    /**
+     * Removes many document paths from a catalogue group in one operation.
+     *
+     * Uses O(1) membership checks and an efficient rebuild internally.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @param key The group key
+     * @param doc_paths The document paths to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_from_group_batch(Core.EntityPath catalogue_path, string key, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        var catalogue_key = catalogue_path.to_string();
+        _index_storage.remove_from_group_batch(catalogue_key, key, doc_paths);
+    }
+    
+    /**
+     * Replaces the complete member set of a catalogue group.
+     *
+     * Existing members are discarded; the provided paths become the group.
+     * Duplicates are removed in O(1) per path while insertion order is kept.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @param key The group key
+     * @param doc_paths The complete set of member paths
+     * @throws StorageError if the operation fails
+     */
+    public void set_group_members(Core.EntityPath catalogue_path, string key, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        var catalogue_key = catalogue_path.to_string();
+        _index_storage.set_group_members(catalogue_key, key, doc_paths);
+    }
+    
+    // === Lifecycle Operations ===
+    
+    /**
+     * Clears all index data for a catalogue, leaving its configuration.
+     *
+     * @param catalogue_path The catalogue's entity path
+     * @throws StorageError if the operation fails
+     */
+    public void clear_index(Core.EntityPath catalogue_path) throws StorageError {
+        _index_storage.clear(catalogue_path.to_string());
+    }
+    
+    /**
+     * Deletes all data for a catalogue: configuration and index alike.
+     *
+     * @param path The catalogue path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        _config_storage.delete(path);
+        _index_storage.clear(path.to_string());
+    }
+}
+
+} // namespace Implexus.Storage.HighLevel

+ 212 - 0
src/Storage/HighLevel/CategoryStore.vala

@@ -0,0 +1,212 @@
+/**
+ * CategoryStore - High-level facade for category operations
+ *
+ * Wraps CategoryConfigStorage, CategoryIndexStorage, and ChildrenStorage
+ * behind a single category-oriented API.
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.HighLevel {
+
+/**
+ * High-level facade for category operations.
+ *
+ * Every method delegates to one of the underlying low-level storage
+ * components; this class keeps no state of its own beyond those delegates.
+ */
+public class CategoryStore : Object {
+    
+    private LowLevel.CategoryConfigStorage _config_storage;
+    private LowLevel.CategoryIndexStorage _index_storage;
+    private LowLevel.ChildrenStorage _children_storage;
+    
+    /**
+     * Creates a new CategoryStore backed by the given Dbm.
+     *
+     * @param dbm The Dbm backend shared by all three storage components
+     */
+    public CategoryStore(Dbm dbm) {
+        _config_storage = new LowLevel.CategoryConfigStorage(dbm);
+        _index_storage = new LowLevel.CategoryIndexStorage(dbm);
+        _children_storage = new LowLevel.ChildrenStorage(dbm);
+    }
+    
+    // === Configuration Operations ===
+    
+    /**
+     * Persists the configuration for a category.
+     *
+     * @param path The category path
+     * @param type_label The type label for entities
+     * @param expression The index expression
+     * @throws StorageError if the operation fails
+     */
+    public void store_config(Core.EntityPath path, string type_label, string expression) throws StorageError {
+        _config_storage.store(path, type_label, expression);
+    }
+    
+    /**
+     * Loads the configuration for a category.
+     *
+     * @param path The category path
+     * @return The configuration, or null when none is stored
+     * @throws StorageError if the stored data is corrupt
+     */
+    public LowLevel.CategoryConfig? load_config(Core.EntityPath path) throws StorageError {
+        return _config_storage.load(path);
+    }
+    
+    // === Single Member Operations ===
+    
+    /**
+     * Adds one document path to a category's member set.
+     *
+     * Membership checking is O(1) (HashSet) inside the index storage.
+     *
+     * @param category_path The category's entity path
+     * @param doc_path The document path to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_member(Core.EntityPath category_path, string doc_path) throws StorageError {
+        var category_key = category_path.to_string();
+        _index_storage.add_member(category_key, doc_path);
+    }
+    
+    /**
+     * Removes one document path from a category's member set.
+     *
+     * Membership checking is O(1) (HashSet) inside the index storage.
+     *
+     * @param category_path The category's entity path
+     * @param doc_path The document path to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_member(Core.EntityPath category_path, string doc_path) throws StorageError {
+        var category_key = category_path.to_string();
+        _index_storage.remove_member(category_key, doc_path);
+    }
+    
+    /**
+     * Reports whether a document belongs to a category.
+     *
+     * Membership checking is O(1) (HashSet) inside the index storage.
+     *
+     * @param category_path The category's entity path
+     * @param doc_path The document path to check
+     * @return true if the document is a member
+     */
+    public bool has_member(Core.EntityPath category_path, string doc_path) {
+        return _index_storage.has_member(category_path.to_string(), doc_path);
+    }
+    
+    // === Batch Member Operations ===
+    
+    /**
+     * Adds many document paths to a category's member set in one operation.
+     *
+     * Change tracking and membership checks are O(1) per path internally.
+     *
+     * @param category_path The category's entity path
+     * @param doc_paths The document paths to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_members(Core.EntityPath category_path, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        var category_key = category_path.to_string();
+        _index_storage.add_members(category_key, doc_paths);
+    }
+    
+    /**
+     * Removes many document paths from a category's member set in one operation.
+     *
+     * Uses O(1) membership checks and an efficient rebuild internally.
+     *
+     * @param category_path The category's entity path
+     * @param doc_paths The document paths to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_members(Core.EntityPath category_path, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        var category_key = category_path.to_string();
+        _index_storage.remove_members(category_key, doc_paths);
+    }
+    
+    /**
+     * Replaces the complete member set of a category.
+     *
+     * Existing members are discarded; the provided paths become the set.
+     * Duplicates are removed in O(1) per path while insertion order is kept.
+     *
+     * @param category_path The category's entity path
+     * @param doc_paths The complete set of member paths
+     * @throws StorageError if the operation fails
+     */
+    public void set_members(Core.EntityPath category_path, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        var category_key = category_path.to_string();
+        _index_storage.set_members(category_key, doc_paths);
+    }
+    
+    /**
+     * Lists every member document path of a category.
+     *
+     * @param category_path The category's entity path
+     * @return An enumerable of document paths
+     */
+    public Invercargill.Enumerable<string> get_members(Core.EntityPath category_path) {
+        return _index_storage.get_members(category_path.to_string());
+    }
+    
+    // === Structural Children Operations ===
+    
+    /**
+     * Adds a child name to a category's children set.
+     *
+     * @param parent The parent category path
+     * @param child_name The name of the child to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_child(Core.EntityPath parent, string child_name) throws StorageError {
+        _children_storage.add_child(parent, child_name);
+    }
+    
+    /**
+     * Removes a child name from a category's children set.
+     *
+     * @param parent The parent category path
+     * @param child_name The name of the child to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_child(Core.EntityPath parent, string child_name) throws StorageError {
+        _children_storage.remove_child(parent, child_name);
+    }
+    
+    /**
+     * Lists every child name of a category.
+     *
+     * @param parent The parent category path
+     * @return An enumerable of child names
+     */
+    public Invercargill.Enumerable<string> get_children(Core.EntityPath parent) {
+        return _children_storage.get_children(parent);
+    }
+    
+    // === Lifecycle Operations ===
+    
+    /**
+     * Clears all index data for a category, leaving its configuration
+     * and children intact.
+     *
+     * @param category_path The category's entity path
+     * @throws StorageError if the operation fails
+     */
+    public void clear_index(Core.EntityPath category_path) throws StorageError {
+        _index_storage.clear(category_path.to_string());
+    }
+    
+    /**
+     * Deletes all data for a category: configuration, index, and children.
+     *
+     * @param path The category path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        _config_storage.delete(path);
+        _index_storage.clear(path.to_string());
+        _children_storage.delete(path);
+    }
+}
+
+} // namespace Implexus.Storage.HighLevel

+ 84 - 0
src/Storage/HighLevel/ContainerStore.vala

@@ -0,0 +1,84 @@
+/**
+ * ContainerStore - High-level facade for container children
+ *
+ * Wraps ChildrenStorage behind a container-oriented API.
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.HighLevel {
+
+/**
+ * High-level facade for container children operations.
+ *
+ * Every method delegates to the underlying ChildrenStorage; this class
+ * keeps no state of its own beyond that delegate.
+ */
+public class ContainerStore : Object {
+    
+    private LowLevel.ChildrenStorage _children_storage;
+    
+    /**
+     * Creates a new ContainerStore backed by the given Dbm.
+     *
+     * @param dbm The Dbm backend used by the children storage
+     */
+    public ContainerStore(Dbm dbm) {
+        _children_storage = new LowLevel.ChildrenStorage(dbm);
+    }
+    
+    /**
+     * Adds a child name to a container's children set.
+     *
+     * @param parent The parent container path
+     * @param child_name The name of the child to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_child(Core.EntityPath parent, string child_name) throws StorageError {
+        _children_storage.add_child(parent, child_name);
+    }
+    
+    /**
+     * Removes a child name from a container's children set.
+     *
+     * @param parent The parent container path
+     * @param child_name The name of the child to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_child(Core.EntityPath parent, string child_name) throws StorageError {
+        _children_storage.remove_child(parent, child_name);
+    }
+    
+    /**
+     * Reports whether a container has a child with the given name.
+     *
+     * @param parent The parent container path
+     * @param child_name The name of the child to check
+     * @return True if the child exists
+     */
+    public bool has_child(Core.EntityPath parent, string child_name) {
+        return _children_storage.has_child(parent, child_name);
+    }
+    
+    /**
+     * Lists every child name of a container.
+     *
+     * @param parent The parent container path
+     * @return An enumerable of child names
+     */
+    public Invercargill.Enumerable<string> get_children(Core.EntityPath parent) {
+        return _children_storage.get_children(parent);
+    }
+    
+    /**
+     * Deletes all children data for a container.
+     *
+     * @param parent The parent container path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath parent) throws StorageError {
+        _children_storage.delete(parent);
+    }
+}
+
+} // namespace Implexus.Storage.HighLevel

+ 63 - 0
src/Storage/HighLevel/DocumentStore.vala

@@ -0,0 +1,63 @@
+/**
+ * DocumentStore - High-level facade for document properties
+ *
+ * Composes PropertiesStorage to provide document-specific APIs.
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.HighLevel {
+
+/**
+ * High-level facade over document property persistence.
+ *
+ * A thin wrapper around PropertiesStorage that presents a
+ * document-oriented API.
+ */
+public class DocumentStore : Object {
+    
+    // Backing low-level property storage.
+    private LowLevel.PropertiesStorage _properties;
+    
+    /**
+     * Creates a DocumentStore bound to the supplied backend.
+     *
+     * @param dbm The key-value backend used for persistence
+     */
+    public DocumentStore(Dbm dbm) {
+        this._properties = new LowLevel.PropertiesStorage(dbm);
+    }
+    
+    /**
+     * Persists the property set of a document.
+     *
+     * @param path The entity path
+     * @param properties The property set to persist
+     * @throws StorageError if the underlying write fails
+     */
+    public void store_properties(Core.EntityPath path, Invercargill.Properties properties) throws StorageError {
+        this._properties.store(path, properties);
+    }
+    
+    /**
+     * Reads back the property set of a document.
+     *
+     * @param path The entity path
+     * @return The stored properties, or null when absent
+     * @throws StorageError if the stored data is corrupt
+     */
+    public Invercargill.Properties? load_properties(Core.EntityPath path) throws StorageError {
+        var loaded = this._properties.load(path);
+        return loaded;
+    }
+    
+    /**
+     * Removes the stored property set of a document.
+     *
+     * @param path The entity path
+     * @throws StorageError if the underlying delete fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        this._properties.delete(path);
+    }
+}
+
+} // namespace Implexus.Storage.HighLevel

+ 124 - 0
src/Storage/HighLevel/EntityStore.vala

@@ -0,0 +1,124 @@
+/**
+ * EntityStore - High-level facade for entity metadata and type index
+ *
+ * Composes EntityMetadataStorage and TypeIndexStorage to provide
+ * entity-specific APIs.
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.HighLevel {
+
+/**
+ * High-level facade over entity metadata and the document type index.
+ *
+ * Composes EntityMetadataStorage and TypeIndexStorage behind a single
+ * entity-oriented API.
+ */
+public class EntityStore : Object {
+    
+    // Per-entity metadata records.
+    private LowLevel.EntityMetadataStorage _metadata;
+    // Mapping from type label to the documents carrying it.
+    private LowLevel.TypeIndexStorage _type_index;
+    
+    /**
+     * Creates an EntityStore bound to the supplied backend.
+     *
+     * @param dbm The key-value backend used for persistence
+     */
+    public EntityStore(Dbm dbm) {
+        this._metadata = new LowLevel.EntityMetadataStorage(dbm);
+        this._type_index = new LowLevel.TypeIndexStorage(dbm);
+    }
+    
+    // === Metadata ===
+    
+    /**
+     * Persists metadata for an entity.
+     *
+     * @param path The entity path
+     * @param type The entity type
+     * @param type_label Optional type label (used for documents)
+     * @throws StorageError if the underlying write fails
+     */
+    public void store_metadata(Core.EntityPath path, Core.EntityType type, string? type_label = null) throws StorageError {
+        this._metadata.store_metadata(path, type, type_label);
+    }
+    
+    /**
+     * Looks up the entity type recorded at a path.
+     *
+     * @param path The entity path
+     * @return The entity type, or null when no entity is recorded
+     * @throws StorageError if the stored data is corrupt
+     */
+    public Core.EntityType? get_entity_type(Core.EntityPath path) throws StorageError {
+        var entity_type = this._metadata.get_entity_type(path);
+        return entity_type;
+    }
+    
+    /**
+     * Looks up the type label recorded at a path.
+     *
+     * @param path The entity path
+     * @return The type label, or null when missing or empty
+     * @throws StorageError if the stored data is corrupt
+     */
+    public string? get_type_label(Core.EntityPath path) throws StorageError {
+        var label = this._metadata.get_type_label(path);
+        return label;
+    }
+    
+    /**
+     * Reports whether any entity is recorded at the given path.
+     *
+     * @param path The entity path
+     * @return True when an entity exists there
+     */
+    public bool exists(Core.EntityPath path) {
+        return this._metadata.exists(path);
+    }
+    
+    /**
+     * Removes the metadata record for an entity.
+     *
+     * @param path The entity path
+     * @throws StorageError if the underlying delete fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        this._metadata.delete(path);
+    }
+    
+    // === Type index ===
+    
+    /**
+     * Adds a document to the index entry for a type label.
+     *
+     * @param type_label The type label to index under
+     * @param doc_path The document path to record
+     * @throws StorageError if the underlying write fails
+     */
+    public void register_document_type(string type_label, string doc_path) throws StorageError {
+        this._type_index.add_document(type_label, doc_path);
+    }
+    
+    /**
+     * Removes a document from the index entry for a type label.
+     *
+     * @param type_label The type label the document was indexed under
+     * @param doc_path The document path to drop
+     * @throws StorageError if the underlying write fails
+     */
+    public void unregister_document_type(string type_label, string doc_path) throws StorageError {
+        this._type_index.remove_document(type_label, doc_path);
+    }
+    
+    /**
+     * Enumerates every document indexed under a type label.
+     *
+     * @param type_label The type label to look up
+     * @return An enumerable of document paths (possibly empty)
+     */
+    public Invercargill.Enumerable<string> get_documents_by_type(string type_label) {
+        return this._type_index.get_documents(type_label);
+    }
+}
+
+} // namespace Implexus.Storage.HighLevel

+ 296 - 0
src/Storage/HighLevel/IndexStore.vala

@@ -0,0 +1,296 @@
+/**
+ * IndexStore - High-level facade for text index operations
+ *
+ * Composes CategoryConfigStorage and TextIndexStorage
+ * to provide index-specific APIs.
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.HighLevel {
+
+/**
+ * High-level facade over the text-index storage primitives.
+ *
+ * Composes CategoryConfigStorage and TextIndexStorage behind one
+ * index-oriented API: configuration, n-gram maintenance, reverse
+ * lookups, content caching and lifecycle.
+ */
+public class IndexStore : Object {
+    
+    // Per-index configuration records.
+    private LowLevel.CategoryConfigStorage _config;
+    // Trigram / bigram / unigram index data.
+    private LowLevel.TextIndexStorage _text_index;
+    
+    /**
+     * Creates an IndexStore bound to the supplied backend.
+     *
+     * @param dbm The key-value backend used for persistence
+     */
+    public IndexStore(Dbm dbm) {
+        this._config = new LowLevel.CategoryConfigStorage(dbm);
+        this._text_index = new LowLevel.TextIndexStorage(dbm);
+    }
+    
+    // === Configuration ===
+    
+    /**
+     * Persists the configuration of an index entity.
+     *
+     * @param path The index path
+     * @param type_label The type label of entities the index covers
+     * @param expression The index expression
+     * @throws StorageError if the underlying write fails
+     */
+    public void store_config(Core.EntityPath path, string type_label, string expression) throws StorageError {
+        this._config.store(path, type_label, expression);
+    }
+    
+    /**
+     * Reads back the configuration of an index entity.
+     *
+     * @param path The index path
+     * @return The stored configuration, or null when absent
+     * @throws StorageError if the stored data is corrupt
+     */
+    public LowLevel.CategoryConfig? load_config(Core.EntityPath path) throws StorageError {
+        return this._config.load(path);
+    }
+    
+    // === Single trigram operations ===
+    
+    /**
+     * Associates a document with a trigram.
+     *
+     * Membership checking is O(1) (HashSet) in the underlying storage.
+     *
+     * @param index_path The index entity's path
+     * @param trigram The 3-character trigram
+     * @param doc_path The document path to associate
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_trigram(Core.EntityPath index_path, string trigram, string doc_path) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_trigram(key, trigram, doc_path);
+    }
+    
+    /**
+     * Dissociates a document from a trigram.
+     *
+     * Membership checking is O(1) (HashSet) in the underlying storage.
+     *
+     * @param index_path The index entity's path
+     * @param trigram The 3-character trigram
+     * @param doc_path The document path to dissociate
+     * @throws StorageError if the underlying write fails
+     */
+    public void remove_trigram(Core.EntityPath index_path, string trigram, string doc_path) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.remove_trigram(key, trigram, doc_path);
+    }
+    
+    /**
+     * Enumerates every document associated with a trigram.
+     *
+     * @param index_path The index entity's path
+     * @param trigram The 3-character trigram
+     * @return An enumerable of document paths
+     */
+    public Invercargill.Enumerable<string> get_documents_for_trigram(Core.EntityPath index_path, string trigram) {
+        var key = index_path.to_string();
+        return this._text_index.get_documents_for_trigram(key, trigram);
+    }
+    
+    // === Batch trigram operations ===
+    
+    /**
+     * Associates several documents with one trigram in a single pass.
+     *
+     * Uses O(1) HashSet membership checks and change tracking internally.
+     *
+     * @param index_path The index entity's path
+     * @param trigram The 3-character trigram
+     * @param doc_paths The document paths to associate
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_trigram_batch(Core.EntityPath index_path, string trigram, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_trigram_batch(key, trigram, doc_paths);
+    }
+    
+    /**
+     * Dissociates several documents from one trigram in a single pass.
+     *
+     * Uses O(1) HashSet membership checks and an efficient rebuild internally.
+     *
+     * @param index_path The index entity's path
+     * @param trigram The 3-character trigram
+     * @param doc_paths The document paths to dissociate
+     * @throws StorageError if the underlying write fails
+     */
+    public void remove_trigram_batch(Core.EntityPath index_path, string trigram, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.remove_trigram_batch(key, trigram, doc_paths);
+    }
+    
+    /**
+     * Applies a batch of trigram additions, each with its document paths.
+     *
+     * @param index_path The index entity's path
+     * @param additions Dictionary mapping trigrams to vectors of document paths
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_trigrams_batch(Core.EntityPath index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_trigrams_batch(key, additions);
+    }
+    
+    /**
+     * Applies a batch of trigram removals, each with its document paths.
+     *
+     * @param index_path The index entity's path
+     * @param removals Dictionary mapping trigrams to vectors of document paths
+     * @throws StorageError if the underlying write fails
+     */
+    public void remove_trigrams_batch(Core.EntityPath index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> removals) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.remove_trigrams_batch(key, removals);
+    }
+    
+    // === Reverse index operations ===
+    
+    /**
+     * Records that a trigram contains a bigram, enabling reverse lookup.
+     *
+     * @param index_path The index entity's path
+     * @param bigram The 2-character bigram
+     * @param trigram The trigram containing the bigram
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_bigram_mapping(Core.EntityPath index_path, string bigram, string trigram) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_bigram_mapping(key, bigram, trigram);
+    }
+    
+    /**
+     * Enumerates the trigrams recorded for a bigram (reverse lookup).
+     *
+     * @param index_path The index entity's path
+     * @param bigram The 2-character bigram
+     * @return An enumerable of trigrams
+     */
+    public Invercargill.Enumerable<string> get_trigrams_for_bigram(Core.EntityPath index_path, string bigram) {
+        var key = index_path.to_string();
+        return this._text_index.get_trigrams_for_bigram(key, bigram);
+    }
+    
+    /**
+     * Applies a batch of bigram-to-trigram mappings.
+     *
+     * @param index_path The index entity's path
+     * @param additions Dictionary mapping bigrams to vectors of trigrams
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_bigram_mappings_batch(Core.EntityPath index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_bigram_mappings_batch(key, additions);
+    }
+    
+    /**
+     * Records that a bigram starts with a unigram, enabling reverse lookup.
+     *
+     * @param index_path The index entity's path
+     * @param unigram The single character
+     * @param bigram The bigram starting with the unigram
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_unigram_mapping(Core.EntityPath index_path, string unigram, string bigram) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_unigram_mapping(key, unigram, bigram);
+    }
+    
+    /**
+     * Enumerates the bigrams recorded for a unigram (reverse lookup).
+     *
+     * @param index_path The index entity's path
+     * @param unigram The single character
+     * @return An enumerable of bigrams
+     */
+    public Invercargill.Enumerable<string> get_bigrams_for_unigram(Core.EntityPath index_path, string unigram) {
+        var key = index_path.to_string();
+        return this._text_index.get_bigrams_for_unigram(key, unigram);
+    }
+    
+    /**
+     * Applies a batch of unigram-to-bigram mappings.
+     *
+     * @param index_path The index entity's path
+     * @param additions Dictionary mapping unigrams to vectors of bigrams
+     * @throws StorageError if the underlying write fails
+     */
+    public void add_unigram_mappings_batch(Core.EntityPath index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.add_unigram_mappings_batch(key, additions);
+    }
+    
+    // === Content cache operations ===
+    
+    /**
+     * Caches the indexed content for a document.
+     *
+     * The cached field value is used for verification and reindexing.
+     *
+     * @param index_path The index entity's path
+     * @param doc_path The document path
+     * @param content The indexed content
+     * @throws StorageError if the underlying write fails
+     */
+    public void store_document_content(Core.EntityPath index_path, string doc_path, string content) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.store_document_content(key, doc_path, content);
+    }
+    
+    /**
+     * Reads back the cached indexed content for a document.
+     *
+     * @param index_path The index entity's path
+     * @param doc_path The document path
+     * @return The cached content, or null when absent
+     */
+    public string? get_document_content(Core.EntityPath index_path, string doc_path) {
+        var key = index_path.to_string();
+        return this._text_index.get_document_content(key, doc_path);
+    }
+    
+    /**
+     * Drops the cached indexed content for a document.
+     *
+     * @param index_path The index entity's path
+     * @param doc_path The document path
+     * @throws StorageError if the underlying delete fails
+     */
+    public void remove_document_content(Core.EntityPath index_path, string doc_path) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.remove_document_content(key, doc_path);
+    }
+    
+    // === Lifecycle ===
+    
+    /**
+     * Clears every n-gram record held for an index entity.
+     *
+     * @param index_path The index entity's path
+     * @throws StorageError if the underlying delete fails
+     */
+    public void clear_index(Core.EntityPath index_path) throws StorageError {
+        var key = index_path.to_string();
+        this._text_index.clear(key);
+    }
+    
+    /**
+     * Deletes all stored data for an index: its configuration and its
+     * n-gram records.
+     *
+     * NOTE(review): the two deletes are not atomic — if the config delete
+     * throws, the n-gram data is left in place; confirm whether callers
+     * wrap this in a Dbm-level transaction.
+     *
+     * @param path The index path
+     * @throws StorageError if either delete fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        this._config.delete(path);
+        this._text_index.clear(path.to_string());
+    }
+}
+
+} // namespace Implexus.Storage.HighLevel

+ 491 - 0
src/Storage/Lmdb/LmdbDbm.vala

@@ -0,0 +1,491 @@
+/**
+ * LmdbDbm - LMDB-based implementation of the Dbm interface.
+ *
+ * This implementation uses the Lightning Memory-Mapped Database (LMDB)
+ * for high-performance key-value storage with ACID transactions.
+ */
+
+namespace Implexus.Storage {
+
+    /**
+     * LMDB-based Dbm implementation.
+     *
+     * Provides high-performance key-value storage using LMDB.  Dbm-level
+     * transactions are buffered in memory and flushed inside a single
+     * native LMDB write transaction on commit, so a commit is atomic:
+     * either every buffered change lands or none does.
+     */
+    public class LmdbDbm : Object, Dbm {
+        private Lmdb.Env? _env = null;
+        private uint _dbi = 0;
+        private string _path = "";
+        private bool _read_only = false;
+        private bool _is_open = false;
+        private bool _in_transaction_value = false;
+        // Pending writes buffered for the current Dbm-level transaction.
+        private Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData> _transaction_buffer;
+        // Keys marked for deletion in the current Dbm-level transaction.
+        private Invercargill.DataStructures.Dictionary<string, bool> _transaction_deletes;
+
+        /**
+         * Default map size (1GB)
+         */
+        private const size_t DEFAULT_MAP_SIZE = 1024 * 1024 * 1024;
+
+        /**
+         * Creates a new LmdbDbm instance.
+         *
+         * The database is not opened until open() is called.
+         */
+        public LmdbDbm() {
+            _transaction_buffer = new Invercargill.DataStructures.Dictionary<string, Invercargill.BinaryData>();
+            _transaction_deletes = new Invercargill.DataStructures.Dictionary<string, bool>();
+        }
+
+        /**
+         * Opens the database.
+         *
+         * @param path Path to the database directory
+         * @param read_only If true, opens in read-only mode
+         * @throws StorageError if the database cannot be opened
+         */
+        public void open(string path, bool read_only = false) throws StorageError {
+            if (_is_open) {
+                throw new StorageError.IO_ERROR("Database already open");
+            }
+
+            _path = path;
+            _read_only = read_only;
+
+            // Create environment
+            int rc = Lmdb.Env.create(out _env);
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to create LMDB environment: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            // Set map size
+            rc = ((!) _env).set_mapsize(DEFAULT_MAP_SIZE);
+            if (rc != 0) {
+                _env = null;
+                throw new StorageError.IO_ERROR("Failed to set map size: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            // Set max databases (we only use one, but allow for future expansion)
+            rc = ((!) _env).set_maxdbs(1);
+            if (rc != 0) {
+                _env = null;
+                throw new StorageError.IO_ERROR("Failed to set max databases: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            // Determine flags
+            uint flags = 0;
+            if (read_only) {
+                flags |= Lmdb.EnvFlags.RDONLY;
+            }
+            // Use NOTLS to allow multiple transactions in same thread
+            flags |= Lmdb.EnvFlags.NOTLS;
+
+            // Create directory if it doesn't exist
+            if (!read_only) {
+                DirUtils.create_with_parents(path, 0755);
+            }
+
+            // Open environment
+            rc = ((!) _env).open(path, flags, 0644);
+            if (rc != 0) {
+                _env = null;
+                throw new StorageError.IO_ERROR("Failed to open LMDB environment: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            // Open the (unnamed) database within a transaction
+            Lmdb.Txn txn;
+            uint txn_flags = read_only ? Lmdb.TxnFlags.RDONLY : 0;
+            rc = Lmdb.Txn.begin((!) _env, null, txn_flags, out txn);
+            if (rc != 0) {
+                _env = null;
+                throw new StorageError.IO_ERROR("Failed to begin transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            uint db_flags = 0;
+            if (!read_only) {
+                db_flags |= Lmdb.DbFlags.CREATE;
+            }
+
+            rc = txn.dbi_open(null, db_flags, out _dbi);
+            if (rc != 0) {
+                txn.abort();
+                _env = null;
+                throw new StorageError.IO_ERROR("Failed to open database: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            // Commit in read-only mode too: committing a RDONLY transaction
+            // is valid in LMDB and, unlike abort(), it keeps the freshly
+            // opened database handle (_dbi) valid for later transactions.
+            rc = txn.commit();
+            if (rc != 0) {
+                _env = null;
+                throw new StorageError.IO_ERROR("Failed to commit initial transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            _is_open = true;
+        }
+
+        /**
+         * Closes the database.
+         *
+         * Any uncommitted Dbm-level transaction is rolled back first.
+         *
+         * @throws StorageError if the database is not open
+         */
+        public void close() throws StorageError {
+            if (!_is_open) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            // Discard any buffered, uncommitted changes
+            if (_in_transaction_value) {
+                rollback_transaction();
+            }
+
+            // Environment is freed when _env is set to null (due to free_function)
+            _env = null;
+            _is_open = false;
+        }
+
+        /**
+         * Indicates whether the database is currently open.
+         */
+        public bool is_open {
+            get { return _is_open; }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool supports_concurrent_reads {
+            get { return true; }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool in_transaction {
+            get { return _in_transaction_value; }
+        }
+
+        /**
+         * Checks if a key exists in the database.
+         *
+         * Pending transaction state (buffered writes and deletes) takes
+         * precedence over on-disk data.
+         *
+         * @param key The key to check
+         * @return True if the key exists
+         */
+        public bool has_key(string key) {
+            // Check transaction state first
+            if (_in_transaction_value) {
+                if (_transaction_deletes.has(key)) {
+                    return false;
+                }
+                if (_transaction_buffer.has(key)) {
+                    return true;
+                }
+            }
+
+            if (!_is_open || _env == null) {
+                return false;
+            }
+
+            // Use a read transaction
+            Lmdb.Txn txn;
+            uint txn_flags = Lmdb.TxnFlags.RDONLY;
+            int rc = Lmdb.Txn.begin((!) _env, null, txn_flags, out txn);
+            if (rc != 0) {
+                return false;
+            }
+
+            Lmdb.Val key_val = Lmdb.Val.from_string(key);
+            Lmdb.Val data_val = Lmdb.Val();
+
+            rc = txn.get(_dbi, &key_val, &data_val);
+
+            txn.abort();
+
+            return rc == 0;
+        }
+
+        /**
+         * Gets the value for a key.
+         *
+         * Pending transaction state (buffered writes and deletes) takes
+         * precedence over on-disk data.
+         *
+         * @param key The key to look up
+         * @return The value, or null if not found
+         */
+        public Invercargill.BinaryData? @get(string key) {
+            // Check transaction state first
+            if (_in_transaction_value) {
+                if (_transaction_deletes.has(key)) {
+                    return null;
+                }
+                if (_transaction_buffer.has(key)) {
+                    try {
+                        return _transaction_buffer.get(key);
+                    } catch (Invercargill.IndexError e) {
+                        return null;
+                    }
+                }
+            }
+
+            if (!_is_open || _env == null) {
+                return null;
+            }
+
+            // Use a read transaction
+            Lmdb.Txn txn;
+            uint txn_flags = Lmdb.TxnFlags.RDONLY;
+            int rc = Lmdb.Txn.begin((!) _env, null, txn_flags, out txn);
+            if (rc != 0) {
+                return null;
+            }
+
+            Lmdb.Val key_val = Lmdb.Val.from_string(key);
+            Lmdb.Val data_val = Lmdb.Val();
+
+            rc = txn.get(_dbi, &key_val, &data_val);
+
+            Invercargill.BinaryData? result = null;
+            if (rc == 0) {
+                // Copy out of the memory map before ending the transaction —
+                // LMDB-owned data is only valid while the transaction lives.
+                uint8[] data = data_val.as_bytes();
+                result = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
+            }
+
+            txn.abort();
+
+            return result;
+        }
+
+        /**
+         * Sets a key-value pair.
+         *
+         * Inside a Dbm-level transaction the write is buffered; otherwise
+         * it is applied immediately in its own LMDB write transaction.
+         *
+         * @param key The key
+         * @param value The value
+         * @throws StorageError if the write operation fails
+         */
+        public void @set(string key, Invercargill.BinaryData value) throws StorageError {
+            if (!_is_open || _env == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            if (_read_only) {
+                throw new StorageError.IO_ERROR("Database opened in read-only mode");
+            }
+
+            if (_in_transaction_value) {
+                try {
+                    _transaction_buffer.set(key, value);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.IO_ERROR("Failed to buffer transaction set: %s".printf(e.message));
+                }
+                // A buffered write supersedes any earlier buffered delete
+                _transaction_deletes.remove(key);
+            } else {
+                write_key(key, value);
+            }
+        }
+
+        /**
+         * Deletes a key from the database.
+         *
+         * Inside a Dbm-level transaction the delete is buffered; otherwise
+         * it is applied immediately in its own LMDB write transaction.
+         *
+         * @param key The key to delete
+         * @throws StorageError if the delete operation fails
+         */
+        public void delete(string key) throws StorageError {
+            if (!_is_open || _env == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            if (_read_only) {
+                throw new StorageError.IO_ERROR("Database opened in read-only mode");
+            }
+
+            if (_in_transaction_value) {
+                try {
+                    _transaction_buffer.remove(key);
+                } catch (Invercargill.IndexError e) {
+                    // Key not in buffer, that's fine
+                }
+                try {
+                    _transaction_deletes.set(key, true);
+                } catch (Invercargill.IndexError e) {
+                    throw new StorageError.IO_ERROR("Failed to buffer transaction delete: %s".printf(e.message));
+                }
+            } else {
+                delete_key(key);
+            }
+        }
+
+        /**
+         * Gets all keys in the database.
+         *
+         * Reflects pending transaction state: buffered keys are included,
+         * keys pending deletion are excluded.
+         *
+         * @return Enumerable of all keys
+         */
+        public Invercargill.Enumerable<string> keys {
+            owned get {
+                var result = new Invercargill.DataStructures.HashSet<string>();
+
+                if (_is_open && _env != null) {
+                    // Use a read transaction to iterate
+                    Lmdb.Txn txn;
+                    uint txn_flags = Lmdb.TxnFlags.RDONLY;
+                    int rc = Lmdb.Txn.begin((!) _env, null, txn_flags, out txn);
+                    if (rc == 0) {
+                        Lmdb.Cursor cursor;
+                        rc = Lmdb.Cursor.open(txn, _dbi, out cursor);
+                        if (rc == 0) {
+                            Lmdb.Val key_val = Lmdb.Val();
+                            Lmdb.Val data_val = Lmdb.Val();
+
+                            // Get first key
+                            rc = cursor.get(&key_val, &data_val, Lmdb.CursorOp.FIRST);
+                            while (rc == 0) {
+                                string key = key_val.as_string();
+
+                                // Check if key is pending deletion in transaction
+                                if (_in_transaction_value && _transaction_deletes.has(key)) {
+                                    // Skip this key
+                                } else {
+                                    result.add(key);
+                                }
+                                
+                                // Get next key
+                                rc = cursor.get(&key_val, &data_val, Lmdb.CursorOp.NEXT);
+                            }
+                        }
+                        txn.abort();
+                    }
+                }
+
+                // Add keys from transaction buffer
+                if (_in_transaction_value) {
+                    foreach (var key in _transaction_buffer.keys) {
+                        if (!result.has(key)) {
+                            result.add(key);
+                        }
+                    }
+                }
+
+                return result;
+            }
+        }
+
+        /**
+         * Begins a transaction.
+         *
+         * @throws StorageError if already in a transaction
+         */
+        public void begin_transaction() throws StorageError {
+            if (_in_transaction_value) {
+                throw new StorageError.IO_ERROR("Already in transaction");
+            }
+            _in_transaction_value = true;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Commits the current transaction.
+         *
+         * All buffered deletes and writes are applied inside a single
+         * native LMDB write transaction, making the commit atomic: if any
+         * operation fails, the LMDB transaction is aborted, nothing is
+         * written, and the Dbm-level transaction remains open so the caller
+         * can retry or roll back.
+         *
+         * @throws StorageError if not in a transaction or if commit fails
+         */
+        public void commit_transaction() throws StorageError {
+            if (!_in_transaction_value) {
+                throw new StorageError.IO_ERROR("Not in transaction");
+            }
+
+            if (!_is_open || _env == null) {
+                throw new StorageError.IO_ERROR("Database not open");
+            }
+
+            // Flush everything inside one LMDB write transaction
+            Lmdb.Txn txn;
+            int rc = Lmdb.Txn.begin((!) _env, null, 0, out txn);
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to begin commit transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            try {
+                // Apply all deletes (missing keys are not an error)
+                foreach (var key in _transaction_deletes.keys) {
+                    Lmdb.Val del_key = Lmdb.Val.from_string(key);
+                    rc = txn.del(_dbi, &del_key, null);
+                    if (rc != 0 && rc != Lmdb.ErrorCode.NOTFOUND) {
+                        txn.abort();
+                        throw new StorageError.IO_ERROR("Failed to delete key: %s".printf(Lmdb.strerror(rc)));
+                    }
+                }
+
+                // Apply all sets
+                foreach (var key in _transaction_buffer.keys) {
+                    var value = _transaction_buffer.get(key);
+                    var bytes = value.to_bytes();
+                    uint8[] data = bytes.get_data();
+                    Lmdb.Val put_key = Lmdb.Val.from_string(key);
+                    Lmdb.Val put_val = Lmdb.Val.from_bytes(data);
+                    rc = txn.put(_dbi, &put_key, &put_val, 0);
+                    if (rc != 0) {
+                        txn.abort();
+                        throw new StorageError.IO_ERROR("Failed to store key: %s".printf(Lmdb.strerror(rc)));
+                    }
+                }
+            } catch (Invercargill.IndexError e) {
+                txn.abort();
+                throw new StorageError.IO_ERROR("Transaction commit failed: %s".printf(e.message));
+            }
+
+            rc = txn.commit();
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to commit transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            // Only clear buffered state once everything is durably committed
+            _in_transaction_value = false;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Rolls back the current transaction, discarding buffered changes.
+         */
+        public void rollback_transaction() {
+            _in_transaction_value = false;
+            _transaction_buffer.clear();
+            _transaction_deletes.clear();
+        }
+
+        /**
+         * Writes a key-value pair immediately in its own LMDB transaction.
+         *
+         * Used only for non-transactional (auto-commit) writes.
+         */
+        private void write_key(string key, Invercargill.BinaryData value) throws StorageError {
+            var bytes = value.to_bytes();
+            uint8[] data = bytes.get_data();
+
+            // Create a write transaction
+            Lmdb.Txn txn;
+            int rc = Lmdb.Txn.begin((!) _env, null, 0, out txn);
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to begin write transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            Lmdb.Val key_val = Lmdb.Val.from_string(key);
+            Lmdb.Val data_val = Lmdb.Val.from_bytes(data);
+
+            rc = txn.put(_dbi, &key_val, &data_val, 0);
+            if (rc != 0) {
+                txn.abort();
+                throw new StorageError.IO_ERROR("Failed to store key: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            rc = txn.commit();
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to commit write transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+        }
+
+        /**
+         * Deletes a key immediately in its own LMDB transaction.
+         *
+         * Used only for non-transactional (auto-commit) deletes; a missing
+         * key is not treated as an error.
+         */
+        private void delete_key(string key) throws StorageError {
+            // Create a write transaction
+            Lmdb.Txn txn;
+            int rc = Lmdb.Txn.begin((!) _env, null, 0, out txn);
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to begin delete transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            Lmdb.Val key_val = Lmdb.Val.from_string(key);
+
+            rc = txn.del(_dbi, &key_val, null);
+            if (rc != 0 && rc != Lmdb.ErrorCode.NOTFOUND) {
+                txn.abort();
+                throw new StorageError.IO_ERROR("Failed to delete key: %s".printf(Lmdb.strerror(rc)));
+            }
+
+            rc = txn.commit();
+            if (rc != 0) {
+                throw new StorageError.IO_ERROR("Failed to commit delete transaction: %s".printf(Lmdb.strerror(rc)));
+            }
+        }
+    }
+}

+ 134 - 0
src/Storage/LowLevel/CatalogueConfigStorage.vala

@@ -0,0 +1,134 @@
+/**
+ * CatalogueConfigStorage - Low-level storage for catalogue configuration
+ *
+ * Handles the 'catcfg:' prefix for storing catalogue configuration.
+ *
+ * Key format: catcfg:<path>
+ * Value: Serialized (string type_label, string expression)
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Configuration data for a Catalogue entity.
+ */
+public class CatalogueConfig : Object {
+    /**
+     * The type label for documents to catalogue.
+     */
+    public string type_label { get; set; }
+
+    /**
+     * The expression used to extract the grouping key.
+     */
+    public string expression { get; set; }
+
+    /**
+     * Creates a new CatalogueConfig.
+     *
+     * @param type_label The type label for documents
+     * @param expression The expression to extract the grouping key
+     */
+    public CatalogueConfig(string type_label, string expression) {
+        // GObject-style construction: chain to Object () so the class also
+        // works via Object.new () and stays friendly to subclassing,
+        // instead of assigning properties in the creation method body.
+        Object(type_label: type_label, expression: expression);
+    }
+}
+
+/**
+ * Low-level storage for catalogue configuration.
+ *
+ * Provides type-safe operations for storing and retrieving catalogue
+ * configuration records kept under the 'catcfg:' key prefix.
+ */
+public class CatalogueConfigStorage : Object {
+
+    /**
+     * Key prefix for catalogue config entries.
+     */
+    private const string PREFIX = "catcfg:";
+
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+
+    /**
+     * Creates a new CatalogueConfigStorage backed by the given Dbm.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public CatalogueConfigStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+
+    /**
+     * Stores catalogue configuration.
+     *
+     * @param path The catalogue path
+     * @param type_label The type label for documents to catalogue
+     * @param expression The expression to extract the grouping key
+     * @throws StorageError if the operation fails
+     */
+    public void store(Core.EntityPath path, string type_label, string expression) throws StorageError {
+        var serializer = new ElementWriter();
+        serializer.write_element(new Invercargill.NativeElement<string>(type_label));
+        serializer.write_element(new Invercargill.NativeElement<string>(expression));
+
+        _dbm.set(make_key(path), serializer.to_binary_data());
+    }
+
+    /**
+     * Loads catalogue configuration.
+     *
+     * @param path The catalogue path
+     * @return The configuration, or null if not found
+     * @throws StorageError if the data is corrupt
+     */
+    public CatalogueConfig? load(Core.EntityPath path) throws StorageError {
+        var data = _dbm.get(make_key(path));
+        if (data == null) {
+            return null;
+        }
+
+        var reader = new ElementReader((!) data);
+        try {
+            // The record is two serialized strings: label then expression.
+            var label = reader.read_element();
+            var expr = reader.read_element();
+
+            if (label.is_null() || expr.is_null()) {
+                return null;
+            }
+
+            return new CatalogueConfig(label.as<string>(), expr.as<string>());
+        } catch (Invercargill.ElementError e) {
+            throw new StorageError.CORRUPT_DATA("Failed to read catalogue config: %s".printf(e.message));
+        }
+    }
+
+    /**
+     * Deletes catalogue configuration.
+     *
+     * @param path The catalogue path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        try {
+            _dbm.delete(make_key(path));
+        } catch (StorageError e) {
+            // A missing key is not an error for delete.
+        }
+    }
+
+    /**
+     * Builds the storage key for a catalogue path.
+     */
+    private string make_key(Core.EntityPath path) {
+        return PREFIX + path.to_string();
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 409 - 0
src/Storage/LowLevel/CatalogueIndexStorage.vala

@@ -0,0 +1,409 @@
+/**
+ * CatalogueIndexStorage - Low-level storage for catalogue group index
+ *
+ * Handles the 'catl:' prefix for storing catalogue group mappings.
+ *
+ * Key formats:
+ * - catl:<catalogue_path>:keys - List of group keys
+ * - catl:<catalogue_path>:group:<key> - Document paths in group
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for catalogue group index.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * catalogue group mappings using the 'catl:' key prefix.
+ *
+ * Key formats:
+ *  - catl:<catalogue_path>:keys        - list of group keys
+ *  - catl:<catalogue_path>:group:<key> - document paths in the group
+ *
+ * NOTE(review): emptying a group deletes its member record but leaves the
+ * group key in the ':keys' list, so get_keys() may report keys whose
+ * groups are empty. Confirm callers expect this before tightening it.
+ */
+public class CatalogueIndexStorage : Object {
+
+    /**
+     * Key prefix for catalogue index entries.
+     */
+    private const string PREFIX = "catl:";
+
+    /**
+     * Suffix for catalogue group key.
+     */
+    private const string GROUP_SUFFIX = ":group:";
+
+    /**
+     * Suffix for catalogue keys list.
+     */
+    private const string KEYS_SUFFIX = ":keys";
+
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+
+    /**
+     * Creates a new CatalogueIndexStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public CatalogueIndexStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+
+    /**
+     * Adds a document path to a catalogue group.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The group key (e.g., "john" for author grouping)
+     * @param doc_path The document path to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_to_group(string catalogue_path, string key, string doc_path) throws StorageError {
+        string group_key = group_members_key(catalogue_path, key);
+        var members = load_string_set(group_key);
+
+        // O(1) membership check via a transient HashSet.
+        if (!build_hash_set(members).has(doc_path)) {
+            members.add(doc_path);
+            save_string_set(group_key, members);
+        }
+
+        // Also add the key to the keys list
+        add_catalogue_key(catalogue_path, key);
+    }
+
+    /**
+     * Removes a document path from a catalogue group.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The group key
+     * @param doc_path The document path to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_from_group(string catalogue_path, string key, string doc_path) throws StorageError {
+        string group_key = group_members_key(catalogue_path, key);
+        var members = load_string_set(group_key);
+
+        if (build_hash_set(members).has(doc_path)) {
+            var excluded = new Invercargill.DataStructures.HashSet<string>();
+            excluded.add(doc_path);
+            save_string_set(group_key, vector_without(members, excluded));
+        }
+    }
+
+    /**
+     * Adds multiple document paths to a catalogue group.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The group key
+     * @param doc_paths The document paths to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_to_group_batch(string catalogue_path, string key, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        string group_key = group_members_key(catalogue_path, key);
+        var members = load_string_set(group_key);
+        var members_hash = build_hash_set(members);
+        bool changed = false;
+
+        foreach (var doc_path in doc_paths) {
+            if (!members_hash.has(doc_path)) {
+                members_hash.add(doc_path);
+                members.add(doc_path);
+                changed = true;
+            }
+        }
+
+        // One changed check covers both the save and the keys-list
+        // registration (the original tested the flag twice in a row).
+        if (changed) {
+            save_string_set(group_key, members);
+            add_catalogue_key(catalogue_path, key);
+        }
+    }
+
+    /**
+     * Removes multiple document paths from a catalogue group.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The group key
+     * @param doc_paths The document paths to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_from_group_batch(string catalogue_path, string key, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        string group_key = group_members_key(catalogue_path, key);
+        var members = load_string_set(group_key);
+        var to_remove = build_hash_set(doc_paths);
+
+        // Only rewrite the record when at least one member is affected.
+        bool changed = false;
+        foreach (var m in members) {
+            if (to_remove.has(m)) {
+                changed = true;
+                break;
+            }
+        }
+
+        if (changed) {
+            save_string_set(group_key, vector_without(members, to_remove));
+        }
+    }
+
+    /**
+     * Sets the complete member set for a catalogue group.
+     *
+     * This replaces any existing members with the provided set,
+     * deduplicating while preserving first-seen order.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The group key
+     * @param doc_paths The complete set of member paths
+     * @throws StorageError if the operation fails
+     */
+    public void set_group_members(string catalogue_path, string key, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        string group_key = group_members_key(catalogue_path, key);
+        var set = new Invercargill.DataStructures.Vector<string>();
+        var seen = new Invercargill.DataStructures.HashSet<string>();
+
+        foreach (var member in doc_paths) {
+            if (!seen.has(member)) {
+                seen.add(member);
+                set.add(member);
+            }
+        }
+        save_string_set(group_key, set);
+
+        // Register the key only when the group is non-empty.
+        if (set.count() > 0) {
+            add_catalogue_key(catalogue_path, key);
+        }
+    }
+
+    /**
+     * Gets all member document paths for a catalogue group.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The group key
+     * @return An enumerable of document paths (may be empty)
+     */
+    public Invercargill.Enumerable<string> get_group_members(string catalogue_path, string key) {
+        return load_string_set(group_members_key(catalogue_path, key)).as_enumerable();
+    }
+
+    /**
+     * Gets all keys for a catalogue.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @return An enumerable of keys (may be empty)
+     */
+    public Invercargill.Enumerable<string> get_keys(string catalogue_path) {
+        return load_string_set(catalogue_keys_key(catalogue_path)).as_enumerable();
+    }
+
+    /**
+     * Adds a key to a catalogue's key set.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The key to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_catalogue_key(string catalogue_path, string key) throws StorageError {
+        string keys_key = catalogue_keys_key(catalogue_path);
+        var keys = load_string_set(keys_key);
+
+        if (!build_hash_set(keys).has(key)) {
+            keys.add(key);
+            save_string_set(keys_key, keys);
+        }
+    }
+
+    /**
+     * Removes a key from a catalogue's key set.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @param key The key to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_catalogue_key(string catalogue_path, string key) throws StorageError {
+        string keys_key = catalogue_keys_key(catalogue_path);
+        var keys = load_string_set(keys_key);
+
+        if (build_hash_set(keys).has(key)) {
+            var excluded = new Invercargill.DataStructures.HashSet<string>();
+            excluded.add(key);
+            save_string_set(keys_key, vector_without(keys, excluded));
+        }
+    }
+
+    /**
+     * Clears all index data for a catalogue.
+     *
+     * This removes all group members and the keys list.
+     *
+     * @param catalogue_path The catalogue's path string
+     * @throws StorageError if the operation fails
+     */
+    public void clear(string catalogue_path) throws StorageError {
+        // Remove every group record referenced by the keys list.
+        var keys = load_string_set(catalogue_keys_key(catalogue_path));
+        foreach (var key in keys) {
+            try {
+                _dbm.delete(group_members_key(catalogue_path, key));
+            } catch (StorageError e) {
+                // Key doesn't exist, continue
+            }
+        }
+
+        // Remove the keys list itself
+        try {
+            _dbm.delete(catalogue_keys_key(catalogue_path));
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+
+    /**
+     * Creates a catalogue group key.
+     */
+    private string group_members_key(string catalogue_path, string group_key) {
+        return PREFIX + catalogue_path + GROUP_SUFFIX + group_key;
+    }
+
+    /**
+     * Creates a catalogue keys list key.
+     */
+    private string catalogue_keys_key(string catalogue_path) {
+        return PREFIX + catalogue_path + KEYS_SUFFIX;
+    }
+
+    /**
+     * Builds a transient HashSet from a vector for O(1) membership checks.
+     *
+     * @param items The items to index
+     * @return A HashSet containing every item
+     */
+    private Invercargill.DataStructures.HashSet<string> build_hash_set(Invercargill.DataStructures.Vector<string> items) {
+        var hash = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var item in items) hash.add(item);
+        return hash;
+    }
+
+    /**
+     * Returns a copy of items with every member of excluded removed,
+     * preserving the original order.
+     *
+     * @param items The source vector
+     * @param excluded The values to drop
+     * @return A new vector without the excluded values
+     */
+    private Invercargill.DataStructures.Vector<string> vector_without(Invercargill.DataStructures.Vector<string> items, Invercargill.DataStructures.HashSet<string> excluded) {
+        var kept = new Invercargill.DataStructures.Vector<string>();
+        foreach (var item in items) {
+            if (!excluded.has(item)) {
+                kept.add(item);
+            }
+        }
+        return kept;
+    }
+
+    /**
+     * Loads a set of strings from a key.
+     *
+     * Deduplicates during deserialization while preserving order.
+     *
+     * @param key The storage key
+     * @return A vector of strings (empty if key doesn't exist)
+     */
+    private Invercargill.DataStructures.Vector<string> load_string_set(string key) {
+        var result = new Invercargill.DataStructures.Vector<string>();
+        var data = _dbm.get(key);
+
+        if (data == null) {
+            return result;
+        }
+
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return result;
+            }
+
+            // The set is stored as an array of strings
+            var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+            var seen = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var item in array) {
+                if (!item.is_null()) {
+                    string value = item.as<string>();
+                    if (!seen.has(value)) {
+                        seen.add(value);
+                        result.add(value);
+                    }
+                }
+            }
+        } catch (Invercargill.ElementError e) {
+            // Corrupt data is logged and surfaced as a (possibly partial) set.
+            warning("Failed to read string set at %s: %s", key, e.message);
+        }
+
+        return result;
+    }
+
+    /**
+     * Saves a set of strings to a key.
+     *
+     * An empty set is represented by deleting the key entirely.
+     *
+     * @param key The storage key
+     * @param set The set of strings to save
+     * @throws StorageError if the operation fails
+     */
+    private void save_string_set(string key, Invercargill.DataStructures.Vector<string> set) throws StorageError {
+        if (set.count() == 0) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Key doesn't exist, that's fine
+            }
+            return;
+        }
+
+        // Store as array of strings
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        foreach (var value in set) {
+            array.add(new Invercargill.NativeElement<string>(value));
+        }
+
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+        _dbm.set(key, writer.to_binary_data());
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 134 - 0
src/Storage/LowLevel/CategoryConfigStorage.vala

@@ -0,0 +1,134 @@
+/**
+ * CategoryConfigStorage - Low-level storage for category/index configuration
+ *
+ * Handles the 'config:' prefix for storing category and index configuration.
+ *
+ * Key format: config:<path>
+ * Value: Serialized (string type_label, string expression)
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Configuration data for a Category or Index entity.
+ */
+public class CategoryConfig : Object {
+    /**
+     * The type label for entities in this category.
+     */
+    public string type_label { get; set; }
+
+    /**
+     * The expression used to generate the index.
+     */
+    public string expression { get; set; }
+
+    /**
+     * Creates a new CategoryConfig.
+     *
+     * @param type_label The type label for entities
+     * @param expression The index expression
+     */
+    public CategoryConfig(string type_label, string expression) {
+        // GObject-style construction: chain to Object () so the class also
+        // works via Object.new () and stays friendly to subclassing,
+        // instead of assigning properties in the creation method body.
+        Object(type_label: type_label, expression: expression);
+    }
+}
+
+/**
+ * Low-level storage for category/index configuration.
+ *
+ * Provides type-safe operations for storing and retrieving category and
+ * index configuration records kept under the 'config:' key prefix.
+ */
+public class CategoryConfigStorage : Object {
+
+    /**
+     * Key prefix for category config entries.
+     */
+    private const string PREFIX = "config:";
+
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+
+    /**
+     * Creates a new CategoryConfigStorage backed by the given Dbm.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public CategoryConfigStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+
+    /**
+     * Stores category configuration.
+     *
+     * @param path The category path
+     * @param type_label The type label for entities
+     * @param expression The index expression
+     * @throws StorageError if the operation fails
+     */
+    public void store(Core.EntityPath path, string type_label, string expression) throws StorageError {
+        var serializer = new ElementWriter();
+        serializer.write_element(new Invercargill.NativeElement<string>(type_label));
+        serializer.write_element(new Invercargill.NativeElement<string>(expression));
+
+        _dbm.set(make_key(path), serializer.to_binary_data());
+    }
+
+    /**
+     * Loads category configuration.
+     *
+     * @param path The category path
+     * @return The configuration, or null if not found
+     * @throws StorageError if the data is corrupt
+     */
+    public CategoryConfig? load(Core.EntityPath path) throws StorageError {
+        var data = _dbm.get(make_key(path));
+        if (data == null) {
+            return null;
+        }
+
+        var reader = new ElementReader((!) data);
+        try {
+            // The record is two serialized strings: label then expression.
+            var label = reader.read_element();
+            var expr = reader.read_element();
+
+            if (label.is_null() || expr.is_null()) {
+                return null;
+            }
+
+            return new CategoryConfig(label.as<string>(), expr.as<string>());
+        } catch (Invercargill.ElementError e) {
+            throw new StorageError.CORRUPT_DATA("Failed to read category config: %s".printf(e.message));
+        }
+    }
+
+    /**
+     * Deletes category configuration.
+     *
+     * @param path The category path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        try {
+            _dbm.delete(make_key(path));
+        } catch (StorageError e) {
+            // A missing key is not an error for delete.
+        }
+    }
+
+    /**
+     * Builds the storage key for a category path.
+     */
+    private string make_key(Core.EntityPath path) {
+        return PREFIX + path.to_string();
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 320 - 0
src/Storage/LowLevel/CategoryIndexStorage.vala

@@ -0,0 +1,320 @@
+/**
+ * CategoryIndexStorage - Low-level storage for category member index
+ *
+ * Handles the 'cat:' prefix for storing category member paths.
+ *
+ * Key format: cat:<category_path>:members
+ * Value: Serialized array of document paths
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for category member index.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * category member paths using the 'cat:' key prefix.
+ *
+ * Key format: cat:<category_path>:members
+ * Value: serialized array of document paths
+ */
+public class CategoryIndexStorage : Object {
+
+    /**
+     * Key prefix for category index entries.
+     */
+    private const string PREFIX = "cat:";
+
+    /**
+     * Suffix for category members key.
+     */
+    private const string MEMBERS_SUFFIX = ":members";
+
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+
+    /**
+     * Creates a new CategoryIndexStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public CategoryIndexStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+
+    /**
+     * Adds a document path to a category's member set.
+     *
+     * @param category_path The category's path string
+     * @param doc_path The document path to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_member(string category_path, string doc_path) throws StorageError {
+        string key = members_key(category_path);
+        var members = load_string_set(key);
+
+        // O(1) membership check via a transient HashSet.
+        if (!build_hash_set(members).has(doc_path)) {
+            members.add(doc_path);
+            save_string_set(key, members);
+        }
+    }
+
+    /**
+     * Removes a document path from a category's member set.
+     *
+     * @param category_path The category's path string
+     * @param doc_path The document path to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_member(string category_path, string doc_path) throws StorageError {
+        string key = members_key(category_path);
+        var members = load_string_set(key);
+
+        if (build_hash_set(members).has(doc_path)) {
+            var excluded = new Invercargill.DataStructures.HashSet<string>();
+            excluded.add(doc_path);
+            save_string_set(key, vector_without(members, excluded));
+        }
+    }
+
+    /**
+     * Adds multiple document paths to a category's member set.
+     *
+     * @param category_path The category's path string
+     * @param doc_paths The document paths to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_members(string category_path, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        string key = members_key(category_path);
+        var members = load_string_set(key);
+        var members_hash = build_hash_set(members);
+        bool changed = false;
+
+        foreach (var doc_path in doc_paths) {
+            if (!members_hash.has(doc_path)) {
+                members_hash.add(doc_path);
+                members.add(doc_path);
+                changed = true;
+            }
+        }
+
+        // Persist only when something was actually added.
+        if (changed) {
+            save_string_set(key, members);
+        }
+    }
+
+    /**
+     * Removes multiple document paths from a category's member set.
+     *
+     * @param category_path The category's path string
+     * @param doc_paths The document paths to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_members(string category_path, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        string key = members_key(category_path);
+        var members = load_string_set(key);
+
+        // Build a HashSet of targets for O(1) lookups.
+        var to_remove = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var doc_path in doc_paths) to_remove.add(doc_path);
+
+        // Only rewrite the record when at least one member is affected.
+        bool changed = false;
+        foreach (var m in members) {
+            if (to_remove.has(m)) {
+                changed = true;
+                break;
+            }
+        }
+
+        if (changed) {
+            save_string_set(key, vector_without(members, to_remove));
+        }
+    }
+
+    /**
+     * Sets the complete member set for a category.
+     *
+     * This replaces any existing members with the provided set,
+     * deduplicating while preserving first-seen order.
+     *
+     * @param category_path The category's path string
+     * @param doc_paths The complete set of member paths
+     * @throws StorageError if the operation fails
+     */
+    public void set_members(string category_path, Invercargill.Enumerable<string> doc_paths) throws StorageError {
+        var set = new Invercargill.DataStructures.Vector<string>();
+        var seen = new Invercargill.DataStructures.HashSet<string>();
+
+        foreach (var member in doc_paths) {
+            if (!seen.has(member)) {
+                seen.add(member);
+                set.add(member);
+            }
+        }
+        save_string_set(members_key(category_path), set);
+    }
+
+    /**
+     * Gets all member document paths for a category.
+     *
+     * @param category_path The category's path string
+     * @return An enumerable of document paths (may be empty)
+     */
+    public Invercargill.Enumerable<string> get_members(string category_path) {
+        return load_string_set(members_key(category_path)).as_enumerable();
+    }
+
+    /**
+     * Checks if a document is a member of a category.
+     *
+     * @param category_path The category's path string
+     * @param doc_path The document path to check
+     * @return true if the document is a member
+     */
+    public bool has_member(string category_path, string doc_path) {
+        // A single lookup does not justify materialising a HashSet first
+        // (as the original did); a direct linear scan does less work.
+        foreach (var m in load_string_set(members_key(category_path))) {
+            if (m == doc_path) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Clears all index data for a category.
+     *
+     * @param category_path The category's path string
+     * @throws StorageError if the operation fails
+     */
+    public void clear(string category_path) throws StorageError {
+        try {
+            _dbm.delete(members_key(category_path));
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+
+    /**
+     * Creates a category members key.
+     */
+    private string members_key(string category_path) {
+        return PREFIX + category_path + MEMBERS_SUFFIX;
+    }
+
+    /**
+     * Builds a transient HashSet from a vector for O(1) membership checks.
+     *
+     * @param items The items to index
+     * @return A HashSet containing every item
+     */
+    private Invercargill.DataStructures.HashSet<string> build_hash_set(Invercargill.DataStructures.Vector<string> items) {
+        var hash = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var item in items) hash.add(item);
+        return hash;
+    }
+
+    /**
+     * Returns a copy of items with every member of excluded removed,
+     * preserving the original order.
+     *
+     * @param items The source vector
+     * @param excluded The values to drop
+     * @return A new vector without the excluded values
+     */
+    private Invercargill.DataStructures.Vector<string> vector_without(Invercargill.DataStructures.Vector<string> items, Invercargill.DataStructures.HashSet<string> excluded) {
+        var kept = new Invercargill.DataStructures.Vector<string>();
+        foreach (var item in items) {
+            if (!excluded.has(item)) {
+                kept.add(item);
+            }
+        }
+        return kept;
+    }
+
+    /**
+     * Loads a set of strings from a key.
+     *
+     * Deduplicates during deserialization while preserving order.
+     *
+     * @param key The storage key
+     * @return A vector of strings (empty if key doesn't exist)
+     */
+    private Invercargill.DataStructures.Vector<string> load_string_set(string key) {
+        var result = new Invercargill.DataStructures.Vector<string>();
+        var data = _dbm.get(key);
+
+        if (data == null) {
+            return result;
+        }
+
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return result;
+            }
+
+            // The set is stored as an array of strings
+            var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+            var seen = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var item in array) {
+                if (!item.is_null()) {
+                    string value = item.as<string>();
+                    if (!seen.has(value)) {
+                        seen.add(value);
+                        result.add(value);
+                    }
+                }
+            }
+        } catch (Invercargill.ElementError e) {
+            // Corrupt data is logged and surfaced as a (possibly partial) set.
+            warning("Failed to read string set at %s: %s", key, e.message);
+        }
+
+        return result;
+    }
+
+    /**
+     * Saves a set of strings to a key.
+     *
+     * An empty set is represented by deleting the key entirely.
+     *
+     * @param key The storage key
+     * @param set The set of strings to save
+     * @throws StorageError if the operation fails
+     */
+    private void save_string_set(string key, Invercargill.DataStructures.Vector<string> set) throws StorageError {
+        if (set.count() == 0) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Key doesn't exist, that's fine
+            }
+            return;
+        }
+
+        // Store as array of strings
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        foreach (var value in set) {
+            array.add(new Invercargill.NativeElement<string>(value));
+        }
+
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+        _dbm.set(key, writer.to_binary_data());
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 181 - 0
src/Storage/LowLevel/ChildrenStorage.vala

@@ -0,0 +1,181 @@
+/**
+ * ChildrenStorage - Low-level storage for structural children
+ *
+ * Handles the 'children:' prefix for storing container child names.
+ *
+ * Key format: children:<parent_path>
+ * Value: Serialized array of child names
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for structural children.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * structural child names using the 'children:' key prefix.
+ */
+public class ChildrenStorage : Object {
+    
+    /**
+     * Key prefix for children entries.
+     */
+    private const string PREFIX = "children:";
+    
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+    
+    /**
+     * Creates a new ChildrenStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public ChildrenStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+    
+    /**
+     * Adds a child name to a parent's children set.
+     *
+     * The entry is only rewritten when the child is not already present,
+     * so repeated adds of the same name are cheap no-ops.
+     *
+     * @param parent The parent entity path
+     * @param child_name The name of the child to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_child(Core.EntityPath parent, string child_name) throws StorageError {
+        var children = load_children_set(parent);
+        if (!children.contains(child_name)) {
+            children.add(child_name);
+            save_children_set(parent, children);
+        }
+    }
+    
+    /**
+     * Removes a child name from a parent's children set.
+     *
+     * Removing a name that is not present is a no-op (no write occurs).
+     *
+     * @param parent The parent entity path
+     * @param child_name The name of the child to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_child(Core.EntityPath parent, string child_name) throws StorageError {
+        var children = load_children_set(parent);
+        if (children.contains(child_name)) {
+            children.remove(child_name);
+            save_children_set(parent, children);
+        }
+    }
+    
+    /**
+     * Checks if a parent has a specific child.
+     *
+     * @param parent The parent entity path
+     * @param child_name The name of the child to check
+     * @return True if the child exists
+     */
+    public bool has_child(Core.EntityPath parent, string child_name) {
+        var children = load_children_set(parent);
+        return children.contains(child_name);
+    }
+    
+    /**
+     * Gets all child names for a parent.
+     *
+     * @param parent The parent entity path
+     * @return An enumerable of child names (empty if none are stored)
+     */
+    public Invercargill.Enumerable<string> get_children(Core.EntityPath parent) {
+        var children = load_children_set(parent);
+        return children.as_enumerable();
+    }
+    
+    /**
+     * Deletes all children for a parent.
+     *
+     * @param parent The parent entity path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath parent) throws StorageError {
+        string key = PREFIX + parent.to_string();
+        try {
+            _dbm.delete(key);
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+    
+    /**
+     * Loads the children set for a parent.
+     *
+     * Unreadable data is logged and treated as an empty set rather than
+     * propagated, so read paths stay non-throwing.
+     *
+     * @param parent The parent entity path
+     * @return A vector of child names (empty if not found)
+     */
+    private Invercargill.DataStructures.Vector<string> load_children_set(Core.EntityPath parent) {
+        string key = PREFIX + parent.to_string();
+        var result = new Invercargill.DataStructures.Vector<string>();
+        var data = _dbm.get(key);
+        
+        if (data == null) {
+            return result;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return result;
+            }
+            
+            // The children set is stored as an array of strings
+            var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+            
+            // Use a HashSet for O(1) deduplication while preserving stored
+            // order; Vector.contains is O(n) and made the old loop O(n^2).
+            var seen = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var child_element in array) {
+                if (!child_element.is_null()) {
+                    string child_name = child_element.as<string>();
+                    if (!seen.has(child_name)) {
+                        seen.add(child_name);
+                        result.add(child_name);
+                    }
+                }
+            }
+        } catch (Invercargill.ElementError e) {
+            warning("Failed to read children set: %s", e.message);
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Saves the children set for a parent.
+     *
+     * An empty set is stored as the absence of the key.
+     *
+     * @param parent The parent entity path
+     * @param children The set of child names to save
+     * @throws StorageError if the operation fails
+     */
+    private void save_children_set(Core.EntityPath parent, Invercargill.DataStructures.Vector<string> children) throws StorageError {
+        string key = PREFIX + parent.to_string();
+        
+        if (children.count() == 0) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Key doesn't exist, that's fine
+            }
+            return;
+        }
+        
+        // Store as array of strings
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        foreach (var child_name in children) {
+            array.add(new Invercargill.NativeElement<string>(child_name));
+        }
+        
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+        _dbm.set(key, writer.to_binary_data());
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 143 - 0
src/Storage/LowLevel/EntityMetadataStorage.vala

@@ -0,0 +1,143 @@
+/**
+ * EntityMetadataStorage - Low-level storage for entity metadata
+ *
+ * Handles the 'entity:' prefix for storing entity type and type_label.
+ *
+ * Key format: entity:<path>
+ * Value: Serialized (EntityType type, string? type_label)
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for entity metadata.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * entity metadata (type and type_label) using the 'entity:' key prefix.
+ */
+public class EntityMetadataStorage : Object {
+    
+    /**
+     * Key prefix for entity metadata entries.
+     */
+    private const string PREFIX = "entity:";
+    
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+    
+    /**
+     * Creates a new EntityMetadataStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public EntityMetadataStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+    
+    /**
+     * Stores entity metadata.
+     *
+     * A null type_label is stored as the empty string so the record always
+     * contains exactly two elements (type, label) for the readers below.
+     *
+     * @param path The entity path
+     * @param type The entity type
+     * @param type_label Optional type label for documents
+     * @throws StorageError if the operation fails
+     */
+    public void store_metadata(Core.EntityPath path, Core.EntityType type, string? type_label = null) throws StorageError {
+        string key = PREFIX + path.to_string();
+        
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<int64?>((int64) type));
+        writer.write_element(new Invercargill.NativeElement<string>(type_label ?? ""));
+        
+        _dbm.set(key, writer.to_binary_data());
+    }
+    
+    /**
+     * Gets the entity type for a path.
+     *
+     * @param path The entity path
+     * @return The entity type, or null if no metadata is stored
+     * @throws StorageError if the data is corrupt
+     */
+    public Core.EntityType? get_entity_type(Core.EntityPath path) throws StorageError {
+        string key = PREFIX + path.to_string();
+        var data = _dbm.get(key);
+        
+        if (data == null) {
+            return null;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return null;
+            }
+            int64? type_val = element.as<int64?>();
+            if (type_val == null) {
+                // A record whose first element is present but not an integer
+                // is damaged. Silently mapping it to type 0 (the previous
+                // behavior) would mask the corruption and hand callers a
+                // bogus-but-valid-looking EntityType.
+                throw new StorageError.CORRUPT_DATA("Entity type record for '%s' does not contain an integer type".printf(path.to_string()));
+            }
+            return (Core.EntityType) (!) type_val;
+        } catch (Invercargill.ElementError e) {
+            throw new StorageError.CORRUPT_DATA("Failed to read entity type: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Gets the entity type label for a path.
+     *
+     * The empty string is the stored representation of "no label" (see
+     * store_metadata), so it is normalized back to null here.
+     *
+     * @param path The entity path
+     * @return The type label, or null if not found or empty
+     * @throws StorageError if the data is corrupt
+     */
+    public string? get_type_label(Core.EntityPath path) throws StorageError {
+        string key = PREFIX + path.to_string();
+        var data = _dbm.get(key);
+        
+        if (data == null) {
+            return null;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            reader.read_element(); // Skip the type element; label is second
+            var label_element = reader.read_element();
+            if (label_element.is_null()) {
+                return null;
+            }
+            string label = label_element.as<string>();
+            return label == "" ? null : label;
+        } catch (Invercargill.ElementError e) {
+            throw new StorageError.CORRUPT_DATA("Failed to read entity type label: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Checks if an entity exists at the given path.
+     *
+     * @param path The entity path
+     * @return True if the entity exists
+     */
+    public bool exists(Core.EntityPath path) {
+        string key = PREFIX + path.to_string();
+        return _dbm.has_key(key);
+    }
+    
+    /**
+     * Deletes entity metadata.
+     *
+     * Deleting a non-existent entry is a silent no-op.
+     *
+     * @param path The entity path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        string key = PREFIX + path.to_string();
+        try {
+            _dbm.delete(key);
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 100 - 0
src/Storage/LowLevel/PropertiesStorage.vala

@@ -0,0 +1,100 @@
+/**
+ * PropertiesStorage - Low-level storage for document properties
+ *
+ * Handles the 'props:' prefix for storing document properties.
+ *
+ * Key format: props:<path>
+ * Value: Serialized Properties dictionary
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for document properties.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * document properties using the 'props:' key prefix.
+ */
+public class PropertiesStorage : Object {
+    
+    /**
+     * Key prefix for properties entries.
+     */
+    private const string PREFIX = "props:";
+    
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+    
+    /**
+     * Creates a new PropertiesStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public PropertiesStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+    
+    /**
+     * Stores document properties.
+     *
+     * The properties dictionary is serialized as a single element under
+     * the 'props:' key for the given path.
+     *
+     * @param path The entity path
+     * @param properties The properties to store
+     * @throws StorageError if the operation fails
+     */
+    public void store(Core.EntityPath path, Invercargill.Properties properties) throws StorageError {
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Properties>(properties));
+        _dbm.set(PREFIX + path.to_string(), writer.to_binary_data());
+    }
+    
+    /**
+     * Loads document properties.
+     *
+     * @param path The entity path
+     * @return The properties, or null if not found
+     * @throws StorageError if the data is corrupt
+     */
+    public Invercargill.Properties? load(Core.EntityPath path) throws StorageError {
+        var data = _dbm.get(PREFIX + path.to_string());
+        if (data == null) {
+            return null;
+        }
+        
+        try {
+            var element = new ElementReader((!) data).read_element();
+            return element.is_null() ? null : element.as<Invercargill.Properties>();
+        } catch (Invercargill.ElementError e) {
+            throw new StorageError.CORRUPT_DATA("Failed to read properties: %s".printf(e.message));
+        }
+    }
+    
+    /**
+     * Deletes document properties.
+     *
+     * Deleting a non-existent entry is a silent no-op.
+     *
+     * @param path The entity path
+     * @throws StorageError if the operation fails
+     */
+    public void delete(Core.EntityPath path) throws StorageError {
+        try {
+            _dbm.delete(PREFIX + path.to_string());
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 587 - 0
src/Storage/LowLevel/TextIndexStorage.vala

@@ -0,0 +1,587 @@
+/**
+ * TextIndexStorage - Low-level storage for text search n-gram indices
+ *
+ * Handles the 'idx:' prefix for storing n-gram indices.
+ *
+ * Key formats:
+ * - idx:<index_path>:tri:<trigram> - Document paths containing trigram
+ * - idx:<index_path>:bi:<bigram> - Trigrams containing bigram
+ * - idx:<index_path>:uni:<unigram> - Bigrams starting with unigram
+ * - idx:<index_path>:doc:<doc_path> - Cached document content
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for text search n-gram indices.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * n-gram indices using the 'idx:' key prefix.
+ */
+public class TextIndexStorage : Object {
+    
+    /**
+     * Key prefix for text index entries.
+     */
+    private const string PREFIX = "idx:";
+    
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+    
+    /**
+     * Creates a new TextIndexStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public TextIndexStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+    
+    // === Trigram Index Operations ===
+    
+    /**
+     * Adds a document path to the trigram index.
+     *
+     * A single-item add is just a batch of one; the batch path handles
+     * the load/dedup/save cycle and skips the write when nothing changes.
+     *
+     * @param index_path The index entity's path string
+     * @param trigram The 3-character trigram
+     * @param doc_path The document path to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_trigram(string index_path, string trigram, string doc_path) throws StorageError {
+        add_ngram_batch(index_path, "tri", trigram, singleton(doc_path));
+    }
+    
+    /**
+     * Removes a document path from the trigram index.
+     *
+     * @param index_path The index entity's path string
+     * @param trigram The 3-character trigram
+     * @param doc_path The document path to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_trigram(string index_path, string trigram, string doc_path) throws StorageError {
+        remove_ngram_batch(index_path, "tri", trigram, singleton(doc_path));
+    }
+    
+    /**
+     * Adds multiple document paths to a trigram index entry.
+     *
+     * @param index_path The index entity's path string
+     * @param trigram The 3-character trigram
+     * @param doc_paths The document paths to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_trigram_batch(string index_path, string trigram, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        add_ngram_batch(index_path, "tri", trigram, doc_paths);
+    }
+    
+    /**
+     * Removes multiple document paths from a trigram index entry.
+     *
+     * @param index_path The index entity's path string
+     * @param trigram The 3-character trigram
+     * @param doc_paths The document paths to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_trigram_batch(string index_path, string trigram, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        remove_ngram_batch(index_path, "tri", trigram, doc_paths);
+    }
+    
+    /**
+     * Gets all document paths for a trigram.
+     *
+     * @param index_path The index entity's path string
+     * @param trigram The 3-character trigram
+     * @return An enumerable of document paths
+     */
+    public Invercargill.Enumerable<string> get_documents_for_trigram(string index_path, string trigram) {
+        return load_string_set(ngram_key(index_path, "tri", trigram)).as_enumerable();
+    }
+    
+    // === Bigram Reverse Index Operations ===
+    
+    /**
+     * Adds a bigram to trigram mapping for reverse lookup.
+     *
+     * @param index_path The index entity's path string
+     * @param bigram The 2-character bigram
+     * @param trigram The trigram that contains the bigram
+     * @throws StorageError if the operation fails
+     */
+    public void add_bigram_mapping(string index_path, string bigram, string trigram) throws StorageError {
+        add_ngram_batch(index_path, "bi", bigram, singleton(trigram));
+    }
+    
+    /**
+     * Adds multiple bigram to trigram mappings in batch.
+     *
+     * @param index_path The index entity's path string
+     * @param additions Dictionary mapping bigrams to vectors of trigrams
+     * @throws StorageError if the operation fails
+     */
+    public void add_bigram_mappings_batch(string index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        add_dictionary_batch(index_path, "bi", additions);
+    }
+    
+    /**
+     * Gets all trigrams for a bigram (reverse lookup).
+     *
+     * @param index_path The index entity's path string
+     * @param bigram The 2-character bigram
+     * @return An enumerable of trigrams
+     */
+    public Invercargill.Enumerable<string> get_trigrams_for_bigram(string index_path, string bigram) {
+        return load_string_set(ngram_key(index_path, "bi", bigram)).as_enumerable();
+    }
+    
+    // === Unigram Reverse Index Operations ===
+    
+    /**
+     * Adds a unigram to bigram mapping for reverse lookup.
+     *
+     * @param index_path The index entity's path string
+     * @param unigram The single character
+     * @param bigram The bigram that starts with the unigram
+     * @throws StorageError if the operation fails
+     */
+    public void add_unigram_mapping(string index_path, string unigram, string bigram) throws StorageError {
+        add_ngram_batch(index_path, "uni", unigram, singleton(bigram));
+    }
+    
+    /**
+     * Adds multiple unigram to bigram mappings in batch.
+     *
+     * @param index_path The index entity's path string
+     * @param additions Dictionary mapping unigrams to vectors of bigrams
+     * @throws StorageError if the operation fails
+     */
+    public void add_unigram_mappings_batch(string index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        add_dictionary_batch(index_path, "uni", additions);
+    }
+    
+    /**
+     * Gets all bigrams for a unigram (reverse lookup).
+     *
+     * @param index_path The index entity's path string
+     * @param unigram The single character
+     * @return An enumerable of bigrams
+     */
+    public Invercargill.Enumerable<string> get_bigrams_for_unigram(string index_path, string unigram) {
+        return load_string_set(ngram_key(index_path, "uni", unigram)).as_enumerable();
+    }
+    
+    // === Generic N-gram Operations ===
+    
+    /**
+     * Adds multiple values to an n-gram index entry.
+     *
+     * Uses a HashSet for O(1) membership checks; the entry is only
+     * rewritten when at least one value was actually new.
+     *
+     * @param index_path The index entity's path string
+     * @param ngram_type The n-gram type (tri, bi, uni)
+     * @param ngram The n-gram value
+     * @param values The values to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_ngram_batch(string index_path, string ngram_type, string ngram, Invercargill.DataStructures.Vector<string> values) throws StorageError {
+        string key = ngram_key(index_path, ngram_type, ngram);
+        var existing = load_string_set(key);
+        var existing_hash = to_hash_set(existing);
+        
+        bool changed = false;
+        foreach (var val in values) {
+            if (!existing_hash.has(val)) {
+                existing_hash.add(val);
+                existing.add(val);
+                changed = true;
+            }
+        }
+        
+        if (changed) {
+            save_string_set(key, existing);
+        }
+    }
+    
+    /**
+     * Removes multiple values from an n-gram index entry.
+     *
+     * Uses a HashSet of the removal targets for O(1) lookup; the entry
+     * is only rewritten when at least one stored value matches.
+     *
+     * @param index_path The index entity's path string
+     * @param ngram_type The n-gram type (tri, bi, uni)
+     * @param ngram The n-gram value
+     * @param values The values to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_ngram_batch(string index_path, string ngram_type, string ngram, Invercargill.DataStructures.Vector<string> values) throws StorageError {
+        string key = ngram_key(index_path, ngram_type, ngram);
+        var existing = load_string_set(key);
+        var to_remove = to_hash_set(values);
+        
+        bool changed = false;
+        foreach (var ex in existing) {
+            if (to_remove.has(ex)) {
+                changed = true;
+                break;
+            }
+        }
+        
+        if (changed) {
+            // Rebuild the entry without the removed items
+            var survivors = new Invercargill.DataStructures.Vector<string>();
+            foreach (var ex in existing) {
+                if (!to_remove.has(ex)) {
+                    survivors.add(ex);
+                }
+            }
+            save_string_set(key, survivors);
+        }
+    }
+    
+    // === Dictionary-based Batch Operations ===
+    
+    /**
+     * Adds multiple trigrams with their document paths in batch.
+     *
+     * @param index_path The index entity's path string
+     * @param additions Dictionary mapping trigrams to vectors of document paths
+     * @throws StorageError if the operation fails
+     */
+    public void add_trigrams_batch(string index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        add_dictionary_batch(index_path, "tri", additions);
+    }
+    
+    /**
+     * Removes multiple trigrams with their document paths in batch.
+     *
+     * @param index_path The index entity's path string
+     * @param removals Dictionary mapping trigrams to vectors of document paths
+     * @throws StorageError if the operation fails
+     */
+    public void remove_trigrams_batch(string index_path, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> removals) throws StorageError {
+        foreach (var trigram in removals.keys) {
+            Invercargill.DataStructures.Vector<string> docs;
+            if (removals.try_get(trigram, out docs)) {
+                remove_ngram_batch(index_path, "tri", trigram, docs);
+            }
+        }
+    }
+    
+    // === Document Content Cache Operations ===
+    
+    /**
+     * Stores the indexed content for a document.
+     *
+     * This is used to cache the indexed field value for verification
+     * and reindexing operations.
+     *
+     * @param index_path The index entity's path string
+     * @param doc_path The document path
+     * @param content The indexed content
+     * @throws StorageError if the operation fails
+     */
+    public void store_document_content(string index_path, string doc_path, string content) throws StorageError {
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<string>(content));
+        _dbm.set(doc_key(index_path, doc_path), writer.to_binary_data());
+    }
+    
+    /**
+     * Gets the indexed content for a document.
+     *
+     * Unreadable data is logged and reported as absent rather than thrown,
+     * keeping this read path non-throwing.
+     *
+     * @param index_path The index entity's path string
+     * @param doc_path The document path
+     * @return The indexed content, or null if not found
+     */
+    public string? get_document_content(string index_path, string doc_path) {
+        var data = _dbm.get(doc_key(index_path, doc_path));
+        
+        if (data == null) {
+            return null;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return null;
+            }
+            return element.as<string>();
+        } catch (Invercargill.ElementError e) {
+            warning("Failed to read document content: %s", e.message);
+            return null;
+        }
+    }
+    
+    /**
+     * Removes the indexed content for a document.
+     *
+     * @param index_path The index entity's path string
+     * @param doc_path The document path
+     * @throws StorageError if the operation fails
+     */
+    public void remove_document_content(string index_path, string doc_path) throws StorageError {
+        try {
+            _dbm.delete(doc_key(index_path, doc_path));
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+    
+    // === Clear All Index Data ===
+    
+    /**
+     * Clears all n-gram index data for an index entity.
+     *
+     * Note: This method scans every key in the backend to find those
+     * with the index prefix, so it is O(total keys).
+     *
+     * @param index_path The index entity's path string
+     * @throws StorageError if the operation fails
+     */
+    public void clear(string index_path) throws StorageError {
+        string prefix = PREFIX + index_path + ":";
+        
+        // Collect matching keys first: deleting entries while iterating
+        // _dbm.keys could invalidate the underlying key iterator.
+        var doomed = new Invercargill.DataStructures.Vector<string>();
+        foreach (var key in _dbm.keys) {
+            if (key.has_prefix(prefix)) {
+                doomed.add(key);
+            }
+        }
+        
+        foreach (var key in doomed) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Continue even if a single delete fails
+            }
+        }
+    }
+    
+    // === Key Format Helpers ===
+    
+    /**
+     * Creates an n-gram index key: idx:<index_path>:<type>:<ngram>.
+     */
+    private string ngram_key(string index_path, string ngram_type, string ngram) {
+        return PREFIX + index_path + ":" + ngram_type + ":" + ngram;
+    }
+    
+    /**
+     * Creates a document content cache key: idx:<index_path>:doc:<doc_path>.
+     *
+     * NOTE(review): index_path and doc_path are embedded verbatim; if an
+     * index path can itself contain ':' two distinct (index, doc) pairs
+     * could produce the same key — confirm path strings are colon-free.
+     */
+    private string doc_key(string index_path, string doc_path) {
+        return PREFIX + index_path + ":doc:" + doc_path;
+    }
+    
+    // === Internal Helpers ===
+    
+    /**
+     * Wraps a single value in a one-element vector so single-item
+     * operations can share the batch implementations.
+     */
+    private Invercargill.DataStructures.Vector<string> singleton(string value) {
+        var vector = new Invercargill.DataStructures.Vector<string>();
+        vector.add(value);
+        return vector;
+    }
+    
+    /**
+     * Copies a vector's values into a HashSet for O(1) membership checks.
+     */
+    private Invercargill.DataStructures.HashSet<string> to_hash_set(Invercargill.DataStructures.Vector<string> values) {
+        var set = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var value in values) {
+            set.add(value);
+        }
+        return set;
+    }
+    
+    /**
+     * Applies add_ngram_batch for every (ngram, values) pair in a dictionary.
+     */
+    private void add_dictionary_batch(string index_path, string ngram_type, Invercargill.DataStructures.Dictionary<string, Invercargill.DataStructures.Vector<string>> additions) throws StorageError {
+        foreach (var ngram in additions.keys) {
+            Invercargill.DataStructures.Vector<string> values;
+            if (additions.try_get(ngram, out values)) {
+                add_ngram_batch(index_path, ngram_type, ngram, values);
+            }
+        }
+    }
+    
+    // === Serialization Helpers ===
+    
+    /**
+     * Loads a set of strings from a key.
+     *
+     * Uses a HashSet for O(1) deduplication during deserialization while
+     * preserving stored order. Unreadable data is logged and treated as
+     * an empty set so read paths stay non-throwing.
+     *
+     * @param key The storage key
+     * @return A vector of strings (empty if key doesn't exist)
+     */
+    private Invercargill.DataStructures.Vector<string> load_string_set(string key) {
+        var result = new Invercargill.DataStructures.Vector<string>();
+        var data = _dbm.get(key);
+        
+        if (data == null) {
+            return result;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return result;
+            }
+            
+            // The set is stored as an array of strings
+            var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+            var seen = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var item in array) {
+                if (!item.is_null()) {
+                    string value = item.as<string>();
+                    if (!seen.has(value)) {
+                        seen.add(value);
+                        result.add(value);
+                    }
+                }
+            }
+        } catch (Invercargill.ElementError e) {
+            warning("Failed to read string set at %s: %s", key, e.message);
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Saves a set of strings to a key.
+     *
+     * An empty set is stored as the absence of the key.
+     *
+     * @param key The storage key
+     * @param set The set of strings to save
+     * @throws StorageError if the operation fails
+     */
+    private void save_string_set(string key, Invercargill.DataStructures.Vector<string> set) throws StorageError {
+        if (set.count() == 0) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Key doesn't exist, that's fine
+            }
+            return;
+        }
+        
+        // Store as array of strings
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        foreach (var value in set) {
+            array.add(new Invercargill.NativeElement<string>(value));
+        }
+        
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+        _dbm.set(key, writer.to_binary_data());
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 289 - 0
src/Storage/LowLevel/TypeIndexStorage.vala

@@ -0,0 +1,289 @@
+/**
+ * TypeIndexStorage - Low-level storage for type index
+ *
+ * Handles the 'typeidx:' prefix for storing type label to document paths mapping.
+ *
+ * Key format: typeidx:<type_label>
+ * Value: Serialized array of document paths
+ *
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage.LowLevel {
+
+/**
+ * Low-level storage for the global type index.
+ *
+ * This class provides type-safe operations for storing and retrieving
+ * type label to document path mappings using the 'typeidx:' key prefix.
+ */
+public class TypeIndexStorage : Object {
+    
+    /**
+     * Key prefix for type index entries.
+     */
+    private const string PREFIX = "typeidx:";
+    
+    /**
+     * The underlying Dbm storage.
+     */
+    private Dbm _dbm;
+    
+    /**
+     * Creates a new TypeIndexStorage with the given Dbm backend.
+     *
+     * @param dbm The Dbm backend to use for storage
+     */
+    public TypeIndexStorage(Dbm dbm) {
+        _dbm = dbm;
+    }
+    
+    /**
+     * Adds a document path to the type index.
+     *
+     * @param type_label The type label to index under
+     * @param doc_path The document path to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_document(string type_label, string doc_path) throws StorageError {
+        string key = PREFIX + type_label;
+        debug("TypeIndexStorage.add_document: type_label=%s, doc_path=%s, key=%s", type_label, doc_path, key);
+        var paths = load_string_set(key);
+        
+        // load_string_set deduplicates on read, so a single linear scan is
+        // enough for a one-item membership check; allocating a temporary
+        // HashSet here costs more than it saves.
+        bool already_present = false;
+        foreach (var p in paths) {
+            if (p == doc_path) {
+                already_present = true;
+                break;
+            }
+        }
+        
+        if (!already_present) {
+            paths.add(doc_path);
+            save_string_set(key, paths);
+        }
+    }
+    
+    /**
+     * Removes a document path from the type index.
+     *
+     * @param type_label The type label the document is indexed under
+     * @param doc_path The document path to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_document(string type_label, string doc_path) throws StorageError {
+        string key = PREFIX + type_label;
+        var paths = load_string_set(key);
+        
+        // Rebuild the set without the target path in a single pass and only
+        // persist when something was actually removed.
+        var remaining = new Invercargill.DataStructures.Vector<string>();
+        bool removed = false;
+        foreach (var p in paths) {
+            if (p == doc_path) {
+                removed = true;
+            } else {
+                remaining.add(p);
+            }
+        }
+        
+        if (removed) {
+            save_string_set(key, remaining);
+        }
+    }
+    
+    /**
+     * Adds multiple document paths to the type index.
+     *
+     * Uses a HashSet for O(1) membership checking, which pays off here
+     * because every incoming path must be checked against the existing set.
+     *
+     * @param type_label The type label to index under
+     * @param doc_paths The document paths to add
+     * @throws StorageError if the operation fails
+     */
+    public void add_documents(string type_label, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        string key = PREFIX + type_label;
+        var existing = load_string_set(key);
+        bool changed = false;
+        
+        // Mirror the vector into a HashSet for O(1) membership checks.
+        var existing_hash = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var ex in existing) existing_hash.add(ex);
+        
+        foreach (var doc_path in doc_paths) {
+            if (!existing_hash.has(doc_path)) {
+                existing_hash.add(doc_path);
+                existing.add(doc_path);
+                changed = true;
+            }
+        }
+        
+        // Only write back when the stored set actually changed.
+        if (changed) {
+            save_string_set(key, existing);
+        }
+    }
+    
+    /**
+     * Removes multiple document paths from the type index.
+     *
+     * Uses a HashSet of the removal targets for O(1) lookups during the
+     * rebuild.
+     *
+     * @param type_label The type label the documents are indexed under
+     * @param doc_paths The document paths to remove
+     * @throws StorageError if the operation fails
+     */
+    public void remove_documents(string type_label, Invercargill.DataStructures.Vector<string> doc_paths) throws StorageError {
+        string key = PREFIX + type_label;
+        var existing = load_string_set(key);
+        
+        // Build a HashSet of items to remove for O(1) lookup.
+        var to_remove_hash = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var doc_path in doc_paths) to_remove_hash.add(doc_path);
+        
+        // Rebuild without the removed items, tracking whether anything
+        // actually matched so unchanged sets are not rewritten.
+        var remaining = new Invercargill.DataStructures.Vector<string>();
+        bool changed = false;
+        foreach (var ex in existing) {
+            if (to_remove_hash.has(ex)) {
+                changed = true;
+            } else {
+                remaining.add(ex);
+            }
+        }
+        
+        if (changed) {
+            save_string_set(key, remaining);
+        }
+    }
+    
+    /**
+     * Gets all document paths for a given type label.
+     *
+     * @param type_label The type label to look up
+     * @return An enumerable of document paths (may be empty)
+     */
+    public Invercargill.Enumerable<string> get_documents(string type_label) {
+        string key = PREFIX + type_label;
+        var paths = load_string_set(key);
+        debug("TypeIndexStorage.get_documents: key=%s, paths.count=%u", key, paths.count());
+        return paths.as_enumerable();
+    }
+    
+    /**
+     * Checks if a document is indexed under a type label.
+     *
+     * @param type_label The type label to check
+     * @param doc_path The document path to check
+     * @return true if the document is indexed under this type
+     */
+    public bool has_document(string type_label, string doc_path) {
+        string key = PREFIX + type_label;
+        
+        // A single membership query only needs one linear pass; building a
+        // HashSet first would still be O(n) but with an extra allocation.
+        foreach (var p in load_string_set(key)) {
+            if (p == doc_path) {
+                return true;
+            }
+        }
+        return false;
+    }
+    
+    /**
+     * Clears all documents for a type label.
+     *
+     * @param type_label The type label to clear
+     * @throws StorageError if the operation fails
+     */
+    public void clear(string type_label) throws StorageError {
+        string key = PREFIX + type_label;
+        try {
+            _dbm.delete(key);
+        } catch (StorageError e) {
+            // Key doesn't exist, that's fine
+        }
+    }
+    
+    /**
+     * Loads a set of strings from a key.
+     *
+     * Uses a HashSet for O(1) deduplication during deserialization while
+     * preserving the stored order of first occurrence.
+     *
+     * @param key The storage key
+     * @return A vector of strings (empty if key doesn't exist)
+     */
+    private Invercargill.DataStructures.Vector<string> load_string_set(string key) {
+        var result = new Invercargill.DataStructures.Vector<string>();
+        var data = _dbm.get(key);
+        
+        if (data == null) {
+            return result;
+        }
+        
+        var reader = new ElementReader((!) data);
+        try {
+            var element = reader.read_element();
+            if (element.is_null()) {
+                return result;
+            }
+            
+            // The set is stored as an array of strings
+            var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+            
+            var seen = new Invercargill.DataStructures.HashSet<string>();
+            foreach (var item in array) {
+                if (!item.is_null()) {
+                    string value = item.as<string>();
+                    if (!seen.has(value)) {
+                        seen.add(value);
+                        result.add(value);
+                    }
+                }
+            }
+        } catch (Invercargill.ElementError e) {
+            // Corrupt entries degrade to an empty/partial set rather than
+            // failing the whole read path.
+            warning("Failed to read string set at %s: %s", key, e.message);
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Saves a set of strings to a key.
+     *
+     * An empty set is represented by the absence of the key.
+     *
+     * @param key The storage key
+     * @param set The set of strings to save
+     * @throws StorageError if the operation fails
+     */
+    private void save_string_set(string key, Invercargill.DataStructures.Vector<string> set) throws StorageError {
+        if (set.count() == 0) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Key doesn't exist, that's fine
+            }
+            return;
+        }
+        
+        // Store as array of strings
+        var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        foreach (var value in set) {
+            array.add(new Invercargill.NativeElement<string>(value));
+        }
+        
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+        _dbm.set(key, writer.to_binary_data());
+    }
+}
+
+} // namespace Implexus.Storage.LowLevel

+ 540 - 0
src/Storage/Storage.vala

@@ -0,0 +1,540 @@
+/**
+ * High-level storage interface and implementation for Implexus.
+ *
+ * Provides entity persistence operations built on top of the Dbm layer.
+ */
+
+namespace Implexus.Storage {
+
+    /**
+     * Configuration for a Category/Index entity.
+     */
+    public class CategoryConfig : Object {
+        /**
+         * The type label for entities in this category.
+         */
+        public string type_label { get; set; }
+
+        /**
+         * The expression used to generate the index.
+         */
+        public string expression { get; set; }
+
+        /**
+         * Creates a new CategoryConfig.
+         *
+         * Chains up through Object () (GObject-style construction) so the
+         * class also constructs correctly via g_object_new and subclassing.
+         *
+         * @param type_label the type label for entities in this category
+         * @param expression the expression used to generate the index
+         */
+        public CategoryConfig(string type_label, string expression) {
+            Object(type_label: type_label, expression: expression);
+        }
+    }
+
+    /**
+     * Configuration for a Catalogue entity.
+     */
+    public class CatalogueConfig : Object {
+        /**
+         * The type label for documents to catalogue.
+         */
+        public string type_label { get; set; }
+
+        /**
+         * The expression used to extract the grouping key.
+         */
+        public string expression { get; set; }
+
+        /**
+         * Creates a new CatalogueConfig.
+         *
+         * Chains up through Object () (GObject-style construction) so the
+         * class also constructs correctly via g_object_new and subclassing.
+         *
+         * @param type_label the type label for documents to catalogue
+         * @param expression the expression used to extract the grouping key
+         */
+        public CatalogueConfig(string type_label, string expression) {
+            Object(type_label: type_label, expression: expression);
+        }
+    }
+
+    /**
+     * Interface for high-level entity storage operations.
+     *
+     * Implementations persist entity metadata, document properties,
+     * category children and category/catalogue configuration.
+     */
+    public interface Storage : Object {
+        /**
+         * Stores entity metadata.
+         *
+         * @param path The entity path
+         * @param type The entity type
+         * @param type_label Optional type label for Categories
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void store_entity_metadata(Core.EntityPath path, Core.EntityType type, string? type_label = null) throws StorageError;
+
+        /**
+         * Gets the entity type for a path.
+         *
+         * @param path The entity path
+         * @return The entity type, or null if not found
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract Core.EntityType? get_entity_type(Core.EntityPath path) throws StorageError;
+
+        /**
+         * Gets the entity type label for a path.
+         *
+         * @param path The entity path
+         * @return The type label, or null if not found
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract string? get_entity_type_label(Core.EntityPath path) throws StorageError;
+
+        /**
+         * Checks if an entity exists at the given path.
+         *
+         * @param path The entity path
+         * @return True if the entity exists
+         */
+        public abstract bool entity_exists(Core.EntityPath path);
+
+        /**
+         * Deletes an entity and all its data.
+         *
+         * @param path The entity path
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void delete_entity(Core.EntityPath path) throws StorageError;
+
+        /**
+         * Stores document properties for an entity.
+         *
+         * @param path The entity path
+         * @param properties The properties to store
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void store_properties(Core.EntityPath path, Invercargill.Properties properties) throws StorageError;
+
+        /**
+         * Loads document properties for an entity.
+         *
+         * @param path The entity path
+         * @return The properties, or null if not found
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract Invercargill.Properties? load_properties(Core.EntityPath path) throws StorageError;
+
+        /**
+         * Adds a child name to a category.
+         *
+         * @param parent The parent category path
+         * @param child_name The name of the child
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void add_child(Core.EntityPath parent, string child_name) throws StorageError;
+
+        /**
+         * Removes a child name from a category.
+         *
+         * @param parent The parent category path
+         * @param child_name The name of the child
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void remove_child(Core.EntityPath parent, string child_name) throws StorageError;
+
+        /**
+         * Gets the children of a category.
+         *
+         * @param parent The parent category path
+         * @return Enumerable of child names
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract Invercargill.Enumerable<string> get_children(Core.EntityPath parent) throws StorageError;
+
+        /**
+         * Checks if a category has a specific child.
+         *
+         * @param parent The parent category path
+         * @param child_name The name of the child
+         * @return True if the child exists
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract bool has_child(Core.EntityPath parent, string child_name) throws StorageError;
+
+        /**
+         * Stores category configuration.
+         *
+         * @param path The category path
+         * @param type_label The type label for entities
+         * @param expression The index expression
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void store_category_config(Core.EntityPath path, string type_label, string expression) throws StorageError;
+
+        /**
+         * Gets category configuration.
+         *
+         * @param path The category path
+         * @return The configuration, or null if not found
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract CategoryConfig? get_category_config(Core.EntityPath path) throws StorageError;
+
+        /**
+         * Stores catalogue configuration.
+         *
+         * @param path The catalogue path
+         * @param type_label The type label for documents to catalogue
+         * @param expression The expression to extract the grouping key
+         * @throws StorageError if the underlying storage operation fails
+         */
+        public abstract void store_catalogue_config(Core.EntityPath path, string type_label, string expression) throws StorageError;
+
+        /**
+         * Gets catalogue configuration.
+         *
+         * @param path The catalogue path
+         * @return The configuration, or null if not found
+         * @throws StorageError if the stored data cannot be read
+         */
+        public abstract CatalogueConfig? get_catalogue_config(Core.EntityPath path) throws StorageError;
+    }
+
+    /**
+     * Basic implementation of Storage using Dbm.
+     *
+     * Persists entity metadata, properties, category children and
+     * category/catalogue configuration under distinct key prefixes on a
+     * single Dbm backend.
+     */
+    public class BasicStorage : Object, Storage {
+        
+        /**
+         * The underlying Dbm backend.
+         *
+         * This is exposed to allow IndexManager to share the same storage.
+         */
+        public Dbm dbm { get { return _dbm; } }
+        
+        private Dbm _dbm;
+
+        // Key prefixes for different data types
+        private const string ENTITY_PREFIX = "entity:";
+        private const string PROPS_PREFIX = "props:";
+        private const string CHILDREN_PREFIX = "children:";
+        private const string CONFIG_PREFIX = "config:";
+        // Key prefix for catalogue configuration (kept with its siblings)
+        private const string CAT_CONFIG_PREFIX = "catcfg:";
+
+        /**
+         * Creates a new BasicStorage with the given Dbm backend.
+         *
+         * @param dbm The Dbm backend to use
+         */
+        public BasicStorage(Dbm dbm) {
+            _dbm = dbm;
+        }
+
+        /**
+         * Creates a new BasicStorage with a file-based Dbm.
+         *
+         * @param data_dir Directory to store data in
+         */
+        public BasicStorage.with_directory(string data_dir) {
+            _dbm = new FilesystemDbm(data_dir);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public void store_entity_metadata(Core.EntityPath path, Core.EntityType type, string? type_label = null) throws StorageError {
+            string key = ENTITY_PREFIX + path.to_string();
+
+            var writer = new ElementWriter();
+            // Use write_element to include type code, so read_element can read it back
+            writer.write_element(new Invercargill.NativeElement<int64?>((int64) type));
+            writer.write_element(new Invercargill.NativeElement<string>(type_label ?? ""));
+
+            _dbm.set(key, writer.to_binary_data());
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public Core.EntityType? get_entity_type(Core.EntityPath path) throws StorageError {
+            string key = ENTITY_PREFIX + path.to_string();
+            var data = _dbm.get(key);
+
+            if (data == null) {
+                return null;
+            }
+
+            var reader = new ElementReader((!) data);
+            try {
+                var element = reader.read_element();
+                if (element.is_null()) {
+                    return null;
+                }
+                int64? type_val = element.as<int64?>();
+                return (Core.EntityType) (type_val == null ? 0 : (!) type_val);
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read entity type: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public string? get_entity_type_label(Core.EntityPath path) throws StorageError {
+            string key = ENTITY_PREFIX + path.to_string();
+            var data = _dbm.get(key);
+
+            if (data == null) {
+                return null;
+            }
+
+            var reader = new ElementReader((!) data);
+            try {
+                reader.read_element(); // Skip type
+                var label_element = reader.read_element();
+                if (label_element.is_null()) {
+                    return null;
+                }
+                string label = label_element.as<string>();
+                // An empty label is the serialized form of "no label".
+                return label == "" ? null : label;
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read entity type label: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool entity_exists(Core.EntityPath path) {
+            string key = ENTITY_PREFIX + path.to_string();
+            return _dbm.has_key(key);
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * Removes every kind of data that may be stored for the path,
+         * including catalogue configuration (previously left behind).
+         */
+        public void delete_entity(Core.EntityPath path) throws StorageError {
+            string path_str = path.to_string();
+
+            // Not every key exists for every entity type, so missing keys
+            // are tolerated (matches the tolerant-delete pattern used
+            // elsewhere in the storage layer).
+            delete_if_exists(ENTITY_PREFIX + path_str);
+            delete_if_exists(PROPS_PREFIX + path_str);
+            delete_if_exists(CHILDREN_PREFIX + path_str);
+            delete_if_exists(CONFIG_PREFIX + path_str);
+            // Bug fix: catalogue configuration was not deleted before.
+            delete_if_exists(CAT_CONFIG_PREFIX + path_str);
+        }
+
+        /**
+         * Deletes a key, ignoring the error raised when it does not exist.
+         *
+         * NOTE(review): this also swallows other StorageError cases from
+         * Dbm.delete; confirm against the Dbm contract whether a dedicated
+         * KEY_NOT_FOUND check would be safer.
+         */
+        private void delete_if_exists(string key) {
+            try {
+                _dbm.delete(key);
+            } catch (StorageError e) {
+                // Key doesn't exist, that's fine
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public void store_properties(Core.EntityPath path, Invercargill.Properties properties) throws StorageError {
+            string key = PROPS_PREFIX + path.to_string();
+
+            var writer = new ElementWriter();
+            // Use write_element to include type code, so read_element can read it back
+            writer.write_element(new Invercargill.NativeElement<Invercargill.Properties>(properties));
+
+            _dbm.set(key, writer.to_binary_data());
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public Invercargill.Properties? load_properties(Core.EntityPath path) throws StorageError {
+            string key = PROPS_PREFIX + path.to_string();
+            var data = _dbm.get(key);
+
+            if (data == null) {
+                return null;
+            }
+
+            var reader = new ElementReader((!) data);
+            try {
+                var element = reader.read_element();
+                if (element.is_null()) {
+                    return null;
+                }
+                return element.as<Invercargill.Properties>();
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read properties: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public void add_child(Core.EntityPath parent, string child_name) throws StorageError {
+            var children = load_children_set(parent);
+            try {
+                children.set(child_name, true);
+            } catch (Invercargill.IndexError e) {
+                throw new StorageError.IO_ERROR("Failed to add child: %s".printf(e.message));
+            }
+            save_children_set(parent, children);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public void remove_child(Core.EntityPath parent, string child_name) throws StorageError {
+            var children = load_children_set(parent);
+            try {
+                children.remove(child_name);
+            } catch (Invercargill.IndexError e) {
+                // Child not in set, that's fine
+            }
+            save_children_set(parent, children);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public Invercargill.Enumerable<string> get_children(Core.EntityPath parent) throws StorageError {
+            var children = load_children_set(parent);
+            return children.keys;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public bool has_child(Core.EntityPath parent, string child_name) throws StorageError {
+            var children = load_children_set(parent);
+            return children.has(child_name);
+        }
+
+        /**
+         * Loads the children set for a category.
+         *
+         * The set is stored as an array of strings and materialized into a
+         * Dictionary keyed by child name for O(1) membership checks.
+         */
+        private Invercargill.DataStructures.Dictionary<string, bool> load_children_set(Core.EntityPath parent) throws StorageError {
+            string key = CHILDREN_PREFIX + parent.to_string();
+            var data = _dbm.get(key);
+
+            var result = new Invercargill.DataStructures.Dictionary<string, bool>();
+
+            if (data == null) {
+                return result;
+            }
+
+            var reader = new ElementReader((!) data);
+            try {
+                var element = reader.read_element();
+                if (element.is_null()) {
+                    return result;
+                }
+                // The children set is stored as an array of strings
+                var array = element.as<Invercargill.Enumerable<Invercargill.Element>>();
+                foreach (var child_element in array) {
+                    if (!child_element.is_null()) {
+                        string child_name = child_element.as<string>();
+                        try {
+                            result.set(child_name, true);
+                        } catch (Invercargill.IndexError e) {
+                            // Duplicate entry in stored data; skip it.
+                        }
+                    }
+                }
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read children: %s".printf(e.message));
+            }
+
+            return result;
+        }
+
+        /**
+         * Saves the children set for a category.
+         *
+         * An empty set is represented by the absence of the key.
+         */
+        private void save_children_set(Core.EntityPath parent, Invercargill.DataStructures.Dictionary<string, bool> children) throws StorageError {
+            string key = CHILDREN_PREFIX + parent.to_string();
+
+            if (children.count() == 0) {
+                // Tolerate the key not existing yet.
+                delete_if_exists(key);
+                return;
+            }
+
+            // Store as array of strings
+            var array = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+            foreach (var child_name in children.keys) {
+                array.add(new Invercargill.NativeElement<string>(child_name));
+            }
+
+            var writer = new ElementWriter();
+            // Use write_element to include type code, so read_element can read it back
+            writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(array));
+            _dbm.set(key, writer.to_binary_data());
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public void store_category_config(Core.EntityPath path, string type_label, string expression) throws StorageError {
+            string key = CONFIG_PREFIX + path.to_string();
+
+            var writer = new ElementWriter();
+            // Use write_element to include type code, so read_element can read it back
+            writer.write_element(new Invercargill.NativeElement<string>(type_label));
+            writer.write_element(new Invercargill.NativeElement<string>(expression));
+
+            _dbm.set(key, writer.to_binary_data());
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public CategoryConfig? get_category_config(Core.EntityPath path) throws StorageError {
+            string key = CONFIG_PREFIX + path.to_string();
+            var data = _dbm.get(key);
+
+            if (data == null) {
+                return null;
+            }
+
+            var reader = new ElementReader((!) data);
+            try {
+                var label_element = reader.read_element();
+                var expr_element = reader.read_element();
+
+                if (label_element.is_null() || expr_element.is_null()) {
+                    return null;
+                }
+
+                string type_label = label_element.as<string>();
+                string expression = expr_element.as<string>();
+
+                return new CategoryConfig(type_label, expression);
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read category config: %s".printf(e.message));
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public void store_catalogue_config(Core.EntityPath path, string type_label, string expression) throws StorageError {
+            string key = CAT_CONFIG_PREFIX + path.to_string();
+
+            var writer = new ElementWriter();
+            writer.write_element(new Invercargill.NativeElement<string>(type_label));
+            writer.write_element(new Invercargill.NativeElement<string>(expression));
+
+            _dbm.set(key, writer.to_binary_data());
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public CatalogueConfig? get_catalogue_config(Core.EntityPath path) throws StorageError {
+            string key = CAT_CONFIG_PREFIX + path.to_string();
+            var data = _dbm.get(key);
+
+            if (data == null) {
+                return null;
+            }
+
+            var reader = new ElementReader((!) data);
+            try {
+                var label_element = reader.read_element();
+                var expr_element = reader.read_element();
+
+                if (label_element.is_null() || expr_element.is_null()) {
+                    return null;
+                }
+
+                string type_label = label_element.as<string>();
+                string expression = expr_element.as<string>();
+
+                return new CatalogueConfig(type_label, expression);
+            } catch (Invercargill.ElementError e) {
+                throw new StorageError.CORRUPT_DATA("Failed to read catalogue config: %s".printf(e.message));
+            }
+        }
+    }
+}

+ 65 - 0
src/Storage/StorageError.vala

@@ -0,0 +1,65 @@
+/**
+ * StorageError - Error domain for storage operations
+ * 
+ * Defines error codes for storage layer operations including
+ * Dbm operations, serialization, and entity persistence.
+ * 
+ * @version 0.1
+ * @since 0.1
+ */
+namespace Implexus.Storage {
+
+/**
+ * Error domain for storage operations.
+ * 
+ * These errors can occur during:
+ * - Database open/close operations
+ * - Read/write operations
+ * - Transaction management
+ * - Data serialization/deserialization
+ *
+ * Note: enum members must keep their declaration order — reordering would
+ * change their numeric values and break persisted/transmitted error codes.
+ */
+public errordomain StorageError {
+    /** I/O error during file operations */
+    IO_ERROR,
+    
+    /** Data corruption detected (e.g. deserialization failure) */
+    CORRUPT_DATA,
+    
+    /** Requested key was not found */
+    KEY_NOT_FOUND,
+    
+    /** Database is already open */
+    ALREADY_OPEN,
+    
+    /** Database is not open */
+    NOT_OPEN,
+    
+    /** Transaction is already active */
+    TRANSACTION_ACTIVE,
+    
+    /** No transaction is active */
+    NO_TRANSACTION,
+    
+    /** Transaction commit failed */
+    COMMIT_FAILED,
+    
+    /** Transaction rollback failed */
+    ROLLBACK_FAILED,
+    
+    /** Invalid key format */
+    INVALID_KEY,
+    
+    /** Invalid data format */
+    INVALID_DATA,
+    
+    /** Storage quota exceeded */
+    QUOTA_EXCEEDED,
+    
+    /** Permission denied */
+    PERMISSION_DENIED,
+    
+    /** Unknown storage error */
+    UNKNOWN
+}
+
+} // namespace Implexus.Storage

+ 147 - 0
src/meson.build

@@ -0,0 +1,147 @@
+# Core sources
+core_sources = files(
+    'Core/EntityType.vala',
+    'Core/EntityError.vala',
+    'Core/EntityPath.vala',
+    'Core/Entity.vala',
+    'Core/Engine.vala',
+    'Core/EntitySet.vala',
+    'Core/SafePath.vala'
+)
+
+# Storage sources (Vala only).  The pure-Vala list is kept in its own
+# variable so it can be reused when building the Vala-only source list for
+# GIR generation below; C helper sources are appended separately.
+storage_vala_sources = files(
+    'Storage/StorageError.vala',
+    'Storage/Dbm.vala',
+    'Storage/DbmOperation.vala',
+    'Storage/AsyncDbmQueue.vala',
+    'Storage/FilesystemDbm.vala',
+    'Storage/ElementSerializer.vala',
+    # Low-level prefix stores
+    'Storage/LowLevel/EntityMetadataStorage.vala',
+    'Storage/LowLevel/PropertiesStorage.vala',
+    'Storage/LowLevel/ChildrenStorage.vala',
+    'Storage/LowLevel/CategoryConfigStorage.vala',
+    'Storage/LowLevel/CatalogueConfigStorage.vala',
+    'Storage/LowLevel/TypeIndexStorage.vala',
+    'Storage/LowLevel/CategoryIndexStorage.vala',
+    'Storage/LowLevel/CatalogueIndexStorage.vala',
+    'Storage/LowLevel/TextIndexStorage.vala',
+    # High-level entity stores
+    'Storage/HighLevel/EntityStore.vala',
+    'Storage/HighLevel/DocumentStore.vala',
+    'Storage/HighLevel/ContainerStore.vala',
+    'Storage/HighLevel/CategoryStore.vala',
+    'Storage/HighLevel/CatalogueStore.vala',
+    'Storage/HighLevel/IndexStore.vala',
+    # Main storage implementation
+    'Storage/Storage.vala'
+)
+
+# GDBM-based storage
+gdbm_vala_sources = files('Storage/Gdbm/GdbmDbm.vala')
+gdbm_c_sources = files('Storage/Gdbm/gdbm_wrapper.c')
+
+# LMDB-based storage
+lmdb_sources = files(
+    'Storage/Lmdb/LmdbDbm.vala'
+)
+
+# Full Vala storage list, then the complete storage list including C helpers.
+storage_vala_sources = storage_vala_sources + gdbm_vala_sources + lmdb_sources
+storage_sources = storage_vala_sources + gdbm_c_sources
+
+# Entity sources
+entity_sources = files(
+    'Entities/AbstractEntity.vala',
+    'Entities/Container.vala',
+    'Entities/Document.vala',
+    'Entities/Category.vala',
+    'Entities/Catalogue.vala',
+    'Entities/Index.vala'
+)
+
+# Engine sources
+engine_sources = files(
+    'Engine/EngineConfiguration.vala',
+    'Engine/EngineFactory.vala',
+    'Engine/ConnectionString.vala',
+    'Engine/EmbeddedTransaction.vala',
+    'Engine/EmbeddedEngine.vala',
+    'Engine/RemoteEngine.vala',
+    'Engine/HookManager.vala'
+)
+
+# Protocol sources
+protocol_sources = files(
+    'Protocol/ProtocolError.vala',
+    'Protocol/Message.vala',
+    'Protocol/MessageReader.vala',
+    'Protocol/MessageWriter.vala'
+)
+
+# Server sources
+server_sources = files(
+    'Server/Server.vala',
+    'Server/ClientHandler.vala'
+)
+
+# Migration sources
+migration_sources = files(
+    'Migrations/Migration.vala',
+    'Migrations/MigrationRunner.vala',
+    'Migrations/MigrationStorage.vala',
+    'Migrations/MigrationError.vala',
+    'Migrations/BootstrapMigration.vala'
+)
+
+# Main library file
+lib_sources = files(
+    'Implexus.vala'
+)
+
+# Combine all sources (Vala + C) for compiling the library itself
+all_sources = lib_sources + core_sources + storage_sources + entity_sources + engine_sources + protocol_sources + server_sources + migration_sources
+
+# Vala-only sources for GIR generation (exclude C files).
+# Fix: this list previously dropped the base Storage/*.vala files (only the
+# gdbm/lmdb backends were included), which left the whole Implexus.Storage
+# namespace out of the generated GIR.  storage_vala_sources now carries the
+# complete Vala storage list, backends included.
+vala_sources = lib_sources + core_sources + storage_vala_sources + entity_sources + engine_sources + protocol_sources + server_sources + migration_sources
+
+# Build library dependencies
+lib_deps = [glib_dep, gobject_dep, gio_dep, invercargill_dep, gdbm_dep, lmdb_dep]
+lib_vala_args = ['--pkg', 'gdbm', '--pkg', 'lmdb']
+
+implexus_lib = library('implexus-0.1',
+    all_sources,
+    dependencies: lib_deps,
+    vala_args: lib_vala_args,
+    version: '0.1.0',
+    install: true,
+    install_dir: [true, true, true]
+)
+
+gnome = import('gnome')
+
+# Get the generated header file for GIR scanning
+implexus_header = meson.current_build_dir() / 'implexus-0.1.h'
+
+implexus_gir = gnome.generate_gir(implexus_lib,
+    sources: vala_sources + [implexus_header],
+    namespace: 'Implexus',
+    nsversion: '0.1',
+    identifier_prefix: 'Implexus',
+    symbol_prefix: 'implexus',
+    includes: ['GObject-2.0', 'Gio-2.0'],
+    install: true
+)
+
+pkgconfig = import('pkgconfig')
+pkgconfig.generate(implexus_lib,
+    name: 'implexus-0.1',
+    description: 'Path-based document database library for Vala',
+    version: meson.project_version(),
+    requires: ['glib-2.0', 'gobject-2.0', 'invercargill-1']
+)
+
+implexus_dep = declare_dependency(
+    link_with: implexus_lib,
+    dependencies: [glib_dep, gobject_dep, gio_dep, invercargill_dep],
+    include_directories: include_directories('.')
+)

+ 450 - 0
tests/Core/EntityPathTest.vala

@@ -0,0 +1,450 @@
+/**
+ * EntityPathTest - Unit tests for EntityPath
+ */
+using Implexus.Core;
+
+public static int main(string[] args) {
+    int passed = 0;
+    int failed = 0;
+
+    // Each test result is computed first, then handed to run_test() for
+    // counting and reporting — output is identical to the original
+    // per-test if/else boilerplate, just without the duplication.
+    run_test("test_root_path", test_root_path(), ref passed, ref failed);
+    run_test("test_simple_path", test_simple_path(), ref passed, ref failed);
+    run_test("test_nested_path", test_nested_path(), ref passed, ref failed);
+    run_test("test_path_construction", test_path_construction(), ref passed, ref failed);
+    run_test("test_parent_relationship", test_parent_relationship(), ref passed, ref failed);
+    run_test("test_child_relationship", test_child_relationship(), ref passed, ref failed);
+    run_test("test_ancestor_descendant", test_ancestor_descendant(), ref passed, ref failed);
+    run_test("test_path_equality", test_path_equality(), ref passed, ref failed);
+    run_test("test_path_to_string", test_path_to_string(), ref passed, ref failed);
+    run_test("test_path_key_serialization", test_path_key_serialization(), ref passed, ref failed);
+    run_test("test_resolve_relative", test_resolve_relative(), ref passed, ref failed);
+    run_test("test_edge_cases", test_edge_cases(), ref passed, ref failed);
+    run_test("test_path_depth", test_path_depth(), ref passed, ref failed);
+    run_test("test_path_name", test_path_name(), ref passed, ref failed);
+    run_test("test_relative_to", test_relative_to(), ref passed, ref failed);
+    run_test("test_slash_rejection", test_slash_rejection(), ref passed, ref failed);
+    run_test("test_key_separator_is_slash", test_key_separator_is_slash(), ref passed, ref failed);
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    return failed > 0 ? 1 : 0;
+}
+
+// Bookkeeping for one test: bump the appropriate counter and print a
+// "PASS: name" / "FAIL: name" line.  The result is computed by the caller
+// before this runs, preserving the original output ordering.
+void run_test(string name, bool result, ref int passed, ref int failed) {
+    if (result) {
+        passed++;
+    } else {
+        failed++;
+    }
+    stdout.printf("%s: %s\n", result ? "PASS" : "FAIL", name);
+}
+
+// Test 1: Root path parsing
+bool test_root_path() {
+    // The root path can be spelled three ways; all must behave identically.
+    var explicit_root = new EntityPath.root();
+    var slash_root = new EntityPath("/");
+    var empty_root = new EntityPath("");
+
+    return explicit_root.is_root
+        && slash_root.is_root
+        && empty_root.is_root
+        && explicit_root.to_string() == "/"
+        && explicit_root.depth == 0
+        && explicit_root.name == "";
+}
+
+// Test 2: Simple path parsing
+bool test_simple_path() {
+    // A single-segment path has depth 1 and round-trips through to_string().
+    var users_path = new EntityPath("/users");
+    return !users_path.is_root
+        && users_path.depth == 1
+        && users_path.name == "users"
+        && users_path.to_string() == "/users";
+}
+
+// Test 3: Nested path parsing
+bool test_nested_path() {
+    // Three segments: depth counts them, name is the last one.
+    var nested = new EntityPath("/users/john/profile");
+    return !nested.is_root
+        && nested.depth == 3
+        && nested.name == "profile"
+        && nested.to_string() == "/users/john/profile";
+}
+
+// Test 4: Path construction
+bool test_path_construction() {
+    // Build /users/john incrementally from the root via append_child().
+    var base_path = new EntityPath.root();
+    var level1 = base_path.append_child("users");
+    var level2 = level1.append_child("john");
+
+    return base_path.is_root
+        && level1.to_string() == "/users"
+        && level2.to_string() == "/users/john";
+}
+
+// Test 5: Parent relationship
+bool test_parent_relationship() {
+    // Parent of "/users/john" is "/users" (non-root); the root path's
+    // parent is the root itself (same instance comparison, as in original).
+    var child = new EntityPath("/users/john");
+    var up = child.parent;
+    var root = new EntityPath.root();
+
+    return !up.is_root
+        && up.to_string() == "/users"
+        && root.parent == root;
+}
+
+// Test 6: Child relationship
+bool test_child_relationship() {
+    // append_child() and parent must be inverses of each other.
+    var users = new EntityPath("/users");
+    var john = users.append_child("john");
+
+    return john.to_string() == "/users/john"
+        && john.parent.equals(users);
+}
+
+// Test 7: Ancestor/descendant
+bool test_ancestor_descendant() {
+    var root = new EntityPath.root();
+    var users = new EntityPath("/users");
+    var john = new EntityPath("/users/john");
+    var profile = new EntityPath("/users/john/profile");
+
+    // Positive cases: every strict prefix is an ancestor.
+    bool ancestors_ok = root.is_ancestor_of(users)
+        && root.is_ancestor_of(john)
+        && users.is_ancestor_of(john)
+        && john.is_ancestor_of(profile);
+
+    // Negative cases: no path is its own ancestor; the relation is directed.
+    bool negatives_ok = !root.is_ancestor_of(root)
+        && !john.is_ancestor_of(users);
+
+    // is_descendant_of() is the mirror of is_ancestor_of().
+    bool descendants_ok = profile.is_descendant_of(root)
+        && profile.is_descendant_of(users)
+        && john.is_descendant_of(root);
+
+    return ancestors_ok && negatives_ok && descendants_ok;
+}
+
+// Test 8: Path equality
+bool test_path_equality() {
+    var a = new EntityPath("/users/john");
+    var b = new EntityPath("/users/john");
+    var c = new EntityPath("/users/jane");
+    var r1 = new EntityPath.root();
+    var r2 = new EntityPath("/");
+
+    // equals() is value-based, and equal paths must hash identically.
+    return a.equals(b)
+        && !a.equals(c)
+        && r1.equals(r2)
+        && a.hash_code() == b.hash_code()
+        && r1.hash_code() == r2.hash_code();
+}
+
+// Test 9: Path to string
+bool test_path_to_string() {
+    // Parsing then printing must reproduce the original text exactly.
+    string[] samples = { "/", "/users", "/users/john", "/a/b/c/d" };
+
+    foreach (var sample in samples) {
+        if (new EntityPath(sample).to_string() != sample) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// Test 10: Path key serialization
+bool test_path_key_serialization() {
+    // to_key()/from_key() must round-trip for root, shallow and deep paths.
+    EntityPath[] samples = {
+        new EntityPath.root(),
+        new EntityPath("/users"),
+        new EntityPath("/users/john/profile")
+    };
+
+    foreach (var sample in samples) {
+        var round_tripped = EntityPath.from_key(sample.to_key());
+        if (!sample.equals(round_tripped)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// Test 11: Resolve relative path
+bool test_resolve_relative() {
+    var anchor = new EntityPath("/users/john");
+
+    // Simple child, parent navigation (".."), current directory ("."),
+    // and a combined "../sibling" form, in that order.
+    return anchor.resolve(new EntityPath("profile")).to_string() == "/users/john/profile"
+        && anchor.resolve(new EntityPath("..")).to_string() == "/users"
+        && anchor.resolve(new EntityPath(".")).to_string() == "/users/john"
+        && anchor.resolve(new EntityPath("../jane")).to_string() == "/users/jane";
+}
+
+// Test 12: Edge cases
+bool test_edge_cases() {
+    // A trailing slash is dropped during normalization.
+    if (new EntityPath("/users/").to_string() != "/users") {
+        return false;
+    }
+
+    // Consecutive slashes yield no empty segments: "/users//john" -> depth 2.
+    if (new EntityPath("/users//john").depth != 2) {
+        return false;
+    }
+
+    // Every spelling of the root compares equal.
+    var canonical_root = new EntityPath.root();
+    return canonical_root.equals(new EntityPath("/"))
+        && canonical_root.equals(new EntityPath(""));
+}
+
+// Test 13: Path depth
+bool test_path_depth() {
+    // depth equals the number of segments in the parsed path.
+    return new EntityPath.root().depth == 0
+        && new EntityPath("/a").depth == 1
+        && new EntityPath("/a/b").depth == 2
+        && new EntityPath("/a/b/c").depth == 3;
+}
+
+// Test 14: Path name
+bool test_path_name() {
+    // name is the final segment; the root has an empty name.
+    return new EntityPath.root().name == ""
+        && new EntityPath("/users").name == "users"
+        && new EntityPath("/users/john").name == "john";
+}
+
+// Test 15: Relative path
+bool test_relative_to() {
+    var root = new EntityPath.root();
+    var users = new EntityPath("/users");
+    var john = new EntityPath("/users/john");
+    var profile = new EntityPath("/users/john/profile");
+
+    // relative_to() against successively shallower ancestors.
+    try {
+        if (profile.relative_to(john).to_string() != "/profile"
+            || profile.relative_to(root).to_string() != "/users/john/profile"
+            || john.relative_to(users).to_string() != "/john") {
+            return false;
+        }
+    } catch (EngineError e) {
+        return false;
+    }
+
+    // relative_to() must reject a target that is not an ancestor.
+    try {
+        john.relative_to(profile);
+        return false;  // no error raised
+    } catch (EngineError e) {
+        // expected failure path
+    }
+
+    return true;
+}
+
+// Test 16: Slash rejection in entity names
+bool test_slash_rejection() {
+    // Slashes in a parsed path always act as separators, never as part of
+    // a name: "/users/john" must split into two segments ("users", "john").
+    var parsed = new EntityPath("/users/john");
+    if (parsed.depth != 2 || parsed.name != "john") {
+        return false;
+    }
+
+    // The key separator constant itself is the slash character.
+    return EntityPath.KEY_SEPARATOR == "/";
+}
+
+// Test 17: Key separator is slash
+bool test_key_separator_is_slash() {
+    // Deep path: no leading slash in the key, "/" between segments.
+    var deep = new EntityPath("/users/john/profile");
+    if (deep.to_key() != "users/john/profile") {
+        return false;
+    }
+    if (!deep.equals(EntityPath.from_key(deep.to_key()))) {
+        return false;
+    }
+
+    // Root serializes to the empty key and parses back to root.
+    if (new EntityPath.root().to_key() != "") {
+        return false;
+    }
+    if (!EntityPath.from_key("").is_root) {
+        return false;
+    }
+
+    // Single segment: the key is just the segment name.
+    return new EntityPath("/users").to_key() == "users";
+}

+ 637 - 0
tests/Core/SafePathTest.vala

@@ -0,0 +1,637 @@
+/**
+ * SafePathTest - Unit tests for SafePath
+ * 
+ * Comprehensive tests for the SafePath factory class which creates
+ * URL-encoded EntityPath instances.
+ */
+using Implexus.Core;
+
+public static int main(string[] args) {
+    int passed = 0;
+    int failed = 0;
+
+    // Each result is computed before run_test() prints its verdict, so any
+    // diagnostics a failing test prints still appear before its FAIL line,
+    // exactly as with the original per-test if/else boilerplate.
+
+    // 1. Basic Path Construction Tests
+    run_test("test_basic_single_segment", test_basic_single_segment(), ref passed, ref failed);
+    run_test("test_basic_multi_segment", test_basic_multi_segment(), ref passed, ref failed);
+    run_test("test_basic_empty_segments", test_basic_empty_segments(), ref passed, ref failed);
+
+    // 2. URL Encoding Tests
+    run_test("test_encoding_spaces", test_encoding_spaces(), ref passed, ref failed);
+    run_test("test_encoding_special_chars", test_encoding_special_chars(), ref passed, ref failed);
+    run_test("test_encoding_unicode", test_encoding_unicode(), ref passed, ref failed);
+    run_test("test_encoding_reserved_chars", test_encoding_reserved_chars(), ref passed, ref failed);
+
+    // 3. Variadic API Tests
+    run_test("test_variadic_null_first", test_variadic_null_first(), ref passed, ref failed);
+    run_test("test_variadic_single_segment", test_variadic_single_segment(), ref passed, ref failed);
+    run_test("test_variadic_multiple_segments", test_variadic_multiple_segments(), ref passed, ref failed);
+
+    // 4. Array API Tests
+    run_test("test_array_normal", test_array_normal(), ref passed, ref failed);
+    run_test("test_array_empty", test_array_empty(), ref passed, ref failed);
+    run_test("test_array_special_chars", test_array_special_chars(), ref passed, ref failed);
+
+    // 5. Decode Tests
+    run_test("test_decode_space", test_decode_space(), ref passed, ref failed);
+    run_test("test_decode_slash", test_decode_slash(), ref passed, ref failed);
+    run_test("test_decode_unicode", test_decode_unicode(), ref passed, ref failed);
+    run_test("test_decode_invalid_encoding", test_decode_invalid_encoding(), ref passed, ref failed);
+
+    // 6. Integration Tests
+    run_test("test_integration_entity_path", test_integration_entity_path(), ref passed, ref failed);
+    run_test("test_integration_to_string", test_integration_to_string(), ref passed, ref failed);
+    run_test("test_integration_path_operations", test_integration_path_operations(), ref passed, ref failed);
+    run_test("test_round_trip", test_round_trip(), ref passed, ref failed);
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    return failed > 0 ? 1 : 0;
+}
+
+// Bookkeeping for one test: bump the appropriate counter and print a
+// "PASS: name" / "FAIL: name" line.
+void run_test(string name, bool result, ref int passed, ref int failed) {
+    if (result) {
+        passed++;
+    } else {
+        failed++;
+    }
+    stdout.printf("%s: %s\n", result ? "PASS" : "FAIL", name);
+}
+
+// ========================================
+// 1. Basic Path Construction Tests
+// ========================================
+
+// Test: Single segment path
+bool test_basic_single_segment() {
+    var single = SafePath.path("catalogue", null);
+    return !single.is_root
+        && single.depth == 1
+        && single.name == "catalogue"
+        && single.to_string() == "/catalogue";
+}
+
+// Test: Multi-segment path
+bool test_basic_multi_segment() {
+    var multi = SafePath.path("catalogue", "category", "document", null);
+    return !multi.is_root
+        && multi.depth == 3
+        && multi.name == "document"
+        && multi.to_string() == "/catalogue/category/document";
+}
+
+// Test: Empty segments handling
+bool test_basic_empty_segments() {
+    // An empty string segment is preserved as an (empty) encoded segment,
+    // so the resulting depth counts all three arguments.
+    var mixed = SafePath.path("a", "", "c", null);
+    return mixed.depth == 3;
+}
+
+// ========================================
+// 2. URL Encoding Tests
+// ========================================
+
+// Test: Spaces in segments (should be encoded as %20)
+bool test_encoding_spaces() {
+    var spaced = SafePath.path("users", "john doe", null);
+    string rendered = spaced.to_string();
+
+    // The space must appear as %20 and never literally.
+    if (!rendered.contains("%20") || rendered.contains(" ")) {
+        return false;
+    }
+    if (rendered != "/users/john%20doe") {
+        return false;
+    }
+
+    // Multiple spaces are all encoded.
+    return SafePath.path("hello world test", null).to_string() == "/hello%20world%20test";
+}
+
+// Test: Special characters (/, ?, #, =, &)
+bool test_encoding_special_chars() {
+    // Each input contains one reserved character; the rendered path must
+    // contain the matching percent escape.
+    string[] inputs = { "a/b", "query?param", "anchor#link", "key=value", "a&b" };
+    string[] escapes = { "%2F", "%3F", "%23", "%3D", "%26" };
+
+    for (int i = 0; i < inputs.length; i++) {
+        if (!SafePath.path(inputs[i], null).to_string().contains(escapes[i])) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// Test: Unicode characters (Japanese, emoji)
+// Note: GLib.Uri.escape_string preserves unicode characters as-is (IRIs)
+// This is correct behavior for modern URIs
+bool test_encoding_unicode() {
+    // Japanese text survives unescaped inside the rendered path.
+    string japanese = SafePath.path("ユーザー", null).to_string();
+    if (!japanese.has_prefix("/") || !japanese.contains("ユーザー")) {
+        return false;
+    }
+
+    // Emoji are preserved too.
+    if (!SafePath.path("hello🎉world", null).to_string().contains("🎉")) {
+        return false;
+    }
+
+    // As are Chinese characters.
+    if (!SafePath.path("用户", null).to_string().contains("用户")) {
+        return false;
+    }
+
+    // Reserved URI characters embedded in unicode segments are still encoded.
+    return SafePath.path("ユーザー/名前", null).to_string().contains("%2F");
+}
+
+// Test: Reserved characters that should be encoded
+bool test_encoding_reserved_chars() {
+    string[] reserved_chars = { ":", "/", "?", "#", "[", "]", "@", "!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=" };
+
+    foreach (var symbol in reserved_chars) {
+        string rendered = SafePath.path("test" + symbol + "value", null).to_string();
+        // Fail only when the reserved character survives literally AND no
+        // percent escape is present at all ("/" is exempt: it separates
+        // segments in the rendered form).
+        if (symbol != "/" && rendered.contains(symbol) && !rendered.contains("%")) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// ========================================
+// 3. Variadic API Tests
+// ========================================
+
+// Test: Null as first argument (should return root path)
+bool test_variadic_null_first() {
+    var root = SafePath.path(null);
+    return root.is_root
+        && root.depth == 0
+        && root.to_string() == "/";
+}
+
+// Test: Single segment + null
+bool test_variadic_single_segment() {
+    var single = SafePath.path("catalogue", null);
+    return !single.is_root
+        && single.depth == 1
+        && single.name == "catalogue";
+}
+
+// Test: Multiple segments + null
+bool test_variadic_multiple_segments() {
+    var deep = SafePath.path("a", "b", "c", "d", "e", null);
+
+    // Five segments, rendered in order, with parent navigation intact.
+    return deep.depth == 5
+        && deep.to_string() == "/a/b/c/d/e"
+        && deep.name == "e"
+        && deep.parent.name == "d";
+}
+
+// ========================================
+// 4. Array API Tests
+// ========================================
+
+// Test: Normal array of segments
+bool test_array_normal() {
+    string[] segments = { "catalogue", "category", "document" };
+    var built = SafePath.from_array(segments);
+
+    return built.depth == 3
+        && built.to_string() == "/catalogue/category/document"
+        && built.name == "document";
+}
+
+// Test: Empty array (should return root path)
+bool test_array_empty() {
+    string[] no_segments = { };
+    var root = SafePath.from_array(no_segments);
+
+    return root.is_root
+        && root.depth == 0
+        && root.to_string() == "/";
+}
+
+// Test: Array with special characters
+bool test_array_special_chars() {
+    string[] segments = { "user name", "doc/with/slashes", "query?test" };
+    var built = SafePath.from_array(segments);
+    if (built.depth != 3) {
+        return false;
+    }
+
+    // Space, slash and question mark must all be percent-encoded.
+    string rendered = built.to_string();
+    return rendered.contains("%20")
+        && rendered.contains("%2F")
+        && rendered.contains("%3F");
+}
+
+// ========================================
+// 5. Decode Tests
+// ========================================
+
+// Test: Decoding %20 to space
+bool test_decode_space() {
+    try {
+        // Single and repeated %20 escapes decode back to spaces.
+        return SafePath.decode_segment("john%20doe") == "john doe"
+            && SafePath.decode_segment("hello%20world%20test") == "hello world test";
+    } catch (EntityError e) {
+        stdout.printf("Unexpected error: %s\n", e.message);
+        return false;
+    }
+}
+
+// Test: Decoding %2F to /
+bool test_decode_slash() {
+    try {
+        return SafePath.decode_segment("path%2Fwith%2Fslashes") == "path/with/slashes";
+    } catch (EntityError e) {
+        stdout.printf("Unexpected error: %s\n", e.message);
+        return false;
+    }
+}
+
+// Test: Decoding unicode percent-encoded strings
+//
+// Multi-byte UTF-8 sequences (Japanese text, an emoji) must decode
+// from their percent-encoded byte form back to the original text.
+bool test_decode_unicode() {
+    try {
+        // Japanese characters encoded byte-by-byte.
+        if (SafePath.decode_segment("%E3%83%A6%E3%83%BC%E3%82%B6%E3%83%BC") != "ユーザー") {
+            return false;
+        }
+        // A 4-byte emoji embedded between ASCII text.
+        return SafePath.decode_segment("hello%F0%9F%8E%89world") == "hello🎉world";
+    } catch (EntityError e) {
+        stdout.printf("Unexpected error: %s\n", e.message);
+        return false;
+    }
+}
+
+// Test: Invalid encoding error handling
+//
+// Every malformed percent sequence (bare "%", non-hex digits, or a
+// truncated escape at the end) must make decode_segment throw.
+bool test_decode_invalid_encoding() {
+    string[] malformed = { "invalid%", "invalid%GG", "test%2" };
+
+    foreach (var candidate in malformed) {
+        try {
+            SafePath.decode_segment(candidate);
+            return false;  // Should have thrown
+        } catch (EntityError e) {
+            // Expected - keep checking the remaining cases
+        }
+    }
+
+    return true;
+}
+
+// ========================================
+// 6. Integration Tests
+// ========================================
+
+// Test: Verify returned EntityPath works correctly
+//
+// Checks the full EntityPath surface on a SafePath result: is_root,
+// depth, name, parent navigation, and ancestry tests.
+bool test_integration_entity_path() {
+    var category = SafePath.path("catalogue", "category", null);
+
+    // A two-segment path is not root, has depth 2 and leaf "category".
+    if (category.is_root || category.depth != 2 || category.name != "category") {
+        return false;
+    }
+
+    var catalogue = category.parent;
+    if (catalogue.depth != 1 || catalogue.name != "catalogue") {
+        return false;
+    }
+
+    // Ancestry must hold in both directions.
+    return catalogue.is_ancestor_of(category)
+        && category.is_descendant_of(catalogue);
+}
+
+// Test: to_string() on resulting paths
+//
+// Covers single-segment, multi-segment, root, percent-encoded and
+// array-built paths.
+bool test_integration_to_string() {
+    // Simple single segment.
+    if (SafePath.path("a", null).to_string() != "/a") return false;
+
+    // Three segments joined with "/".
+    if (SafePath.path("a", "b", "c", null).to_string() != "/a/b/c") return false;
+
+    // No segments at all renders as root.
+    if (SafePath.path(null).to_string() != "/") return false;
+
+    // A segment with a space must render percent-encoded.
+    if (SafePath.path("hello world", null).to_string() != "/hello%20world") return false;
+
+    // The array API must render identically to the varargs API.
+    string[] xyz = { "x", "y", "z" };
+    return SafePath.from_array(xyz).to_string() == "/x/y/z";
+}
+
+// Test: Path operations (parent, append_child)
+//
+// Exercises parent navigation and append_child on a SafePath-built
+// path.  append_child takes pre-encoded segment names and performs
+// no encoding of its own.
+bool test_integration_path_operations() {
+    var category = SafePath.path("catalogue", "category", null);
+
+    if (category.parent.to_string() != "/catalogue") return false;
+
+    // Plain segment: append then verify rendering, leaf and parent.
+    var document = category.append_child("document");
+    if (document.to_string() != "/catalogue/category/document") return false;
+    if (document.name != "document") return false;
+    if (!document.parent.equals(category)) return false;
+
+    // Special characters must already be percent-encoded by the caller
+    // (use SafePath to encode first, or encode the segment manually).
+    var spaced = category.append_child("my%20document");
+    return spaced.to_string().contains("%20");
+}
+
+// Test: Round-trip encode/decode
+//
+// Every original string must survive encoding (via path construction)
+// followed by decode_segment: spaces, URL-reserved characters, and
+// multi-byte unicode including an emoji.
+bool test_round_trip() {
+    string[] originals = { "john doe", "a/b?c#d=e&f", "ユーザー🎉" };
+
+    try {
+        foreach (var original in originals) {
+            // path() encodes; the leaf name is the encoded form.
+            var encoded = SafePath.path(original, null).name;
+            if (SafePath.decode_segment(encoded) != original) {
+                return false;
+            }
+        }
+        return true;
+    } catch (EntityError e) {
+        stdout.printf("Unexpected error in round-trip: %s\n", e.message);
+        return false;
+    }
+}

+ 491 - 0
tests/Engine/ConnectionStringTest.vala

@@ -0,0 +1,491 @@
+/**
+ * ConnectionStringTest - Unit tests for ConnectionString
+ */
+using Implexus.Core;
+using Implexus.Engine;
+
+// Signature shared by every test function in this file.  No-target
+// delegate so plain static functions can be stored in an array.
+[CCode (has_target = false)]
+delegate bool TestFunc ();
+
+/**
+ * Entry point: runs every ConnectionString test in declaration order.
+ *
+ * The original runner repeated the same if/else block twenty times;
+ * this table-driven form keeps the registration a single line per
+ * test while producing byte-identical PASS/FAIL output and the same
+ * summary line and exit status.
+ */
+public static int main(string[] args) {
+    // Parallel arrays: names[i] labels funcs[i].
+    string[] names = {
+        "test_embedded_lmdb_short",
+        "test_embedded_gdbm_short",
+        "test_embedded_filesystem_short",
+        "test_embedded_full_form",
+        "test_remote_default_port",
+        "test_remote_custom_port",
+        "test_remote_with_timeout",
+        "test_remote_ip_address",
+        "test_try_parse_success",
+        "test_try_parse_failure",
+        "test_to_string_embedded",
+        "test_to_string_remote",
+        "test_to_configuration_embedded",
+        "test_to_configuration_remote",
+        "test_invalid_format_error",
+        "test_unknown_backend_error",
+        "test_missing_path_error",
+        "test_describe",
+        "test_backend_options",
+        "test_relative_path"
+    };
+    TestFunc[] funcs = {
+        test_embedded_lmdb_short,
+        test_embedded_gdbm_short,
+        test_embedded_filesystem_short,
+        test_embedded_full_form,
+        test_remote_default_port,
+        test_remote_custom_port,
+        test_remote_with_timeout,
+        test_remote_ip_address,
+        test_try_parse_success,
+        test_try_parse_failure,
+        test_to_string_embedded,
+        test_to_string_remote,
+        test_to_configuration_embedded,
+        test_to_configuration_remote,
+        test_invalid_format_error,
+        test_unknown_backend_error,
+        test_missing_path_error,
+        test_describe,
+        test_backend_options,
+        test_relative_path
+    };
+
+    int passed = 0;
+    int failed = 0;
+
+    for (int i = 0; i < funcs.length; i++) {
+        if (funcs[i]()) {
+            passed++;
+            stdout.printf("PASS: %s\n", names[i]);
+        } else {
+            failed++;
+            stdout.printf("FAIL: %s\n", names[i]);
+        }
+    }
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    // Non-zero exit status when any test failed.
+    return failed > 0 ? 1 : 0;
+}
+
+// Test 1: Embedded LMDB short form
+//
+// "lmdb:///path" must parse as an embedded LMDB connection.
+bool test_embedded_lmdb_short() {
+    try {
+        var conn = new ConnectionString("lmdb:///var/lib/db");
+        return !conn.is_remote
+            && conn.backend == "lmdb"
+            && conn.path == "/var/lib/db";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 2: Embedded GDBM short form
+//
+// "gdbm:///path" must parse as an embedded GDBM connection.
+bool test_embedded_gdbm_short() {
+    try {
+        var conn = new ConnectionString("gdbm:///var/lib/db");
+        return !conn.is_remote
+            && conn.backend == "gdbm"
+            && conn.path == "/var/lib/db";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 3: Embedded filesystem short form
+//
+// "filesystem:///path" must parse as an embedded filesystem backend.
+bool test_embedded_filesystem_short() {
+    try {
+        var conn = new ConnectionString("filesystem:///var/lib/db");
+        return !conn.is_remote
+            && conn.backend == "filesystem"
+            && conn.path == "/var/lib/db";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 4: Embedded full form
+//
+// The long "implexus://embedded?..." form must parse to the same
+// result as the short backend:// form.
+bool test_embedded_full_form() {
+    try {
+        var conn = new ConnectionString("implexus://embedded?backend=lmdb&path=/var/lib/db");
+        return !conn.is_remote
+            && conn.backend == "lmdb"
+            && conn.path == "/var/lib/db";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 5: Remote default port
+//
+// A remote URI without an explicit port must leave port null so the
+// consumer applies the default.
+bool test_remote_default_port() {
+    try {
+        var conn = new ConnectionString("implexus://server.example.com");
+        return conn.is_remote
+            && conn.host == "server.example.com"
+            && conn.port == null;  // null means "use the default port"
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 6: Remote custom port
+//
+// An explicit ":9999" suffix must populate the port.
+bool test_remote_custom_port() {
+    try {
+        var conn = new ConnectionString("implexus://server.example.com:9999");
+        return conn.is_remote
+            && conn.host == "server.example.com"
+            && conn.port != null
+            && conn.port == 9999;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 7: Remote with timeout
+//
+// The "?timeout=60" query parameter must be parsed into the timeout
+// field alongside host and port.
+bool test_remote_with_timeout() {
+    try {
+        var conn = new ConnectionString("implexus://server.example.com:9999?timeout=60");
+        return conn.is_remote
+            && conn.host == "server.example.com"
+            && conn.port == 9999
+            && conn.timeout != null
+            && conn.timeout == 60;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 8: Remote IP address
+//
+// A dotted-quad host must be accepted, not just DNS names.
+bool test_remote_ip_address() {
+    try {
+        var conn = new ConnectionString("implexus://192.168.1.100:9876");
+        return conn.is_remote
+            && conn.host == "192.168.1.100"
+            && conn.port == 9876;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 9: Try parse success
+//
+// try_parse must return a non-null object for a well-formed string.
+bool test_try_parse_success() {
+    return ConnectionString.try_parse("lmdb:///var/lib/db") != null;
+}
+
+// Test 10: Try parse failure
+//
+// try_parse must return null (not throw) for an invalid string.
+bool test_try_parse_failure() {
+    return ConnectionString.try_parse("invalid://format") == null;
+}
+
+// Test 11: To string embedded
+//
+// Rendering an embedded connection back to a string must reproduce
+// the original short form exactly.
+bool test_to_string_embedded() {
+    try {
+        var conn = new ConnectionString("lmdb:///var/lib/db");
+        return conn.to_connection_string() == "lmdb:///var/lib/db";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 12: To string remote
+//
+// The rendered remote form must mention at least the host and port;
+// the exact layout of query parameters is not pinned down here.
+bool test_to_string_remote() {
+    try {
+        var conn = new ConnectionString("implexus://server.example.com:9999?timeout=60");
+        var rendered = conn.to_connection_string();
+        return rendered.contains("server.example.com")
+            && rendered.contains("9999");
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 13: To configuration embedded
+//
+// Converting an embedded connection string to an EngineConfiguration
+// must set EMBEDDED mode and carry the storage path through.
+bool test_to_configuration_embedded() {
+    try {
+        var config = new ConnectionString("filesystem:///tmp/test-db").to_configuration();
+        return config.mode == EngineMode.EMBEDDED
+            && config.storage_path == "/tmp/test-db";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 14: To configuration remote
+//
+// Remote conversion must set REMOTE mode, carry host/port through,
+// and convert the timeout from seconds to milliseconds.
+bool test_to_configuration_remote() {
+    try {
+        var config = new ConnectionString("implexus://server.example.com:9999?timeout=60").to_configuration();
+        return config.mode == EngineMode.REMOTE
+            && config.host == "server.example.com"
+            && config.port == 9999
+            && config.timeout_ms == 60000;  // 60 seconds in ms
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 15: Invalid format error
+//
+// A string that is not a URI at all must raise INVALID_FORMAT;
+// any other outcome (no error, wrong code, other domain) fails.
+bool test_invalid_format_error() {
+    try {
+        new ConnectionString("not-a-valid-format");
+    } catch (ConnectionStringError e) {
+        return e is ConnectionStringError.INVALID_FORMAT;
+    } catch (Error e) {
+        return false;
+    }
+    return false;  // Should have thrown
+}
+
+// Test 16: Unknown backend error
+//
+// An unrecognised backend= value must raise UNKNOWN_BACKEND.
+bool test_unknown_backend_error() {
+    try {
+        new ConnectionString("implexus://embedded?backend=unknown&path=/tmp/db");
+    } catch (ConnectionStringError e) {
+        return e is ConnectionStringError.UNKNOWN_BACKEND;
+    } catch (Error e) {
+        return false;
+    }
+    return false;  // Should have thrown
+}
+
+// Test 17: Missing path error
+//
+// An embedded connection without path= must raise MISSING_PARAMETER.
+bool test_missing_path_error() {
+    try {
+        new ConnectionString("implexus://embedded?backend=lmdb");
+    } catch (ConnectionStringError e) {
+        return e is ConnectionStringError.MISSING_PARAMETER;
+    } catch (Error e) {
+        return false;
+    }
+    return false;  // Should have thrown
+}
+
+// Test 18: Describe method
+//
+// describe() must mention the mode and the distinguishing detail
+// (backend for embedded, host for remote).
+bool test_describe() {
+    try {
+        var embedded_desc = new ConnectionString("lmdb:///var/lib/db").describe();
+        if (!embedded_desc.contains("EMBEDDED") || !embedded_desc.contains("lmdb")) {
+            return false;
+        }
+
+        var remote_desc = new ConnectionString("implexus://server.example.com:9999").describe();
+        return remote_desc.contains("REMOTE")
+            && remote_desc.contains("server.example.com");
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 19: Backend options
+//
+// Optional backend tuning parameters (map_size, enable_cache,
+// cache_size) must all be parsed from the query string.
+bool test_backend_options() {
+    try {
+        var conn = new ConnectionString("implexus://embedded?backend=lmdb&path=/var/lib/db&map_size=2048&enable_cache=true&cache_size=5000");
+        return conn.map_size != null && conn.map_size == 2048
+            && conn.enable_cache != null && conn.enable_cache == true
+            && conn.cache_size != null && conn.cache_size == 5000;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 20: Relative path
+//
+// A "./relative" path must be accepted verbatim, not normalised.
+bool test_relative_path() {
+    try {
+        var conn = new ConnectionString("filesystem://./data/db");
+        return !conn.is_remote && conn.path == "./data/db";
+    } catch (Error e) {
+        return false;
+    }
+}

+ 3566 - 0
tests/Engine/EmbeddedEngineTest.vala

@@ -0,0 +1,3566 @@
+/**
+ * EmbeddedEngineTest - Unit tests for EmbeddedEngine
+ * 
+ * All tests use async methods with MainLoop wrappers for the test framework.
+ */
+using Implexus.Core;
+using Implexus.Engine;
+using Implexus.Storage;
+
+// Signature shared by every test function in this file.  No-target
+// delegate so plain static functions can be stored in an array.
+[CCode (has_target = false)]
+delegate bool EngineTestFunc ();
+
+/**
+ * Entry point: runs every EmbeddedEngine test in declaration order.
+ *
+ * The original runner repeated the same if/else block 25 times; this
+ * table-driven form registers each test on one line while producing
+ * byte-identical PASS/FAIL output, summary line and exit status.
+ */
+public static int main(string[] args) {
+    // Parallel arrays: names[i] labels funcs[i].
+    string[] names = {
+        "test_root_creation",
+        "test_container_creation",
+        "test_document_creation",
+        "test_container_deletion",
+        "test_document_deletion",
+        "test_property_access",
+        "test_property_removal",
+        "test_get_children",
+        "test_entity_exists",
+        "test_get_entity",
+        "test_query_by_type",
+        "test_nested_containers",
+        "test_multiple_documents",
+        "test_entity_path",
+        "test_engine_persistence",
+        "test_category_virtual_child_resolution",
+        "test_category_virtual_child_not_found",
+        "test_catalogue_virtual_child_group",
+        "test_catalogue_virtual_child_document",
+        "test_catalogue_virtual_child_not_found",
+        "test_index_virtual_child_resolution",
+        "test_index_virtual_child_no_matches",
+        "test_category_direct_vs_navigation",
+        "test_catalogue_direct_vs_navigation",
+        "test_index_direct_vs_navigation"
+    };
+    EngineTestFunc[] funcs = {
+        test_root_creation,
+        test_container_creation,
+        test_document_creation,
+        test_container_deletion,
+        test_document_deletion,
+        test_property_access,
+        test_property_removal,
+        test_get_children,
+        test_entity_exists,
+        test_get_entity,
+        test_query_by_type,
+        test_nested_containers,
+        test_multiple_documents,
+        test_entity_path,
+        test_engine_persistence,
+        test_category_virtual_child_resolution,
+        test_category_virtual_child_not_found,
+        test_catalogue_virtual_child_group,
+        test_catalogue_virtual_child_document,
+        test_catalogue_virtual_child_not_found,
+        test_index_virtual_child_resolution,
+        test_index_virtual_child_no_matches,
+        test_category_direct_vs_navigation,
+        test_catalogue_direct_vs_navigation,
+        test_index_direct_vs_navigation
+    };
+
+    int passed = 0;
+    int failed = 0;
+
+    for (int i = 0; i < funcs.length; i++) {
+        if (funcs[i]()) {
+            passed++;
+            stdout.printf("PASS: %s\n", names[i]);
+        } else {
+            failed++;
+            stdout.printf("FAIL: %s\n", names[i]);
+        }
+    }
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    // Non-zero exit status when any test failed.
+    return failed > 0 ? 1 : 0;
+}
+
+// Helper to create temporary directory
+//
+// Creates the directory under the system temporary directory instead
+// of the current working directory (a bare relative mkdtemp template
+// creates the dir wherever the test happens to be run from), and
+// aborts the run if creation fails rather than returning null.
+string create_temp_dir() {
+    string tmpl = Path.build_filename(Environment.get_tmp_dir(), "implexus_engine_test_XXXXXX");
+    string? temp_dir = DirUtils.mkdtemp(tmpl);
+    assert(temp_dir != null);  // mkdtemp returns null on failure
+    return (!)temp_dir;
+}
+
+// Test 1: Root creation
+//
+// A fresh engine must expose a root entity that is a CONTAINER,
+// reports is_root on its path, and has an empty name.
+bool test_root_creation() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    // The async API is driven from sync test code: begin the call,
+    // quit the loop from the completion callback, then run the loop
+    // until the callback fires.
+    var loop = new MainLoop();
+    Entity? root = null;
+    Error? error = null;
+    
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        stderr.printf("Test error: %s\n", ((!)error).message);
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    if (root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // ((!) ...) is a non-null assertion for the compiler; null-ness
+    // was already checked above.
+    if (((!)root).entity_type != EntityType.CONTAINER) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (!((!)root).path.is_root) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (((!)root).name != "") {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
+// Test 2: Container creation
+//
+// Creates a "users" container under the root and verifies its type,
+// name, rendered path, and that the engine reports it as existing.
+bool test_container_creation() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    // One MainLoop is reused for each async step: begin, quit from
+    // the callback, run until quit.
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? container = null;
+    bool exists = false;
+    Error? error = null;
+    
+    // Step 1: fetch the root entity.
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Step 2: create the /users container under the root.
+    ((!)root).create_container_async.begin("users", (obj, res) => {
+        try {
+            container = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || container == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    if (((!)container).entity_type != EntityType.CONTAINER) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (((!)container).name != "users") {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (((!)container).path.to_string() != "/users") {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Verify it exists
+    engine.entity_exists_async.begin(((!)container).path, (obj, res) => {
+        try {
+            exists = engine.entity_exists_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (!exists) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
+// Test 3: Document creation
+//
+// Builds /users/john (a "User"-labelled document inside the "users"
+// container) and verifies its type, name, type_label, path, and
+// that the engine reports it as existing.
+bool test_document_creation() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? users = null;
+    Entity? john = null;
+    bool exists = false;
+    Error? error = null;
+    
+    // Step 1: fetch the root entity.
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Step 2: create the /users container.
+    ((!)root).create_container_async.begin("users", (obj, res) => {
+        try {
+            users = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || users == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Step 3: create the "john" document with type label "User".
+    ((!)users).create_document_async.begin("john", "User", (obj, res) => {
+        try {
+            john = ((!)users).create_document_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || john == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    if (((!)john).entity_type != EntityType.DOCUMENT) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (((!)john).name != "john") {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (((!)john).type_label != "User") {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    if (((!)john).path.to_string() != "/users/john") {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Verify it exists
+    engine.entity_exists_async.begin(((!)john).path, (obj, res) => {
+        try {
+            exists = engine.entity_exists_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (!exists) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
+// Test 4: Container deletion
+//
+// Creates a "temp" container, confirms it exists, deletes it, then
+// confirms the engine no longer reports its path as existing.
+bool test_container_deletion() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? container = null;
+    // exists_before starts false / exists_after starts true so a
+    // skipped or failed async step reads as a test failure.
+    bool exists_before = false;
+    bool exists_after = true;
+    Error? error = null;
+    
+    // Step 1: fetch the root entity.
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Step 2: create the container that will be deleted.
+    ((!)root).create_container_async.begin("temp", (obj, res) => {
+        try {
+            container = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || container == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Check exists before
+    engine.entity_exists_async.begin(((!)container).path, (obj, res) => {
+        try {
+            exists_before = engine.entity_exists_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (!exists_before) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Delete
+    ((!)container).delete_async.begin((obj, res) => {
+        try {
+            ((!)container).delete_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Check exists after
+    engine.entity_exists_async.begin(((!)container).path, (obj, res) => {
+        try {
+            exists_after = engine.entity_exists_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (exists_after) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
// Test 5: a document can be deleted, and no longer resolves afterwards.
// Mirrors the container-deletion test, but for a document inside a
// freshly created container.
bool test_document_deletion () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? docs = null;
    Entity? doc = null;
    bool present_before = false;
    bool present_after = true;
    bool passed = false;

    do {
        engine.get_root_async.begin ((o, r) => {
            try {
                root = engine.get_root_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || root == null) {
            break;
        }

        ((!)root).create_container_async.begin ("docs", (o, r) => {
            try {
                docs = ((!)root).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || docs == null) {
            break;
        }

        ((!)docs).create_document_async.begin ("doc1", "Document", (o, r) => {
            try {
                doc = ((!)docs).create_document_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || doc == null) {
            break;
        }

        // The document must be visible before deletion.
        engine.entity_exists_async.begin (((!)doc).path, (o, r) => {
            try {
                present_before = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (!present_before) {
            break;
        }

        // Delete it.
        ((!)doc).delete_async.begin ((o, r) => {
            try {
                ((!)doc).delete_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            break;
        }

        // ...and invisible afterwards.
        engine.entity_exists_async.begin (((!)doc).path, (o, r) => {
            try {
                present_after = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (present_after) {
            break;
        }

        passed = true;
    } while (false);

    cleanup_dir (work_dir);
    return passed;
}
+
// Test 6: Property access
// Round-trips string, int64 and bool properties on a document, checking
// each value after retrieval, then verifies that a property that was
// never set reads back as null.
bool test_property_access() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    var loop = new MainLoop();
    Entity? root = null;
    Entity? docs = null;
    Entity? doc = null;
    Invercargill.Element? name = null;
    Invercargill.Element? count = null;
    Invercargill.Element? active = null;
    Invercargill.Element? missing = null;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)root).create_container_async.begin("docs", (obj, res) => {
        try {
            docs = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || docs == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)docs).create_document_async.begin("doc1", "Document", (obj, res) => {
        try {
            doc = ((!)docs).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Set one property of each scalar kind exercised by this test.
    ((!)doc).set_entity_property_async.begin("name", new Invercargill.NativeElement<string>("Test Document"), (obj, res) => {
        try {
            ((!)doc).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc).set_entity_property_async.begin("count", new Invercargill.NativeElement<int64?>(42), (obj, res) => {
        try {
            ((!)doc).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc).set_entity_property_async.begin("active", new Invercargill.NativeElement<bool?>(true), (obj, res) => {
        try {
            ((!)doc).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Read the properties back and verify each round-tripped value.
    ((!)doc).get_entity_property_async.begin("name", (obj, res) => {
        try {
            name = ((!)doc).get_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || name == null || ((!)name).is_null()) {
        cleanup_dir(temp_dir);
        return false;
    }
    string name_val = ((!)name).as<string>();
    if (name_val != "Test Document") {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc).get_entity_property_async.begin("count", (obj, res) => {
        try {
            count = ((!)doc).get_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || count == null || ((!)count).is_null()) {
        cleanup_dir(temp_dir);
        return false;
    }
    int64? count_val = ((!)count).as<int64?>();
    if (count_val == null || (!)count_val != 42) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc).get_entity_property_async.begin("active", (obj, res) => {
        try {
            active = ((!)doc).get_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || active == null || ((!)active).is_null()) {
        cleanup_dir(temp_dir);
        return false;
    }
    // FIX: as<bool?>() yields a nullable (boxed) bool; the previous code
    // assigned it straight to a plain `bool`. Unwrap it with an explicit
    // null check, exactly like the int64 property above, so a null value
    // fails the test cleanly instead of being dereferenced.
    bool? active_val = ((!)active).as<bool?>();
    if (active_val == null || (!)active_val != true) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // A property that was never set should come back as null.
    ((!)doc).get_entity_property_async.begin("missing", (obj, res) => {
        try {
            missing = ((!)doc).get_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // NOTE(review): `error` is deliberately not consulted here — this
    // assumes a missing property yields null rather than throwing; confirm
    // against the engine's property-lookup contract.
    if (missing != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 7: a property can be removed; reading it back afterwards yields null.
bool test_property_removal () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? docs = null;
    Entity? doc = null;
    Invercargill.Element? before = null;
    Invercargill.Element? after = null;
    bool passed = false;

    do {
        engine.get_root_async.begin ((o, r) => {
            try {
                root = engine.get_root_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || root == null) {
            break;
        }

        ((!)root).create_container_async.begin ("docs", (o, r) => {
            try {
                docs = ((!)root).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || docs == null) {
            break;
        }

        ((!)docs).create_document_async.begin ("doc1", "Document", (o, r) => {
            try {
                doc = ((!)docs).create_document_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || doc == null) {
            break;
        }

        // Write the property that will later be removed.
        ((!)doc).set_entity_property_async.begin ("temp", new Invercargill.NativeElement<string> ("temporary"), (o, r) => {
            try {
                ((!)doc).set_entity_property_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            break;
        }

        // It must be readable before removal.
        ((!)doc).get_entity_property_async.begin ("temp", (o, r) => {
            try {
                before = ((!)doc).get_entity_property_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || before == null) {
            break;
        }

        // Remove it.
        ((!)doc).remove_property_async.begin ("temp", (o, r) => {
            try {
                ((!)doc).remove_property_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            break;
        }

        // ...and it must read back as null afterwards.
        ((!)doc).get_entity_property_async.begin ("temp", (o, r) => {
            try {
                after = ((!)doc).get_entity_property_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (after != null) {
            break;
        }

        passed = true;
    } while (false);

    cleanup_dir (work_dir);
    return passed;
}
+
// Test 8: get_children returns every document created under a container,
// regardless of order.
bool test_get_children () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? users = null;
    Entity[] children = {};
    bool passed = false;

    do {
        engine.get_root_async.begin ((o, r) => {
            try {
                root = engine.get_root_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || root == null) {
            break;
        }

        ((!)root).create_container_async.begin ("users", (o, r) => {
            try {
                users = ((!)root).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || users == null) {
            break;
        }

        // Create three sibling documents.
        string[] doc_names = { "john", "jane", "bob" };
        foreach (string doc_name in doc_names) {
            ((!)users).create_document_async.begin (doc_name, "User", (o, r) => {
                try {
                    ((!)users).create_document_async.end (r);
                } catch (Error e) {
                    error = e;
                }
                loop.quit ();
            });
            loop.run ();
            if (error != null) {
                break;
            }
        }
        if (error != null) {
            break;
        }

        // All three must be reported as children.
        ((!)users).get_children_async.begin ((o, r) => {
            try {
                children = ((!)users).get_children_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || children.length != 3) {
            break;
        }

        // Order is not guaranteed, so compare as a set of names.
        var seen = new Invercargill.DataStructures.HashSet<string> ();
        foreach (var child in children) {
            seen.add (child.name);
        }
        if (!seen.has ("john") || !seen.has ("jane") || !seen.has ("bob")) {
            break;
        }

        passed = true;
    } while (false);

    cleanup_dir (work_dir);
    return passed;
}
+
// Test 9: entity_exists reports true for a created path and false for a
// path that was never created.
bool test_entity_exists () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? users = null;
    bool found_users = false;
    bool found_ghost = true;
    bool passed = false;

    do {
        engine.get_root_async.begin ((o, r) => {
            try {
                root = engine.get_root_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || root == null) {
            break;
        }

        ((!)root).create_container_async.begin ("users", (o, r) => {
            try {
                users = ((!)root).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || users == null) {
            break;
        }

        // The created container must exist.
        engine.entity_exists_async.begin (new EntityPath ("/users"), (o, r) => {
            try {
                found_users = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (!found_users) {
            break;
        }

        // A never-created path must not.
        engine.entity_exists_async.begin (new EntityPath ("/nonexistent"), (o, r) => {
            try {
                found_ghost = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (found_ghost) {
            break;
        }

        passed = true;
    } while (false);

    cleanup_dir (work_dir);
    return passed;
}
+
// Test 10: Get entity
// Retrieves a created document by absolute path and checks its metadata,
// then verifies that looking up a path that does not exist fails with an
// error.
bool test_get_entity() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    var loop = new MainLoop();
    Entity? root = null;
    Entity? users = null;
    Entity? retrieved = null;
    Entity? nonexistent = null;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)root).create_container_async.begin("users", (obj, res) => {
        try {
            users = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || users == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)users).create_document_async.begin("john", "User", (obj, res) => {
        try {
            ((!)users).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Get entity by path and verify its metadata round-tripped.
    engine.get_entity_async.begin(new EntityPath("/users/john"), (obj, res) => {
        try {
            retrieved = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || retrieved == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    if (((!)retrieved).entity_type != EntityType.DOCUMENT) {
        cleanup_dir(temp_dir);
        return false;
    }
    if (((!)retrieved).name != "john") {
        cleanup_dir(temp_dir);
        return false;
    }
    if (((!)retrieved).type_label != "User") {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Get non-existent entity - should throw
    engine.get_entity_async.begin(new EntityPath("/nonexistent"), (obj, res) => {
        try {
            nonexistent = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // FIX: the lookup must have actually thrown. Previously only
    // `nonexistent` was checked, so an implementation that silently
    // returned null without raising an error would pass incorrectly.
    if (error == null || nonexistent != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 11: query_by_type returns exactly the documents carrying a given
// type label, and an empty result for a label that was never used.
bool test_query_by_type () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? users = null;
    Entity[] user_hits = {};
    Entity[] admin_hits = {};
    Entity[] no_hits = {};
    bool passed = false;

    do {
        engine.get_root_async.begin ((o, r) => {
            try {
                root = engine.get_root_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || root == null) {
            break;
        }

        ((!)root).create_container_async.begin ("users", (o, r) => {
            try {
                users = ((!)root).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || users == null) {
            break;
        }

        // Two "User" documents and one "Admin" document.
        ((!)users).create_document_async.begin ("john", "User", (o, r) => {
            try {
                ((!)users).create_document_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            break;
        }

        ((!)users).create_document_async.begin ("jane", "User", (o, r) => {
            try {
                ((!)users).create_document_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            break;
        }

        ((!)users).create_document_async.begin ("admin", "Admin", (o, r) => {
            try {
                ((!)users).create_document_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            break;
        }

        // "User" must match exactly the two user documents.
        engine.query_by_type_async.begin ("User", (o, r) => {
            try {
                user_hits = engine.query_by_type_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || user_hits.length != 2) {
            break;
        }

        // "Admin" must match exactly one.
        engine.query_by_type_async.begin ("Admin", (o, r) => {
            try {
                admin_hits = engine.query_by_type_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || admin_hits.length != 1) {
            break;
        }

        // An unused label must match nothing.
        engine.query_by_type_async.begin ("NonExistent", (o, r) => {
            try {
                no_hits = engine.query_by_type_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || no_hits.length != 0) {
            break;
        }

        passed = true;
    } while (false);

    cleanup_dir (work_dir);
    return passed;
}
+
// Test 12: containers nest three levels deep; every level resolves by its
// absolute path and each reports the expected number of children.
bool test_nested_containers () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? level1 = null;
    Entity? level2 = null;
    Entity? level3 = null;
    bool found1 = false;
    bool found2 = false;
    bool found3 = false;
    Entity[] kids1 = {};
    Entity[] kids2 = {};
    Entity[] kids3 = {};
    bool passed = false;

    do {
        engine.get_root_async.begin ((o, r) => {
            try {
                root = engine.get_root_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || root == null) {
            break;
        }

        // Build /level1/level2/level3.
        ((!)root).create_container_async.begin ("level1", (o, r) => {
            try {
                level1 = ((!)root).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || level1 == null) {
            break;
        }

        ((!)level1).create_container_async.begin ("level2", (o, r) => {
            try {
                level2 = ((!)level1).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || level2 == null) {
            break;
        }

        ((!)level2).create_container_async.begin ("level3", (o, r) => {
            try {
                level3 = ((!)level2).create_container_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || level3 == null) {
            break;
        }

        // All three paths must resolve.
        engine.entity_exists_async.begin (new EntityPath ("/level1"), (o, r) => {
            try {
                found1 = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (!found1) {
            break;
        }

        engine.entity_exists_async.begin (new EntityPath ("/level1/level2"), (o, r) => {
            try {
                found2 = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (!found2) {
            break;
        }

        engine.entity_exists_async.begin (new EntityPath ("/level1/level2/level3"), (o, r) => {
            try {
                found3 = engine.entity_exists_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (!found3) {
            break;
        }

        // level1 and level2 each hold exactly one child; level3 is empty.
        ((!)level1).get_children_async.begin ((o, r) => {
            try {
                kids1 = ((!)level1).get_children_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || kids1.length != 1) {
            break;
        }

        ((!)level2).get_children_async.begin ((o, r) => {
            try {
                kids2 = ((!)level2).get_children_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || kids2.length != 1) {
            break;
        }

        ((!)level3).get_children_async.begin ((o, r) => {
            try {
                kids3 = ((!)level3).get_children_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || kids3.length != 0) {
            break;
        }

        passed = true;
    } while (false);

    cleanup_dir (work_dir);
    return passed;
}
+
// Test 13: ten sibling documents can be created, each with its own
// "index" property, and all of them read back correctly by path.
bool test_multiple_documents () {
    string work_dir = create_temp_dir ();
    var engine = new EmbeddedEngine.with_path (work_dir);
    var loop = new MainLoop ();

    Error? error = null;
    Entity? root = null;
    Entity? users = null;
    Entity[] children = {};

    engine.get_root_async.begin ((o, r) => {
        try {
            root = engine.get_root_async.end (r);
        } catch (Error e) {
            error = e;
        }
        loop.quit ();
    });
    loop.run ();
    if (error != null || root == null) {
        cleanup_dir (work_dir);
        return false;
    }

    ((!)root).create_container_async.begin ("users", (o, r) => {
        try {
            users = ((!)root).create_container_async.end (r);
        } catch (Error e) {
            error = e;
        }
        loop.quit ();
    });
    loop.run ();
    if (error != null || users == null) {
        cleanup_dir (work_dir);
        return false;
    }

    // Create user0..user9, each carrying an "index" property.
    for (int i = 0; i < 10; i++) {
        int idx = i;  // stable per-iteration copy for the closures below
        Entity? created = null;

        ((!)users).create_document_async.begin (@"user$idx", "User", (o, r) => {
            try {
                created = ((!)users).create_document_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || created == null) {
            cleanup_dir (work_dir);
            return false;
        }

        ((!)created).set_entity_property_async.begin ("index", new Invercargill.NativeElement<int64?> (idx), (o, r) => {
            try {
                ((!)created).set_entity_property_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null) {
            cleanup_dir (work_dir);
            return false;
        }
    }

    // All ten must show up as children of /users.
    ((!)users).get_children_async.begin ((o, r) => {
        try {
            children = ((!)users).get_children_async.end (r);
        } catch (Error e) {
            error = e;
        }
        loop.quit ();
    });
    loop.run ();
    if (error != null || children.length != 10) {
        cleanup_dir (work_dir);
        return false;
    }

    // Each document's "index" property must round-trip by path lookup.
    for (int i = 0; i < 10; i++) {
        int idx = i;
        Entity? fetched = null;
        Invercargill.Element? index = null;

        engine.get_entity_async.begin (new EntityPath (@"/users/user$idx"), (o, r) => {
            try {
                fetched = engine.get_entity_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || fetched == null) {
            cleanup_dir (work_dir);
            return false;
        }

        ((!)fetched).get_entity_property_async.begin ("index", (o, r) => {
            try {
                index = ((!)fetched).get_entity_property_async.end (r);
            } catch (Error e) {
                error = e;
            }
            loop.quit ();
        });
        loop.run ();
        if (error != null || index == null || ((!)index).is_null ()) {
            cleanup_dir (work_dir);
            return false;
        }
        int64? stored = ((!)index).as<int64?> ();
        if (stored == null || (!)stored != idx) {
            cleanup_dir (work_dir);
            return false;
        }
    }

    cleanup_dir (work_dir);
    return true;
}
+
// Test 14: Entity path
//
// Builds the hierarchy /a/b/c (container, container, document) and checks
// that each entity reports the expected path string and that parent paths
// chain back up to the root.
bool test_entity_path() {
    string temp_dir = create_temp_dir();

    var engine = new EmbeddedEngine.with_path(temp_dir);

    // One loop drives every async call to completion; callbacks store
    // results in these locals before quitting the loop.
    var main_loop = new MainLoop();
    Error? err = null;
    Entity? root_ent = null;
    Entity? cont_a = null;
    Entity? cont_b = null;
    Entity? doc_c = null;

    // Fetch the root entity.
    engine.get_root_async.begin((obj, res) => {
        try {
            root_ent = engine.get_root_async.end(res);
        } catch (Error e) {
            err = e;
        }
        main_loop.quit();
    });
    main_loop.run();
    if (err != null || root_ent == null) {
        cleanup_dir(temp_dir);
        return false;
    }

    // Create container /a.
    ((!)root_ent).create_container_async.begin("a", (obj, res) => {
        try {
            cont_a = ((!)root_ent).create_container_async.end(res);
        } catch (Error e) {
            err = e;
        }
        main_loop.quit();
    });
    main_loop.run();
    if (err != null || cont_a == null) {
        cleanup_dir(temp_dir);
        return false;
    }

    // Create container /a/b.
    ((!)cont_a).create_container_async.begin("b", (obj, res) => {
        try {
            cont_b = ((!)cont_a).create_container_async.end(res);
        } catch (Error e) {
            err = e;
        }
        main_loop.quit();
    });
    main_loop.run();
    if (err != null || cont_b == null) {
        cleanup_dir(temp_dir);
        return false;
    }

    // Create document /a/b/c.
    ((!)cont_b).create_document_async.begin("c", "Document", (obj, res) => {
        try {
            doc_c = ((!)cont_b).create_document_async.end(res);
        } catch (Error e) {
            err = e;
        }
        main_loop.quit();
    });
    main_loop.run();
    if (err != null || doc_c == null) {
        cleanup_dir(temp_dir);
        return false;
    }

    // Path string representations must reflect the created hierarchy.
    bool paths_ok = ((!)root_ent).path.is_root
        && ((!)cont_a).path.to_string() == "/a"
        && ((!)cont_b).path.to_string() == "/a/b"
        && ((!)doc_c).path.to_string() == "/a/b/c";
    if (!paths_ok) {
        cleanup_dir(temp_dir);
        return false;
    }

    // Each entity's parent path must equal the path of its creator.
    bool parents_ok = ((!)doc_c).path.parent.equals(((!)cont_b).path)
        && ((!)cont_b).path.parent.equals(((!)cont_a).path)
        && ((!)cont_a).path.parent.equals(((!)root_ent).path);
    if (!parents_ok) {
        cleanup_dir(temp_dir);
        return false;
    }

    cleanup_dir(temp_dir);
    return true;
}
+
// Test 15: Persistence
//
// Writes /users/john with an "email" property through one engine instance,
// then opens a second engine on the same directory and verifies that both
// the hierarchy and the property value were persisted to disk.
bool test_engine_persistence() {
    string temp_dir = create_temp_dir();
    
    // Create and write
    var engine1 = new EmbeddedEngine.with_path(temp_dir);
    
    // One loop drives every async call; callbacks fill these locals in
    // before quitting the loop.
    var loop = new MainLoop();
    Entity? root1 = null;
    Entity? users1 = null;
    Entity? john1 = null;
    bool exists1 = false;
    bool exists2 = false;
    Entity? root2 = null;
    Entity? john2 = null;
    Invercargill.Element? email = null;
    Error? error = null;
    
    engine1.get_root_async.begin((obj, res) => {
        try {
            root1 = engine1.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)root1).create_container_async.begin("users", (obj, res) => {
        try {
            users1 = ((!)root1).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || users1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)users1).create_document_async.begin("john", "User", (obj, res) => {
        try {
            john1 = ((!)users1).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || john1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)john1).set_entity_property_async.begin("email", new Invercargill.NativeElement<string>("john@example.com"), (obj, res) => {
        try {
            ((!)john1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create new engine instance on the same directory.
    var engine2 = new EmbeddedEngine.with_path(temp_dir);
    
    // Verify data persists
    engine2.get_root_async.begin((obj, res) => {
        try {
            root2 = engine2.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root2 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    engine2.entity_exists_async.begin(new EntityPath("/users"), (obj, res) => {
        try {
            exists1 = engine2.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // Fix: also fail when entity_exists_async threw, consistent with the
    // error checks used by the other tests in this file.
    if (error != null || !exists1) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    engine2.entity_exists_async.begin(new EntityPath("/users/john"), (obj, res) => {
        try {
            exists2 = engine2.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // Fix: same error-aware check for the second existence probe.
    if (error != null || !exists2) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    engine2.get_entity_async.begin(new EntityPath("/users/john"), (obj, res) => {
        try {
            john2 = engine2.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || john2 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)john2).get_entity_property_async.begin("email", (obj, res) => {
        try {
            email = ((!)john2).get_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || email == null || ((!)email).is_null()) {
        cleanup_dir(temp_dir);
        return false;
    }
    // The property written through engine1 must round-trip through engine2.
    string email_val = ((!)email).as<string>();
    if (email_val != "john@example.com") {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Cleanup helper: best-effort recursive removal of the directory at `path`.
//
// The previous version only unlinked direct children; FileUtils.unlink
// fails on subdirectories, so any nested directory created by the storage
// backend kept the final DirUtils.remove() from succeeding and leaked the
// temp directory. Subdirectories are now descended into first.
void cleanup_dir(string path) {
    try {
        Dir dir = Dir.open(path, 0);
        string? name;
        while ((name = dir.read_name()) != null) {
            string child = Path.build_filename(path, name);
            // Recurse into real subdirectories; unlink files and symlinks
            // (a symlinked directory must be removed, never followed).
            if (FileUtils.test(child, FileTest.IS_DIR) && !FileUtils.test(child, FileTest.IS_SYMLINK)) {
                cleanup_dir(child);
            } else {
                FileUtils.unlink(child);
            }
        }
    } catch (FileError e) {
        // Ignore errors - cleanup stays best-effort, as before.
    }
    DirUtils.remove(path);
}
+
+// ============================================================================
+// Virtual Entity Resolution Tests
+// ============================================================================
+
// Test 16: Category virtual child resolution - get_entity_async for member
//
// Builds /posts with a "published" category (predicate "!draft"), then two
// documents: post1 (draft=false, a member) and post2 (draft=true, not a
// member). Verifies the member resolves through the category's virtual path
// /posts/published/post1 via both get_entity_async and entity_exists_async.
bool test_category_virtual_child_resolution() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    // One loop drives each async call to completion; the callbacks write
    // their results into these locals before quitting the loop.
    var loop = new MainLoop();
    Entity? root = null;
    Entity? container = null;
    Entity? doc1 = null;
    Entity? doc2 = null;
    Entity? category = null;
    Entity? resolved_doc = null;
    bool exists = false;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create container
    ((!)root).create_container_async.begin("posts", (obj, res) => {
        try {
            container = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || container == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // IMPORTANT: Create category FIRST so hooks are registered
    ((!)container).create_category_async.begin("published", "Post", "!draft", (obj, res) => {
        try {
            category = ((!)container).create_category_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || category == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Now create documents - hooks will update the category index
    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
        try {
            doc1 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Set draft = false on post1 (matches !draft predicate)
    ((!)doc1).set_entity_property_async.begin("draft", new Invercargill.NativeElement<bool?>(false), (obj, res) => {
        try {
            ((!)doc1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)container).create_document_async.begin("post2", "Post", (obj, res) => {
        try {
            doc2 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc2 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Set draft = true on post2 (does NOT match !draft predicate)
    ((!)doc2).set_entity_property_async.begin("draft", new Invercargill.NativeElement<bool?>(true), (obj, res) => {
        try {
            ((!)doc2).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: Resolve category member via direct path
    engine.get_entity_async.begin(new EntityPath("/posts/published/post1"), (obj, res) => {
        try {
            resolved_doc = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // post1 should be found (draft = false, matches !draft)
    if (error != null || resolved_doc == null) {
        stderr.printf("Failed to resolve post1: %s\n", error != null ? ((!)error).message : "null");
        cleanup_dir(temp_dir);
        return false;
    }
    
    // A resolved member must surface as a plain DOCUMENT, not a virtual type.
    if (((!)resolved_doc).entity_type != EntityType.DOCUMENT) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    if (((!)resolved_doc).name != "post1") {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: entity_exists_async for category member
    engine.entity_exists_async.begin(new EntityPath("/posts/published/post1"), (obj, res) => {
        try {
            exists = engine.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || !exists) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 17: Category virtual child resolution - non-existent child returns ENTITY_NOT_FOUND
//
// Creates /posts with a "published" category (predicate "!draft") and a
// single document post1 with draft=true, i.e. NOT a member. Resolving
// /posts/published/post1 must then fail, and entity_exists_async must
// report false.
bool test_category_virtual_child_not_found() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    var loop = new MainLoop();
    Entity? root = null;
    Entity? container = null;
    Entity? doc1 = null;
    Entity? category = null;
    Entity? resolved_doc = null;
    // Deliberately seeded true so the final check fails unless the engine
    // actually reports non-existence.
    bool exists = true;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create container
    ((!)root).create_container_async.begin("posts", (obj, res) => {
        try {
            container = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || container == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // IMPORTANT: Create category FIRST so hooks are registered
    ((!)container).create_category_async.begin("published", "Post", "!draft", (obj, res) => {
        try {
            category = ((!)container).create_category_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || category == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Now create document with draft = true (does NOT match !draft predicate)
    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
        try {
            doc1 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc1).set_entity_property_async.begin("draft", new Invercargill.NativeElement<bool?>(true), (obj, res) => {
        try {
            ((!)doc1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: Try to resolve non-member (post1 has draft=true, doesn't match !draft)
    engine.get_entity_async.begin(new EntityPath("/posts/published/post1"), (obj, res) => {
        try {
            resolved_doc = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // post1 should NOT be found (draft = true, doesn't match !draft)
    if (resolved_doc != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Should have thrown ENTITY_NOT_FOUND
    // NOTE(review): only non-nullness is asserted here; the specific error
    // code is not verified against EntityError.ENTITY_NOT_FOUND - confirm.
    if (error == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: entity_exists_async for non-member
    // NOTE(review): `error` still holds the expected failure from the call
    // above and is not reset; only `exists` is inspected below, so the
    // stale value is harmless here.
    engine.entity_exists_async.begin(new EntityPath("/posts/published/post1"), (obj, res) => {
        try {
            exists = engine.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (exists) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 18: Catalogue virtual child resolution - group key returns CatalogueGroup
//
// Builds /posts with a "by-author" catalogue grouped on the "author"
// property, then two posts authored by "john" and "jane". Resolving the
// group key path /posts/by-author/john must yield a CatalogueGroup entity
// (surfaced with EntityType.CATALOGUE) named after the key.
bool test_catalogue_virtual_child_group() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    // One loop drives each async call; callbacks fill these locals in
    // before quitting the loop.
    var loop = new MainLoop();
    Entity? root = null;
    Entity? container = null;
    Entity? doc1 = null;
    Entity? doc2 = null;
    Entity? catalogue = null;
    Entity? resolved_group = null;
    bool exists = false;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create container
    ((!)root).create_container_async.begin("posts", (obj, res) => {
        try {
            container = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || container == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // IMPORTANT: Create catalogue FIRST so hooks are registered
    ((!)container).create_catalogue_async.begin("by-author", "Post", "author", (obj, res) => {
        try {
            catalogue = ((!)container).create_catalogue_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || catalogue == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Now create documents - hooks will update the catalogue index
    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
        try {
            doc1 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // post1 goes into the "john" group.
    ((!)doc1).set_entity_property_async.begin("author", new Invercargill.NativeElement<string>("john"), (obj, res) => {
        try {
            ((!)doc1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)container).create_document_async.begin("post2", "Post", (obj, res) => {
        try {
            doc2 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc2 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // post2 goes into a different group ("jane") to prove key isolation.
    ((!)doc2).set_entity_property_async.begin("author", new Invercargill.NativeElement<string>("jane"), (obj, res) => {
        try {
            ((!)doc2).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: Resolve group via direct path
    engine.get_entity_async.begin(new EntityPath("/posts/by-author/john"), (obj, res) => {
        try {
            resolved_group = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || resolved_group == null) {
        stderr.printf("Failed to resolve group: %s\n", error != null ? ((!)error).message : "null");
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Should be a CatalogueGroup (appears as CATALOGUE type)
    if (((!)resolved_group).entity_type != EntityType.CATALOGUE) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // The virtual group is named after its key, not the catalogue.
    if (((!)resolved_group).name != "john") {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: entity_exists_async for group
    engine.entity_exists_async.begin(new EntityPath("/posts/by-author/john"), (obj, res) => {
        try {
            exists = engine.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || !exists) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 19: Catalogue virtual child resolution - document within group
//
// Same setup as test 18 (catalogue "by-author" over /posts), but resolves
// a member document by name directly under the catalogue path
// (/posts/by-author/post1), i.e. without naming the group key first.
bool test_catalogue_virtual_child_document() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    var loop = new MainLoop();
    Entity? root = null;
    Entity? container = null;
    Entity? doc1 = null;
    Entity? doc2 = null;
    Entity? catalogue = null;
    Entity? resolved_doc = null;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create container
    ((!)root).create_container_async.begin("posts", (obj, res) => {
        try {
            container = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || container == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // IMPORTANT: Create catalogue FIRST so hooks are registered
    ((!)container).create_catalogue_async.begin("by-author", "Post", "author", (obj, res) => {
        try {
            catalogue = ((!)container).create_catalogue_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || catalogue == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Now create documents - hooks will update the catalogue index
    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
        try {
            doc1 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc1).set_entity_property_async.begin("author", new Invercargill.NativeElement<string>("john"), (obj, res) => {
        try {
            ((!)doc1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)container).create_document_async.begin("post2", "Post", (obj, res) => {
        try {
            doc2 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc2 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc2).set_entity_property_async.begin("author", new Invercargill.NativeElement<string>("jane"), (obj, res) => {
        try {
            ((!)doc2).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: Resolve document via direct path (document name within any group)
    engine.get_entity_async.begin(new EntityPath("/posts/by-author/post1"), (obj, res) => {
        try {
            resolved_doc = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || resolved_doc == null) {
        stderr.printf("Failed to resolve document: %s\n", error != null ? ((!)error).message : "null");
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Should be a Document
    if (((!)resolved_doc).entity_type != EntityType.DOCUMENT) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    if (((!)resolved_doc).name != "post1") {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 20: Catalogue virtual child resolution - non-existent returns ENTITY_NOT_FOUND
//
// Creates /posts with one authored document, then a "by-author" catalogue.
// Unlike tests 18/19 the catalogue is created AFTER the document, so the
// index is populated explicitly. Resolving a key that no document maps to
// (/posts/by-author/nonexistent) must fail, and entity_exists_async must
// report false.
bool test_catalogue_virtual_child_not_found() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    var loop = new MainLoop();
    Entity? root = null;
    Entity? container = null;
    Entity? doc1 = null;
    Entity? catalogue = null;
    Entity? resolved = null;
    // Seeded true so the final check fails unless non-existence is reported.
    bool exists = true;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create container
    ((!)root).create_container_async.begin("posts", (obj, res) => {
        try {
            container = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || container == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create document
    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
        try {
            doc1 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc1).set_entity_property_async.begin("author", new Invercargill.NativeElement<string>("john"), (obj, res) => {
        try {
            ((!)doc1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create catalogue grouped by author
    ((!)container).create_catalogue_async.begin("by-author", "Post", "author", (obj, res) => {
        try {
            catalogue = ((!)container).create_catalogue_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || catalogue == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Populate the catalogue index
    // (needed because the document predates the catalogue, so no hook ran)
    var catalogue_impl = catalogue as Implexus.Entities.Catalogue;
    if (catalogue_impl != null) {
        try {
            ((!)catalogue_impl).populate_index();
        } catch (Error e) {
            cleanup_dir(temp_dir);
            return false;
        }
    }
    
    // Test: Try to resolve non-existent group
    engine.get_entity_async.begin(new EntityPath("/posts/by-author/nonexistent"), (obj, res) => {
        try {
            resolved = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    // Should NOT be found
    if (resolved != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Should have thrown ENTITY_NOT_FOUND
    // NOTE(review): only non-nullness is asserted; the specific error code
    // is not checked against EntityError.ENTITY_NOT_FOUND - confirm.
    if (error == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: entity_exists_async for non-existent
    engine.entity_exists_async.begin(new EntityPath("/posts/by-author/nonexistent"), (obj, res) => {
        try {
            exists = engine.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (exists) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
// Test 21: Index virtual child resolution - search pattern returns IndexResult
//
// Creates /articles with two documents whose "title" both contain "Vala",
// then a "search" index over the title property. Resolving the virtual
// search path /articles/search/*Vala* must yield an IndexResult entity
// (surfaced with EntityType.CONTAINER), and entity_exists_async must
// report true when the pattern has matches.
bool test_index_virtual_child_resolution() {
    string temp_dir = create_temp_dir();
    
    var engine = new EmbeddedEngine.with_path(temp_dir);
    
    var loop = new MainLoop();
    Entity? root = null;
    Entity? container = null;
    Entity? doc1 = null;
    Entity? doc2 = null;
    Entity? index = null;
    Entity? resolved_result = null;
    bool exists = false;
    Error? error = null;
    
    engine.get_root_async.begin((obj, res) => {
        try {
            root = engine.get_root_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || root == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create container
    ((!)root).create_container_async.begin("articles", (obj, res) => {
        try {
            container = ((!)root).create_container_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || container == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create documents FIRST with searchable content
    ((!)container).create_document_async.begin("article1", "Article", (obj, res) => {
        try {
            doc1 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc1 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc1).set_entity_property_async.begin("title", new Invercargill.NativeElement<string>("Introduction to Vala"), (obj, res) => {
        try {
            ((!)doc1).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)container).create_document_async.begin("article2", "Article", (obj, res) => {
        try {
            doc2 = ((!)container).create_document_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || doc2 == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    ((!)doc2).set_entity_property_async.begin("title", new Invercargill.NativeElement<string>("Advanced Vala Techniques"), (obj, res) => {
        try {
            ((!)doc2).set_entity_property_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Create index AFTER documents - populate_index() will be called internally
    ((!)container).create_index_async.begin("search", "Article", "title", (obj, res) => {
        try {
            index = ((!)container).create_index_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || index == null) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Explicitly populate the index (needed for tests)
    var index_impl = index as Implexus.Entities.Index;
    if (index_impl != null) {
        try {
            ((!)index_impl).populate_index();
        } catch (Error e) {
            cleanup_dir(temp_dir);
            return false;
        }
    }
    
    // Test: Resolve search pattern via direct path
    // (the last path segment is a glob-style pattern, not a literal name)
    engine.get_entity_async.begin(new EntityPath("/articles/search/*Vala*"), (obj, res) => {
        try {
            resolved_result = engine.get_entity_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || resolved_result == null) {
        stderr.printf("Failed to resolve search: %s\n", error != null ? ((!)error).message : "null");
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Should be a Container (IndexResult appears as CONTAINER)
    if (((!)resolved_result).entity_type != EntityType.CONTAINER) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    // Test: entity_exists_async for search with matches
    engine.entity_exists_async.begin(new EntityPath("/articles/search/*Vala*"), (obj, res) => {
        try {
            exists = engine.entity_exists_async.end(res);
        } catch (Error e) {
            error = e;
        }
        loop.quit();
    });
    loop.run();
    
    if (error != null || !exists) {
        cleanup_dir(temp_dir);
        return false;
    }
    
    cleanup_dir(temp_dir);
    return true;
}
+
+// Test 22: Index virtual child resolution - no matches returns ENTITY_NOT_FOUND
+//
+// Builds a container with one indexed document, then verifies that a search
+// pattern matching nothing (a) fails entity resolution with an error and
+// (b) reports non-existence via entity_exists_async.
+bool test_index_virtual_child_no_matches() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? container = null;
+    Entity? doc1 = null;
+    Entity? index = null;
+    Entity? resolved = null;
+    bool exists = true;
+    Error? error = null;
+    
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create container
+    ((!)root).create_container_async.begin("articles", (obj, res) => {
+        try {
+            container = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || container == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create document
+    ((!)container).create_document_async.begin("article1", "Article", (obj, res) => {
+        try {
+            doc1 = ((!)container).create_document_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || doc1 == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    ((!)doc1).set_entity_property_async.begin("title", new Invercargill.NativeElement<string>("Introduction to Vala"), (obj, res) => {
+        try {
+            ((!)doc1).set_entity_property_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create index over title property
+    ((!)container).create_index_async.begin("search", "Article", "title", (obj, res) => {
+        try {
+            index = ((!)container).create_index_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || index == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Populate the index
+    var index_impl = index as Implexus.Entities.Index;
+    if (index_impl != null) {
+        try {
+            ((!)index_impl).populate_index();
+        } catch (Error e) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+    }
+    
+    // Test: Search with no matches
+    engine.get_entity_async.begin(new EntityPath("/articles/search/*NonExistent*"), (obj, res) => {
+        try {
+            resolved = engine.get_entity_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    // Should NOT be found
+    if (resolved != null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Should have thrown (expected ENTITY_NOT_FOUND).
+    // NOTE(review): only the presence of an error is asserted; asserting the
+    // exact error domain/code would make this stricter - confirm the domain
+    // exposed by the engine and tighten if possible.
+    if (error == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Clear the expected not-found error so a genuine failure in
+    // entity_exists_async below is not masked by the stale error.
+    error = null;
+    
+    // Test: entity_exists_async for search with no matches
+    engine.entity_exists_async.begin(new EntityPath("/articles/search/*NonExistent*"), (obj, res) => {
+        try {
+            exists = engine.entity_exists_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || exists) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
+// Test 23: Comparison - direct path equals navigation for Category
+//
+// Verifies that resolving /posts/published/post1 directly through the engine
+// yields the same document (same path and name) as navigating via the
+// category entity's get_child_async("post1").
+bool test_category_direct_vs_navigation() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? container = null;
+    Entity? doc1 = null;
+    Entity? category = null;
+    Entity? direct_doc = null;
+    Entity? nav_doc = null;
+    Error? error = null;
+    
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create container
+    ((!)root).create_container_async.begin("posts", (obj, res) => {
+        try {
+            container = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || container == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // IMPORTANT: Create category FIRST so hooks are registered
+    ((!)container).create_category_async.begin("published", "Post", "!draft", (obj, res) => {
+        try {
+            category = ((!)container).create_category_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || category == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Now create document - hooks will update the category index
+    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
+        try {
+            doc1 = ((!)container).create_document_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || doc1 == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // draft = false makes the post match the category filter "!draft".
+    ((!)doc1).set_entity_property_async.begin("draft", new Invercargill.NativeElement<bool?>(false), (obj, res) => {
+        try {
+            ((!)doc1).set_entity_property_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Method 1: Direct path lookup
+    engine.get_entity_async.begin(new EntityPath("/posts/published/post1"), (obj, res) => {
+        try {
+            direct_doc = engine.get_entity_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || direct_doc == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Method 2: Navigation via get_child_async
+    ((!)category).get_child_async.begin("post1", (obj, res) => {
+        try {
+            nav_doc = ((!)category).get_child_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || nav_doc == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Both methods should return the same document (by path)
+    if (((!)direct_doc).path.to_string() != ((!)nav_doc).path.to_string()) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    if (((!)direct_doc).name != ((!)nav_doc).name) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
+// Test 24: Comparison - direct path equals navigation for Catalogue group
+//
+// Verifies that resolving /posts/by-author/john directly through the engine
+// yields the same catalogue group (same path and name) as navigating via the
+// catalogue entity's get_child_async("john").
+bool test_catalogue_direct_vs_navigation() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? container = null;
+    Entity? doc1 = null;
+    Entity? catalogue = null;
+    Entity? direct_group = null;
+    Entity? nav_group = null;
+    Error? error = null;
+    
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create container
+    ((!)root).create_container_async.begin("posts", (obj, res) => {
+        try {
+            container = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || container == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // IMPORTANT: Create catalogue FIRST so hooks are registered
+    ((!)container).create_catalogue_async.begin("by-author", "Post", "author", (obj, res) => {
+        try {
+            catalogue = ((!)container).create_catalogue_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || catalogue == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Now create document - hooks will update the catalogue index
+    ((!)container).create_document_async.begin("post1", "Post", (obj, res) => {
+        try {
+            doc1 = ((!)container).create_document_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || doc1 == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // author = "john" places the post in the "by-author" group named "john".
+    ((!)doc1).set_entity_property_async.begin("author", new Invercargill.NativeElement<string>("john"), (obj, res) => {
+        try {
+            ((!)doc1).set_entity_property_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Method 1: Direct path lookup
+    engine.get_entity_async.begin(new EntityPath("/posts/by-author/john"), (obj, res) => {
+        try {
+            direct_group = engine.get_entity_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || direct_group == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Method 2: Navigation via get_child_async
+    ((!)catalogue).get_child_async.begin("john", (obj, res) => {
+        try {
+            nav_group = ((!)catalogue).get_child_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || nav_group == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Both methods should return the same group (by path)
+    if (((!)direct_group).path.to_string() != ((!)nav_group).path.to_string()) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    if (((!)direct_group).name != ((!)nav_group).name) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}
+
+// Test 25: Comparison - direct path equals navigation for Index search
+//
+// Verifies that resolving /articles/search/*Vala* directly through the engine
+// yields an IndexResult with the same path as navigating via the index
+// entity's get_child_async("*Vala*").
+bool test_index_direct_vs_navigation() {
+    string temp_dir = create_temp_dir();
+    
+    var engine = new EmbeddedEngine.with_path(temp_dir);
+    
+    var loop = new MainLoop();
+    Entity? root = null;
+    Entity? container = null;
+    Entity? doc1 = null;
+    Entity? index = null;
+    Entity? direct_result = null;
+    Entity? nav_result = null;
+    Error? error = null;
+    
+    engine.get_root_async.begin((obj, res) => {
+        try {
+            root = engine.get_root_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || root == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create container
+    ((!)root).create_container_async.begin("articles", (obj, res) => {
+        try {
+            container = ((!)root).create_container_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || container == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create document FIRST with searchable content
+    ((!)container).create_document_async.begin("article1", "Article", (obj, res) => {
+        try {
+            doc1 = ((!)container).create_document_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || doc1 == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // The title contains "Vala" so the *Vala* search pattern below matches.
+    ((!)doc1).set_entity_property_async.begin("title", new Invercargill.NativeElement<string>("Introduction to Vala"), (obj, res) => {
+        try {
+            ((!)doc1).set_entity_property_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Create index AFTER documents - populate_index() will be called internally
+    ((!)container).create_index_async.begin("search", "Article", "title", (obj, res) => {
+        try {
+            index = ((!)container).create_index_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || index == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Explicitly populate the index (needed for tests)
+    var index_impl = index as Implexus.Entities.Index;
+    if (index_impl != null) {
+        try {
+            ((!)index_impl).populate_index();
+        } catch (Error e) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+    }
+    
+    // Method 1: Direct path lookup
+    engine.get_entity_async.begin(new EntityPath("/articles/search/*Vala*"), (obj, res) => {
+        try {
+            direct_result = engine.get_entity_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || direct_result == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Method 2: Navigation via get_child_async
+    ((!)index).get_child_async.begin("*Vala*", (obj, res) => {
+        try {
+            nav_result = ((!)index).get_child_async.end(res);
+        } catch (Error e) {
+            error = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (error != null || nav_result == null) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    // Both methods should return an IndexResult with the same path
+    if (((!)direct_result).path.to_string() != ((!)nav_result).path.to_string()) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return true;
+}

+ 1081 - 0
tests/Migrations/MigrationTest.vala

@@ -0,0 +1,1081 @@
+/**
+ * MigrationTest - Unit tests for Migration system
+ */
+using Implexus.Core;
+using Implexus.Engine;
+using Implexus.Storage;
+using Implexus.Migrations;
+
+/**
+ * Abstract base class for async test operations (replaces async delegates).
+ *
+ * Subclasses implement execute_async() to run one async test case; instances
+ * are driven to completion synchronously by run_async_test().
+ */
+public abstract class AsyncTestOperation : Object {
+    // Runs the wrapped test case; may propagate any error the test raises.
+    public abstract async void execute_async() throws Error;
+}
+
+/**
+ * Drives a single AsyncTestOperation to completion on a fresh MainLoop.
+ *
+ * Blocks until execute_async() has finished. Any error the operation raises
+ * is logged as a warning; the operation itself is responsible for reporting
+ * pass/fail.
+ */
+void run_async_test(AsyncTestOperation op) {
+    Error? failure = null;
+    var loop = new MainLoop();
+    
+    op.execute_async.begin((source, result) => {
+        try {
+            op.execute_async.end(result);
+        } catch (Error e) {
+            failure = e;
+        }
+        loop.quit();
+    });
+    loop.run();
+    
+    if (failure != null) {
+        warning("Async test error: %s", ((!)failure).message);
+    }
+}
+
+/**
+ * Prints a PASS/FAIL line for a synchronous test and updates the counters.
+ *
+ * @param name   test function name as printed in the report
+ * @param result true if the test passed
+ */
+static void report_result(string name, bool result, ref int passed, ref int failed) {
+    if (result) {
+        passed++;
+        stdout.printf("PASS: %s\n", name);
+    } else {
+        failed++;
+        stdout.printf("FAIL: %s\n", name);
+    }
+}
+
+/**
+ * Test driver: runs all Migration system tests in order and reports totals.
+ *
+ * Synchronous tests are reported via report_result(); async tests are
+ * wrapped in AsyncTestOperation subclasses and driven by run_async_test(),
+ * which each update the counters themselves.
+ *
+ * @return 0 when every test passed, 1 otherwise
+ */
+public static int main(string[] args) {
+    int passed = 0;
+    int failed = 0;
+
+    // === MigrationStorage Tests ===
+    report_result("test_record_migration", test_record_migration(), ref passed, ref failed);
+    report_result("test_get_applied_versions", test_get_applied_versions(), ref passed, ref failed);
+    report_result("test_is_applied", test_is_applied(), ref passed, ref failed);
+    report_result("test_remove_migration", test_remove_migration(), ref passed, ref failed);
+    report_result("test_get_migration_record", test_get_migration_record(), ref passed, ref failed);
+
+    // === MigrationRunner Tests ===
+    report_result("test_register_migration", test_register_migration(), ref passed, ref failed);
+    report_result("test_get_pending_versions", test_get_pending_versions(), ref passed, ref failed);
+
+    // Tests 8-13 are async; each operation prints its own PASS/FAIL line.
+    run_async_test(new TestRunPendingOperation(ref passed, ref failed));
+    run_async_test(new TestRunToVersionOperation(ref passed, ref failed));
+    run_async_test(new TestRollbackToVersionOperation(ref passed, ref failed));
+    run_async_test(new TestMigrationOrderOperation(ref passed, ref failed));
+    run_async_test(new TestErrorAlreadyAppliedOperation(ref passed, ref failed));
+    run_async_test(new TestErrorMissingMigrationOperation(ref passed, ref failed));
+
+    report_result("test_version_conflict", test_version_conflict(), ref passed, ref failed);
+
+    // === BootstrapMigration Tests ===
+    report_result("test_bootstrap_version", test_bootstrap_version(), ref passed, ref failed);
+    run_async_test(new TestBootstrapIrreversibleOperation(ref passed, ref failed));
+    run_async_test(new TestBootstrapExecutionOperation(ref passed, ref failed));
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    return failed > 0 ? 1 : 0;
+}
+
+// === Helper Classes ===
+
+/**
+ * Simple test migration that creates a container.
+ *
+ * up_async() creates a container named _container_name under the engine
+ * root; down_async() deletes it again. The up_called/down_called flags let
+ * tests verify which direction was executed.
+ */
+public class TestMigration : Object, Migration {
+    // Backing fields are public so tests can inspect them directly.
+    public string _version;
+    public string _description;
+    public string _container_name;
+    public bool up_called { get; private set; default = false; }
+    public bool down_called { get; private set; default = false; }
+    
+    public TestMigration(string version, string description, string container_name) {
+        _version = version;
+        _description = description;
+        _container_name = container_name;
+    }
+    
+    public string version { owned get { return _version; } }
+    public string description { owned get { return _description; } }
+    
+    // Applies the migration: creates the container under the engine root.
+    // NOTE(review): only EngineError is converted to MigrationError here;
+    // confirm the yielded calls cannot raise other error domains.
+    public async void up_async(Engine engine) throws MigrationError {
+        up_called = true;
+        try {
+            var root = yield engine.get_root_async();
+            yield root.create_container_async(_container_name);
+        } catch (EngineError e) {
+            throw new MigrationError.EXECUTION_FAILED(
+                "Failed to create container: %s".printf(e.message)
+            );
+        }
+    }
+    
+    // Reverts the migration: deletes the container if it still exists.
+    public async void down_async(Engine engine) throws MigrationError {
+        down_called = true;
+        try {
+            var root = yield engine.get_root_async();
+            var child = yield root.get_child_async(_container_name);
+            if (child != null) {
+                yield ((!) child).delete_async();
+            }
+        } catch (EngineError e) {
+            throw new MigrationError.EXECUTION_FAILED(
+                "Failed to delete container: %s".printf(e.message)
+            );
+        }
+    }
+}
+
+/**
+ * Migration that throws an error on up_async().
+ *
+ * Used to verify that the runner surfaces EXECUTION_FAILED and does not
+ * record a failed migration as applied.
+ */
+public class FailingMigration : Object, Migration {
+    public string _version;
+    public string _description;
+    
+    public FailingMigration(string version) {
+        _version = version;
+        _description = "Failing migration";
+    }
+    
+    public string version { owned get { return _version; } }
+    public string description { owned get { return _description; } }
+    
+    // Always fails so callers can test error propagation.
+    public async void up_async(Engine engine) throws MigrationError {
+        throw new MigrationError.EXECUTION_FAILED("Intentional failure");
+    }
+    
+    // Does nothing
+    public async void down_async(Engine engine) throws MigrationError {
+        // Does nothing
+    }
+}
+
+// === Async Test Operation Classes ===
+
+/**
+ * Runs test_run_pending() and records the outcome in the shared counters.
+ *
+ * Fix: the counters are captured by address. The previous `unowned int`
+ * fields copied the counter *values* at construction (unowned has no
+ * aliasing effect on value types), so async test results never reached
+ * the totals printed by main(). The pointers stay valid because
+ * run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestRunPendingOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestRunPendingOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_run_pending();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_run_pending\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_run_pending\n");
+        }
+    }
+}
+
+/**
+ * Runs test_run_to_version() and records the outcome in the shared counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestRunToVersionOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestRunToVersionOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_run_to_version();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_run_to_version\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_run_to_version\n");
+        }
+    }
+}
+
+/**
+ * Runs test_rollback_to_version() and records the outcome in the counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestRollbackToVersionOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestRollbackToVersionOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_rollback_to_version();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_rollback_to_version\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_rollback_to_version\n");
+        }
+    }
+}
+
+/**
+ * Runs test_migration_order() and records the outcome in the counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestMigrationOrderOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestMigrationOrderOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_migration_order();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_migration_order\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_migration_order\n");
+        }
+    }
+}
+
+/**
+ * Runs test_error_already_applied() and records the outcome in the counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestErrorAlreadyAppliedOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestErrorAlreadyAppliedOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_error_already_applied();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_error_already_applied\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_error_already_applied\n");
+        }
+    }
+}
+
+/**
+ * Runs test_error_missing_migration() and records the outcome in the counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestErrorMissingMigrationOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestErrorMissingMigrationOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_error_missing_migration();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_error_missing_migration\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_error_missing_migration\n");
+        }
+    }
+}
+
+/**
+ * Runs test_bootstrap_irreversible() and records the outcome in the counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestBootstrapIrreversibleOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestBootstrapIrreversibleOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_bootstrap_irreversible();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_bootstrap_irreversible\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_bootstrap_irreversible\n");
+        }
+    }
+}
+
+/**
+ * Runs test_bootstrap_execution() and records the outcome in the counters.
+ *
+ * Fix: counters captured by address (the old `unowned int` fields copied
+ * values, so results never reached main()'s totals). Pointers stay valid
+ * because run_async_test() blocks inside main()'s stack frame.
+ */
+public class TestBootstrapExecutionOperation : AsyncTestOperation {
+    private int* _passed;
+    private int* _failed;
+    
+    public TestBootstrapExecutionOperation(ref int passed, ref int failed) {
+        _passed = &passed;
+        _failed = &failed;
+    }
+    
+    public override async void execute_async() throws Error {
+        bool result = yield test_bootstrap_execution();
+        if (result) {
+            *_passed = *_passed + 1;
+            stdout.puts("PASS: test_bootstrap_execution\n");
+        } else {
+            *_failed = *_failed + 1;
+            stdout.puts("FAIL: test_bootstrap_execution\n");
+        }
+    }
+}
+
+// === Helper Functions ===
+
+/**
+ * Creates a unique temporary directory for testing and returns its path.
+ *
+ * DirUtils.mkdtemp() returns null on failure; previously that null was
+ * returned to callers and crashed later in an unrelated place. Abort
+ * immediately instead, since no test can run without a scratch directory.
+ */
+string create_temp_dir() {
+    string? temp_dir = DirUtils.mkdtemp("implexus_migration_test_XXXXXX");
+    if (temp_dir == null) {
+        error("Failed to create temporary test directory");
+    }
+    return (!)temp_dir;
+}
+
+/**
+ * Recursively deletes a temporary directory and everything inside it.
+ *
+ * Fix: the previous version only unlinked direct children, so any
+ * subdirectory created by the engine made DirUtils.remove() fail on the
+ * non-empty parent and leaked the whole temp tree. Errors are still
+ * ignored: cleanup is best-effort.
+ */
+void cleanup_dir(string path) {
+    try {
+        Dir dir = Dir.open(path, 0);
+        string? name;
+        while ((name = dir.read_name()) != null) {
+            string child = Path.build_filename(path, (!)name);
+            // Recurse into real subdirectories; unlink files and symlinks.
+            if (FileUtils.test(child, FileTest.IS_DIR) && !FileUtils.test(child, FileTest.IS_SYMLINK)) {
+                cleanup_dir(child);
+            } else {
+                FileUtils.unlink(child);
+            }
+        }
+    } catch (FileError e) {
+        // Ignore errors
+    }
+    DirUtils.remove(path);
+}
+
+// === MigrationStorage Tests ===
+
+// Test 1: Record migration stores migration records correctly
+//
+// Records one version via MigrationStorage and checks that is_applied()
+// reports it afterwards. Any thrown error fails the test.
+bool test_record_migration() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var storage = new MigrationStorage(engine);
+        
+        // Record a migration
+        storage.record_migration("2026031301", "Create users table");
+        
+        // Verify it was recorded
+        if (!storage.is_applied("2026031301")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Engine setup or storage failures fail the test.
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test 2: Get applied versions returns all recorded versions
+//
+// Records three versions out of order and verifies get_applied_versions()
+// returns exactly that set (order is not asserted here).
+bool test_get_applied_versions() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var storage = new MigrationStorage(engine);
+        
+        // Record migrations in non-sorted order
+        storage.record_migration("2026031303", "Third migration");
+        storage.record_migration("2026031301", "First migration");
+        storage.record_migration("2026031302", "Second migration");
+        
+        // Collect the applied versions into a set for membership checks
+        var versions = storage.get_applied_versions();
+        var version_set = new Invercargill.DataStructures.HashSet<string>();
+        foreach (var v in versions) {
+            version_set.add(v);
+        }
+        
+        // Should have exactly 3 versions
+        if (version_set.length != 3) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Verify all three versions are present
+        if (!version_set.has("2026031301")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        if (!version_set.has("2026031302")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        if (!version_set.has("2026031303")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Catch Error (not just MigrationError) for consistency with the
+        // other storage tests: engine construction failures must also fail
+        // this test instead of escaping the handler.
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test 3: is_applied distinguishes recorded from unrecorded versions.
+bool test_is_applied() {
+    string temp_dir = create_temp_dir();
+    bool ok = false;
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var storage = new MigrationStorage(engine);
+        
+        // Not applied before recording, applied after recording, and
+        // unknown versions always report false.
+        if (!storage.is_applied("2026031301")) {
+            storage.record_migration("2026031301", "Test migration");
+            ok = storage.is_applied("2026031301")
+                && !storage.is_applied("9999999999");
+        }
+    } catch (Error e) {
+        ok = false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return ok;
+}
+
+// Test 4: remove_migration deletes a previously recorded migration.
+bool test_remove_migration() {
+    string temp_dir = create_temp_dir();
+    bool ok = false;
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var storage = new MigrationStorage(engine);
+        
+        storage.record_migration("2026031301", "Test migration");
+        
+        // Must be applied after recording...
+        if (storage.is_applied("2026031301")) {
+            storage.remove_migration("2026031301");
+            // ...and no longer applied once removed.
+            ok = !storage.is_applied("2026031301");
+        }
+    } catch (Error e) {
+        ok = false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return ok;
+}
+
+// Test 5: get_migration_record returns stored metadata, or null when
+// the version was never recorded.
+bool test_get_migration_record() {
+    string temp_dir = create_temp_dir();
+    bool ok = false;
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var storage = new MigrationStorage(engine);
+        
+        storage.record_migration("2026031301", "Create users table");
+        
+        var record = storage.get_migration_record("2026031301");
+        if (record != null) {
+            var r = (!) record;
+            // Version and description round-trip; applied_at was stamped;
+            // an unknown version yields no record.
+            ok = r.version == "2026031301"
+                && r.description == "Create users table"
+                && r.applied_at != null
+                && storage.get_migration_record("9999999999") == null;
+        }
+    } catch (Error e) {
+        ok = false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return ok;
+}
+
+// === MigrationRunner Tests ===
+
+// Test 6: register_migration adds the migration to the pending set.
+bool test_register_migration() {
+    string temp_dir = create_temp_dir();
+    bool ok = false;
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        runner.register_migration(
+            new TestMigration("2026031301", "Test migration", "test_container"));
+        
+        // A single registered, unapplied migration must be pending.
+        ok = runner.get_pending_count() == 1;
+    } catch (Error e) {
+        ok = false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return ok;
+}
+
+// Test 7: get_pending_versions lists unapplied migrations in sorted order.
+bool test_get_pending_versions() {
+    string temp_dir = create_temp_dir();
+    bool ok = false;
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        runner.register_migration(new TestMigration("2026031301", "First", "container1"));
+        runner.register_migration(new TestMigration("2026031302", "Second", "container2"));
+        runner.register_migration(new TestMigration("2026031303", "Third", "container3"));
+        
+        // Mark the middle migration as already applied.
+        var storage = new MigrationStorage(engine);
+        storage.record_migration("2026031302", "Second");
+        
+        var pending_list = new Invercargill.DataStructures.Vector<string>();
+        foreach (var v in runner.get_pending_versions()) {
+            pending_list.add(v);
+        }
+        
+        // Only the two unapplied versions remain, sorted ascending.
+        ok = pending_list.length == 2
+            && pending_list[0] == "2026031301"
+            && pending_list[1] == "2026031303";
+    } catch (Error e) {
+        ok = false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return ok;
+}
+
+// Test 8: run_pending_async executes all registered migrations, records
+// them as applied, and performs their side effects (container creation).
+async bool test_run_pending() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        var m1 = new TestMigration("2026031301", "First", "container1");
+        var m2 = new TestMigration("2026031302", "Second", "container2");
+        var m3 = new TestMigration("2026031303", "Third", "container3");
+        
+        runner.register_migration(m1);
+        runner.register_migration(m2);
+        runner.register_migration(m3);
+        
+        int count = yield runner.run_pending_async();
+        
+        // All three migrations must have been executed...
+        if (count != 3) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // ...with each up() hook invoked...
+        if (!m1.up_called || !m2.up_called || !m3.up_called) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // ...and recorded as applied.
+        if (!runner.is_applied("2026031301") ||
+            !runner.is_applied("2026031302") ||
+            !runner.is_applied("2026031303")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // The migrations' containers must actually exist in the engine.
+        bool exists1 = yield engine.entity_exists_async(new EntityPath("/container1"));
+        bool exists2 = yield engine.entity_exists_async(new EntityPath("/container2"));
+        bool exists3 = yield engine.entity_exists_async(new EntityPath("/container3"));
+        if (!exists1 || !exists2 || !exists3) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Previously an error thrown mid-test leaked the temp directory;
+        // clean it up before propagating the failure to the harness.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 9: run_to_version_async applies migrations up to and including
+// the target version, leaving later migrations untouched.
+async bool test_run_to_version() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        var m1 = new TestMigration("2026031301", "First", "container1");
+        var m2 = new TestMigration("2026031302", "Second", "container2");
+        var m3 = new TestMigration("2026031303", "Third", "container3");
+        
+        runner.register_migration(m1);
+        runner.register_migration(m2);
+        runner.register_migration(m3);
+        
+        // Run up to the second version (inclusive).
+        int count = yield runner.run_to_version_async("2026031302");
+        
+        if (count != 2) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // The first two ran; the third must not have.
+        if (!m1.up_called || !m2.up_called || m3.up_called) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Applied state matches: first two applied, third not.
+        if (!runner.is_applied("2026031301") || !runner.is_applied("2026031302")
+            || runner.is_applied("2026031303")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Clean up the temp directory even when the test throws;
+        // previously such errors leaked it.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 10: rollback_to_version_async rolls back everything newer than
+// the target version, invoking down() and deleting side effects.
+async bool test_rollback_to_version() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        var m1 = new TestMigration("2026031301", "First", "container1");
+        var m2 = new TestMigration("2026031302", "Second", "container2");
+        var m3 = new TestMigration("2026031303", "Third", "container3");
+        
+        runner.register_migration(m1);
+        runner.register_migration(m2);
+        runner.register_migration(m3);
+        
+        // Apply everything, then roll back to the first version
+        // (the target itself is kept).
+        yield runner.run_pending_async();
+        int count = yield runner.rollback_to_version_async("2026031301");
+        
+        if (count != 2) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // down() ran for the two newer migrations only.
+        if (!m2.down_called || !m3.down_called || m1.down_called) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Only the target version remains applied.
+        if (!runner.is_applied("2026031301") ||
+            runner.is_applied("2026031302") || runner.is_applied("2026031303")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // The rolled-back containers are gone; the kept one remains.
+        bool exists1 = yield engine.entity_exists_async(new EntityPath("/container1"));
+        bool exists2 = yield engine.entity_exists_async(new EntityPath("/container2"));
+        bool exists3 = yield engine.entity_exists_async(new EntityPath("/container3"));
+        if (!exists1 || exists2 || exists3) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Previously an error thrown mid-test leaked the temp directory.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 11: migrations execute in ascending version order regardless of
+// registration order.
+async bool test_migration_order() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        // Register out of order on purpose.
+        var m3 = new TestMigration("2026031303", "Third", "container3");
+        var m1 = new TestMigration("2026031301", "First", "container1");
+        var m2 = new TestMigration("2026031302", "Second", "container2");
+        
+        runner.register_migration(m3);
+        runner.register_migration(m1);
+        runner.register_migration(m2);
+        
+        int count = yield runner.run_pending_async();
+        
+        if (count != 3 || !m1.up_called || !m2.up_called || !m3.up_called) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // The applied-version list must come back sorted ascending.
+        var version_list = new Invercargill.DataStructures.Vector<string>();
+        foreach (var v in runner.get_applied_versions()) {
+            version_list.add(v);
+        }
+        
+        if (version_list.length != 3 ||
+            version_list[0] != "2026031301" ||
+            version_list[1] != "2026031302" ||
+            version_list[2] != "2026031303") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Clean up the temp directory before propagating unexpected
+        // errors; previously they leaked it.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 12: run_one_async refuses to re-run an already applied migration.
+async bool test_error_already_applied() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        // Register and apply one migration.
+        var m1 = new TestMigration("2026031301", "First", "container1");
+        runner.register_migration(m1);
+        yield runner.run_pending_async();
+        
+        // Re-running the same version must raise ALREADY_APPLIED.
+        try {
+            yield runner.run_one_async("2026031301");
+            // No error raised — test fails.
+            cleanup_dir(temp_dir);
+            return false;
+        } catch (MigrationError.ALREADY_APPLIED e) {
+            // Expected.
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Remove the temp dir even on unexpected errors before letting
+        // the harness see the failure; previously such errors leaked it.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 13: unregistered versions are neither pending nor applied.
+async bool test_error_missing_migration() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        // A version that was never registered must not show up as pending.
+        bool found = false;
+        foreach (var v in runner.get_pending_versions()) {
+            if (v == "9999999999") {
+                found = true;
+                break;
+            }
+        }
+        
+        // ...nor report as applied.
+        bool ok = !found && !runner.is_applied("9999999999");
+        
+        cleanup_dir(temp_dir);
+        return ok;
+    } catch (Error e) {
+        // Clean up the temp directory even when the test throws;
+        // previously such errors leaked it.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 14: registering two migrations with the same version fails.
+bool test_version_conflict() {
+    string temp_dir = create_temp_dir();
+    bool ok = false;
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        runner.register_migration(
+            new TestMigration("2026031301", "First", "container1"));
+        
+        // A second registration of the same version must be rejected.
+        try {
+            runner.register_migration(
+                new TestMigration("2026031301", "Duplicate", "container2"));
+            ok = false; // no conflict reported
+        } catch (MigrationError.VERSION_CONFLICT e) {
+            ok = true; // expected
+        }
+    } catch (Error e) {
+        ok = false;
+    }
+    
+    cleanup_dir(temp_dir);
+    return ok;
+}
+
+// === BootstrapMigration Tests ===
+
+// Test 15: the bootstrap migration uses the reserved all-zero version,
+// so it always sorts before real migrations.
+bool test_bootstrap_version() {
+    var bootstrap = new BootstrapMigration();
+    return bootstrap.version == "0000000000";
+}
+
+// Test 16: BootstrapMigration.down_async raises IRREVERSIBLE — the
+// bootstrap can never be rolled back.
+async bool test_bootstrap_irreversible() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var bootstrap = new BootstrapMigration();
+        
+        try {
+            yield bootstrap.down_async(engine);
+            // No error raised — test fails.
+            cleanup_dir(temp_dir);
+            return false;
+        } catch (MigrationError.IRREVERSIBLE e) {
+            // Expected.
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Ensure the temp dir is removed when an unexpected error
+        // escapes; previously it leaked.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}
+
+// Test 17: the bootstrap migration runs once and guarantees the root
+// entity exists afterwards.
+async bool test_bootstrap_execution() throws Error {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        var engine = new EmbeddedEngine.with_path(temp_dir);
+        var runner = new MigrationRunner(engine);
+        
+        runner.register_migration(new BootstrapMigration());
+        
+        int count = yield runner.run_pending_async();
+        
+        // Exactly the bootstrap migration ran and is now applied.
+        if (count != 1 || !runner.is_applied("0000000000")) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Bootstrap guarantees the root entity exists.
+        var root = yield engine.get_root_async();
+        if (root == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        // Clean up the temp directory even when the test throws;
+        // previously such errors leaked it.
+        cleanup_dir(temp_dir);
+        throw e;
+    }
+}

+ 571 - 0
tests/Protocol/MessageTest.vala

@@ -0,0 +1,571 @@
+/**
+ * MessageTest - Unit tests for Protocol messages
+ */
+using Implexus.Core;
+using Implexus.Protocol;
+using Implexus.Storage;
+
+// Prints PASS/FAIL for one test result and updates the counters.
+void report_result(string name, bool ok, ref int passed, ref int failed) {
+    if (ok) {
+        passed++;
+        stdout.printf("PASS: %s\n", name);
+    } else {
+        failed++;
+        stdout.printf("FAIL: %s\n", name);
+    }
+}
+
+public static int main(string[] args) {
+    int passed = 0;
+    int failed = 0;
+
+    // One line per test replaces fifteen copies of the same if/else
+    // reporting boilerplate; output is byte-identical to before.
+    report_result("test_message_header", test_message_header(), ref passed, ref failed);
+    report_result("test_get_entity_request", test_get_entity_request(), ref passed, ref failed);
+    report_result("test_entity_exists_request", test_entity_exists_request(), ref passed, ref failed);
+    report_result("test_create_container_request", test_create_container_request(), ref passed, ref failed);
+    report_result("test_create_document_request", test_create_document_request(), ref passed, ref failed);
+    report_result("test_delete_entity_request", test_delete_entity_request(), ref passed, ref failed);
+    report_result("test_set_property_request", test_set_property_request(), ref passed, ref failed);
+    report_result("test_get_property_request", test_get_property_request(), ref passed, ref failed);
+    report_result("test_entity_response", test_entity_response(), ref passed, ref failed);
+    report_result("test_boolean_response", test_boolean_response(), ref passed, ref failed);
+    report_result("test_property_response", test_property_response(), ref passed, ref failed);
+    report_result("test_error_response", test_error_response(), ref passed, ref failed);
+    report_result("test_success_response", test_success_response(), ref passed, ref failed);
+    report_result("test_message_factory", test_message_factory(), ref passed, ref failed);
+    report_result("test_full_round_trip", test_full_round_trip(), ref passed, ref failed);
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    return failed > 0 ? 1 : 0;
+}
+
+// Test 1: MessageHeader serializes to a fixed-size buffer carrying the
+// "IMPX" magic and round-trips all of its fields.
+bool test_message_header() {
+    var header = new MessageHeader();
+    header.message_type = MessageType.GET_ENTITY;
+    header.payload_length = 100;
+    header.request_id = 42;
+    
+    var wire = header.serialize();
+    
+    // Fixed-size header with the 4-byte "IMPX" magic up front.
+    if (wire.length != HEADER_SIZE) return false;
+    if (wire[0] != 'I' || wire[1] != 'M' || wire[2] != 'P' || wire[3] != 'X') return false;
+    
+    try {
+        var restored = MessageHeader.deserialize(wire);
+        return restored.message_type == MessageType.GET_ENTITY
+            && restored.payload_length == 100
+            && restored.request_id == 42;
+    } catch (ProtocolError e) {
+        return false;
+    }
+}
+
+// Test 2: GetEntityRequest round-trips its path through serialization.
+bool test_get_entity_request() {
+    try {
+        var path = new EntityPath("/users/john");
+        var request = new GetEntityRequest.for_path(path);
+        request.request_id = 1;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.GET_ENTITY || header.request_id != 1) {
+            return false;
+        }
+        
+        var restored = new GetEntityRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.path.equals(path);
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 3: EntityExistsRequest round-trips its path.
+bool test_entity_exists_request() {
+    try {
+        var path = new EntityPath("/test/path");
+        var request = new EntityExistsRequest.for_path(path);
+        request.request_id = 2;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.ENTITY_EXISTS) return false;
+        
+        var restored = new EntityExistsRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.path.equals(path);
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 4: CreateContainerRequest round-trips its path.
+bool test_create_container_request() {
+    try {
+        var path = new EntityPath("/new_container");
+        var request = new CreateContainerRequest.for_path(path);
+        request.request_id = 3;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.CREATE_CONTAINER) return false;
+        
+        var restored = new CreateContainerRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.path.equals(path);
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 5: CreateDocumentRequest round-trips its path and type label.
+bool test_create_document_request() {
+    try {
+        var path = new EntityPath("/docs/new_doc");
+        var request = new CreateDocumentRequest.for_path_and_type(path, "Document");
+        request.request_id = 4;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.CREATE_DOCUMENT) return false;
+        
+        var restored = new CreateDocumentRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.path.equals(path) && restored.type_label == "Document";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 6: DeleteEntityRequest round-trips its path.
+bool test_delete_entity_request() {
+    try {
+        var path = new EntityPath("/to_delete");
+        var request = new DeleteEntityRequest.for_path(path);
+        request.request_id = 5;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.DELETE_ENTITY) return false;
+        
+        var restored = new DeleteEntityRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.path.equals(path);
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 7: SetPropertyRequest round-trips path, property name and value.
+bool test_set_property_request() {
+    try {
+        var path = new EntityPath("/entity");
+        var request = new SetPropertyRequest();
+        request.path = path;
+        request.property_name = "test_prop";
+        request.value = new Invercargill.NativeElement<string>("test_value");
+        request.request_id = 6;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.SET_PROPERTY) return false;
+        
+        var restored = new SetPropertyRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        if (!restored.path.equals(path) || restored.property_name != "test_prop") {
+            return false;
+        }
+        
+        // The typed value must come back non-null and intact.
+        if (restored.value == null || ((!) restored.value).is_null()) return false;
+        return ((!) restored.value).as<string>() == "test_value";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 8: GetPropertyRequest round-trips its path and property name.
+bool test_get_property_request() {
+    try {
+        var path = new EntityPath("/entity");
+        var request = new GetPropertyRequest();
+        request.path = path;
+        request.property_name = "get_prop";
+        request.request_id = 7;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.GET_PROPERTY) return false;
+        
+        var restored = new GetPropertyRequest();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.path.equals(path) && restored.property_name == "get_prop";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 9: EntityResponse round-trips the entity's type, path and label.
+bool test_entity_response() {
+    try {
+        var response = new EntityResponse();
+        response.entity_data.entity_type = EntityType.DOCUMENT;
+        response.entity_data.path = new EntityPath("/test/document");
+        response.entity_data.type_label = "TestDoc";
+        response.request_id = 8;
+        
+        var wire = response.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.ENTITY_RESPONSE) return false;
+        
+        var restored = new EntityResponse();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.entity_data.entity_type == EntityType.DOCUMENT
+            && restored.entity_data.path.equals(new EntityPath("/test/document"))
+            && restored.entity_data.type_label == "TestDoc";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 10: BooleanResponse round-trips both true and false values.
+bool test_boolean_response() {
+    try {
+        // True value, with a request id set.
+        var resp_t = new BooleanResponse();
+        resp_t.value = true;
+        resp_t.request_id = 9;
+        var wire_t = resp_t.serialize();
+        var back_t = new BooleanResponse();
+        back_t.header = MessageHeader.deserialize(wire_t);
+        back_t.deserialize_payload(wire_t[HEADER_SIZE:wire_t.length]);
+        if (back_t.value != true) return false;
+        
+        // False value.
+        var resp_f = new BooleanResponse();
+        resp_f.value = false;
+        var wire_f = resp_f.serialize();
+        var back_f = new BooleanResponse();
+        back_f.header = MessageHeader.deserialize(wire_f);
+        back_f.deserialize_payload(wire_f[HEADER_SIZE:wire_f.length]);
+        return back_f.value == false;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 11: PropertyResponse round-trips a typed int64 value.
+bool test_property_response() {
+    try {
+        var response = new PropertyResponse();
+        response.value = new Invercargill.NativeElement<int64?>(12345);
+        response.request_id = 10;
+        
+        var wire = response.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.PROPERTY_RESPONSE) return false;
+        
+        var restored = new PropertyResponse();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        // The value must come back non-null and numerically equal.
+        if (restored.value == null || ((!) restored.value).is_null()) return false;
+        int64? val = ((!) restored.value).as<int64?>();
+        return val != null && (!) val == 12345;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 12: ErrorResponse round-trips its error code and message.
+bool test_error_response() {
+    try {
+        var response = new ErrorResponse.with_error(404, "Entity not found");
+        response.request_id = 11;
+        
+        var wire = response.serialize();
+        var header = MessageHeader.deserialize(wire);
+        if (header.message_type != MessageType.ERROR) return false;
+        
+        var restored = new ErrorResponse();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return restored.error_code == 404
+            && restored.error_message == "Entity not found";
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 13: SuccessResponse carries no payload and deserializes cleanly.
+bool test_success_response() {
+    try {
+        var response = new SuccessResponse();
+        response.request_id = 12;
+        
+        var wire = response.serialize();
+        var header = MessageHeader.deserialize(wire);
+        // A success message is header-only.
+        if (header.message_type != MessageType.SUCCESS || header.payload_length != 0) {
+            return false;
+        }
+        
+        var restored = new SuccessResponse();
+        restored.header = header;
+        restored.deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 14: MessageFactory creates a message of the matching type for
+// every known MessageType value.
+bool test_message_factory() {
+    try {
+        MessageType[] all_types = {
+            MessageType.GET_ENTITY, MessageType.ENTITY_EXISTS,
+            MessageType.CREATE_CONTAINER, MessageType.CREATE_DOCUMENT,
+            MessageType.DELETE_ENTITY, MessageType.SET_PROPERTY,
+            MessageType.GET_PROPERTY, MessageType.GET_CHILDREN,
+            MessageType.QUERY_BY_TYPE, MessageType.BEGIN_TRANSACTION,
+            MessageType.COMMIT_TRANSACTION, MessageType.ROLLBACK_TRANSACTION,
+            MessageType.ENTITY_RESPONSE, MessageType.BOOLEAN_RESPONSE,
+            MessageType.PROPERTY_RESPONSE, MessageType.CHILDREN_RESPONSE,
+            MessageType.QUERY_RESPONSE, MessageType.ERROR,
+            MessageType.SUCCESS, MessageType.WELCOME
+        };
+        
+        foreach (var t in all_types) {
+            var message = MessageFactory.create_message(t);
+            if (message == null || message.message_type != t) {
+                return false;
+            }
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 15: full round trip through the MessageFactory — serialize a
+// request, rebuild it from the wire bytes, and verify every field.
+bool test_full_round_trip() {
+    try {
+        var request = new CreateDocumentRequest.for_path_and_type(
+            new EntityPath("/deeply/nested/path/document"),
+            "ComplexDocument"
+        );
+        request.request_id = 999;
+        
+        var wire = request.serialize();
+        var header = MessageHeader.deserialize(wire);
+        
+        // The factory must produce the right concrete message type.
+        var restored = MessageFactory.create_message(header.message_type);
+        if (restored == null) return false;
+        
+        ((!) restored).header = header;
+        ((!) restored).deserialize_payload(wire[HEADER_SIZE:wire.length]);
+        
+        var doc_request = ((!) restored) as CreateDocumentRequest;
+        if (doc_request == null) return false;
+        
+        return ((!) doc_request).path.to_string() == "/deeply/nested/path/document"
+            && ((!) doc_request).type_label == "ComplexDocument"
+            && ((!) doc_request).request_id == 999;
+    } catch (Error e) {
+        return false;
+    }
+}

+ 2735 - 0
tests/Storage/DbmPersistenceTest.vala

@@ -0,0 +1,2735 @@
+/**
+ * DbmPersistenceTest - Comprehensive persistence tests for all DBM backends
+ * 
+ * These tests verify that data persists correctly across engine instances
+ * for each supported DBM backend (Filesystem, GDBM, LMDB).
+ * 
+ * Test pattern:
+ * 1. Create an engine with the specific backend
+ * 2. Write data (containers, documents, properties)
+ * 3. Shutdown and destroy the engine
+ * 4. Create a new engine with the same path
+ * 5. Verify all data persists correctly
+ */
+using Implexus.Core;
+using Implexus.Engine;
+using Implexus.Storage;
+
+// Signature shared by every persistence test in this file.
+delegate bool PersistenceTestCase();
+
+// Runs one test case: prints its PASS/FAIL line and updates the counters.
+// Output format matches the original hand-rolled pattern exactly.
+void run_case(string name, PersistenceTestCase test, ref int passed, ref int failed) {
+    if (test()) {
+        passed++;
+        stdout.printf("PASS: %s\n", name);
+    } else {
+        failed++;
+        stdout.printf("FAIL: %s\n", name);
+    }
+}
+
+/**
+ * Entry point: runs every persistence test for each DBM backend
+ * (Filesystem, GDBM, LMDB) and prints a per-test PASS/FAIL line plus
+ * a final summary.
+ *
+ * @return 0 when all tests pass, 1 when any test fails
+ */
+public static int main(string[] args) {
+    int passed = 0;
+    int failed = 0;
+
+    // Filesystem Dbm Persistence Tests
+    stdout.puts("\n=== Filesystem Dbm Persistence Tests ===\n");
+    run_case("test_filesystem_basic_persistence", test_filesystem_basic_persistence, ref passed, ref failed);
+    run_case("test_filesystem_container_persistence", test_filesystem_container_persistence, ref passed, ref failed);
+    run_case("test_filesystem_document_persistence", test_filesystem_document_persistence, ref passed, ref failed);
+    run_case("test_filesystem_property_persistence", test_filesystem_property_persistence, ref passed, ref failed);
+    run_case("test_filesystem_nested_structure_persistence", test_filesystem_nested_structure_persistence, ref passed, ref failed);
+    run_case("test_filesystem_multiple_properties_persistence", test_filesystem_multiple_properties_persistence, ref passed, ref failed);
+
+    // GDBM Persistence Tests
+    stdout.puts("\n=== GDBM Dbm Persistence Tests ===\n");
+    run_case("test_gdbm_basic_persistence", test_gdbm_basic_persistence, ref passed, ref failed);
+    run_case("test_gdbm_container_persistence", test_gdbm_container_persistence, ref passed, ref failed);
+    run_case("test_gdbm_document_persistence", test_gdbm_document_persistence, ref passed, ref failed);
+    run_case("test_gdbm_property_persistence", test_gdbm_property_persistence, ref passed, ref failed);
+    run_case("test_gdbm_nested_structure_persistence", test_gdbm_nested_structure_persistence, ref passed, ref failed);
+    run_case("test_gdbm_multiple_properties_persistence", test_gdbm_multiple_properties_persistence, ref passed, ref failed);
+
+    // LMDB Persistence Tests
+    stdout.puts("\n=== LMDB Dbm Persistence Tests ===\n");
+    run_case("test_lmdb_basic_persistence", test_lmdb_basic_persistence, ref passed, ref failed);
+    run_case("test_lmdb_container_persistence", test_lmdb_container_persistence, ref passed, ref failed);
+    run_case("test_lmdb_document_persistence", test_lmdb_document_persistence, ref passed, ref failed);
+    run_case("test_lmdb_property_persistence", test_lmdb_property_persistence, ref passed, ref failed);
+    run_case("test_lmdb_nested_structure_persistence", test_lmdb_nested_structure_persistence, ref passed, ref failed);
+    run_case("test_lmdb_multiple_properties_persistence", test_lmdb_multiple_properties_persistence, ref passed, ref failed);
+
+    stdout.printf("\n=== Results: %d passed, %d failed ===\n", passed, failed);
+    return failed > 0 ? 1 : 0;
+}
+
+// ============================================================================
+// Helper Functions
+// ============================================================================
+
+// Creates a fresh temporary directory and returns its path.
+//
+// DirUtils.mkdtemp returns null on failure; the original returned that
+// null through a non-nullable string. Abort loudly instead, so callers
+// never operate on an invalid path.
+string create_temp_dir(string prefix = "implexus_persistence_test_") {
+    string? dir = DirUtils.mkdtemp(prefix + "XXXXXX");
+    if (dir == null) {
+        error("Failed to create temporary directory (prefix: %s)", prefix);
+    }
+    return (!) dir;
+}
+
+// Helper to run async operation synchronously
+//
+// The delegate is expected to start an asynchronous operation and call
+// loop.quit() from its completion callback, storing any failure in `error`.
+delegate void AsyncOperation(MainLoop loop, ref Error? error);
+
+// Drives one AsyncOperation to completion on a fresh MainLoop, logging
+// (but not propagating) any error the operation reported.
+//
+// NOTE(review): if `op` completes synchronously and calls loop.quit()
+// before loop.run() starts, the quit is lost and run() blocks forever —
+// confirm every caller only quits from a main-loop-dispatched callback.
+void run_async(AsyncOperation op) {
+    var loop = new MainLoop();
+    Error? error = null;
+    op(loop, ref error);
+    loop.run();
+    if (error != null) {
+        warning("Async operation error: %s", ((!)error).message);
+    }
+}
+
+// Recursively deletes `path` and everything beneath it (best effort).
+//
+// Symbolic links are unlinked rather than followed: FileTest.IS_DIR
+// follows symlinks, so without the explicit IS_SYMLINK check a link to
+// a directory outside the tree would be recursed into and the link
+// target's contents deleted.
+void cleanup_dir(string path) {
+    try {
+        Dir dir = Dir.open(path, 0);
+        string? name;
+        while ((name = dir.read_name()) != null) {
+            string file_path = Path.build_filename(path, name);
+            if (FileUtils.test(file_path, FileTest.IS_SYMLINK)) {
+                FileUtils.unlink(file_path);
+            } else if (FileUtils.test(file_path, FileTest.IS_DIR)) {
+                cleanup_dir(file_path);
+            } else {
+                FileUtils.unlink(file_path);
+            }
+        }
+    } catch (FileError e) {
+        // Ignore errors — cleanup is best effort.
+    }
+    DirUtils.remove(path);
+}
+
+// ============================================================================
+// Filesystem Dbm Persistence Tests
+// ============================================================================
+
+// Test: Basic root persistence
+// Verifies that the root entity survives an engine restart: a second
+// engine opened over the same directory must report "/" as existing and
+// return a root entity. Returns true on success, false on any failure.
+bool test_filesystem_basic_persistence() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        // Phase 1: Create engine and get root
+        var dbm1 = new FilesystemDbm(temp_dir);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        
+        // Each async call below quits this shared loop from its callback,
+        // so loop.run() acts as a synchronous wait for one operation.
+        engine1.get_root_async.begin((obj, res) => {
+            try {
+                root1 = engine1.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Shutdown engine
+        engine1.shutdown();
+        
+        // Phase 2: Create new engine and verify root exists
+        var dbm2 = new FilesystemDbm(temp_dir);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? root2 = null;
+        bool exists = false;
+        
+        engine2.entity_exists_async.begin(new EntityPath("/"), (obj, res) => {
+            try {
+                exists = engine2.entity_exists_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || !exists) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.get_root_async.begin((obj, res) => {
+            try {
+                root2 = engine2.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.shutdown();
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Container persistence
+// Creates "/test_container" with one engine, restarts, and verifies the
+// second engine sees it with entity_type CONTAINER and the same name.
+bool test_filesystem_container_persistence() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        // Phase 1: Create container
+        var dbm1 = new FilesystemDbm(temp_dir);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? container1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        
+        // Shared loop: every async call quits it from its callback.
+        engine1.get_root_async.begin((obj, res) => {
+            try {
+                root1 = engine1.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)root1).create_container_async.begin("test_container", (obj, res) => {
+            try {
+                container1 = ((!)root1).create_container_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || container1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine1.shutdown();
+        
+        // Phase 2: Verify container persists
+        var dbm2 = new FilesystemDbm(temp_dir);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        bool exists = false;
+        
+        engine2.entity_exists_async.begin(new EntityPath("/test_container"), (obj, res) => {
+            try {
+                exists = engine2.entity_exists_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || !exists) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        Entity? container2 = null;
+        engine2.get_entity_async.begin(new EntityPath("/test_container"), (obj, res) => {
+            try {
+                container2 = engine2.get_entity_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || container2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // The reloaded entity must keep both its type and its name.
+        if (((!)container2).entity_type != EntityType.CONTAINER) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        if (((!)container2).name != "test_container") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.shutdown();
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Document persistence
+// Creates "/docs/mydoc" (a document of type "TestType") with one engine,
+// restarts, and verifies the second engine reloads it with the same
+// entity_type, name, and type_label.
+bool test_filesystem_document_persistence() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        // Phase 1: Create document
+        var dbm1 = new FilesystemDbm(temp_dir);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? container1 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        
+        // Shared loop: every async call quits it from its callback.
+        engine1.get_root_async.begin((obj, res) => {
+            try {
+                root1 = engine1.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)root1).create_container_async.begin("docs", (obj, res) => {
+            try {
+                container1 = ((!)root1).create_container_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || container1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)container1).create_document_async.begin("mydoc", "TestType", (obj, res) => {
+            try {
+                doc1 = ((!)container1).create_document_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine1.shutdown();
+        
+        // Phase 2: Verify document persists
+        var dbm2 = new FilesystemDbm(temp_dir);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? doc2 = null;
+        
+        engine2.get_entity_async.begin(new EntityPath("/docs/mydoc"), (obj, res) => {
+            try {
+                doc2 = engine2.get_entity_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Type, name and type label must all survive the restart.
+        if (((!)doc2).entity_type != EntityType.DOCUMENT) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        if (((!)doc2).name != "mydoc") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        if (((!)doc2).type_label != "TestType") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.shutdown();
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Property persistence
+// Sets a string property ("email") on "/docs/mydoc" with one engine,
+// restarts, and verifies the second engine reads back the same value.
+bool test_filesystem_property_persistence() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        // Phase 1: Create document with property
+        var dbm1 = new FilesystemDbm(temp_dir);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? container1 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        
+        // Shared loop: every async call quits it from its callback.
+        engine1.get_root_async.begin((obj, res) => {
+            try {
+                root1 = engine1.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)root1).create_container_async.begin("docs", (obj, res) => {
+            try {
+                container1 = ((!)root1).create_container_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || container1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)container1).create_document_async.begin("mydoc", "TestType", (obj, res) => {
+            try {
+                doc1 = ((!)container1).create_document_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Set property
+        ((!)doc1).set_entity_property_async.begin("email", new Invercargill.NativeElement<string>("test@example.com"), (obj, res) => {
+            try {
+                ((!)doc1).set_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine1.shutdown();
+        
+        // Phase 2: Verify property persists
+        var dbm2 = new FilesystemDbm(temp_dir);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? doc2 = null;
+        Invercargill.Element? email = null;
+        
+        engine2.get_entity_async.begin(new EntityPath("/docs/mydoc"), (obj, res) => {
+            try {
+                doc2 = engine2.get_entity_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc2).get_entity_property_async.begin("email", (obj, res) => {
+            try {
+                email = ((!)doc2).get_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        // Missing or null-valued property counts as a failure.
+        if (error != null || email == null || ((!)email).is_null()) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        string email_val = ((!)email).as<string>();
+        if (email_val != "test@example.com") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.shutdown();
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Nested structure persistence
+// Builds "/level1/level2/deepdoc" with one engine, restarts, and verifies
+// every level of the hierarchy still exists and the leaf document keeps
+// its type label.
+bool test_filesystem_nested_structure_persistence() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        // Phase 1: Create nested structure
+        var dbm1 = new FilesystemDbm(temp_dir);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? level1 = null;
+        Entity? level2 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        
+        // Shared loop: every async call quits it from its callback.
+        engine1.get_root_async.begin((obj, res) => {
+            try {
+                root1 = engine1.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)root1).create_container_async.begin("level1", (obj, res) => {
+            try {
+                level1 = ((!)root1).create_container_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || level1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)level1).create_container_async.begin("level2", (obj, res) => {
+            try {
+                level2 = ((!)level1).create_container_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || level2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)level2).create_document_async.begin("deepdoc", "DeepType", (obj, res) => {
+            try {
+                doc1 = ((!)level2).create_document_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine1.shutdown();
+        
+        // Phase 2: Verify nested structure persists
+        var dbm2 = new FilesystemDbm(temp_dir);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        bool exists1 = false;
+        bool exists2 = false;
+        bool exists3 = false;
+        
+        // Check each path level independently, outermost first.
+        engine2.entity_exists_async.begin(new EntityPath("/level1"), (obj, res) => {
+            try {
+                exists1 = engine2.entity_exists_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || !exists1) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.entity_exists_async.begin(new EntityPath("/level1/level2"), (obj, res) => {
+            try {
+                exists2 = engine2.entity_exists_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || !exists2) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.entity_exists_async.begin(new EntityPath("/level1/level2/deepdoc"), (obj, res) => {
+            try {
+                exists3 = engine2.entity_exists_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || !exists3) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        Entity? doc2 = null;
+        engine2.get_entity_async.begin(new EntityPath("/level1/level2/deepdoc"), (obj, res) => {
+            try {
+                doc2 = engine2.get_entity_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        if (((!)doc2).type_label != "DeepType") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.shutdown();
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Multiple properties persistence
+// Sets four properties of different element types (string, int64, bool,
+// double) on "/multidoc" with one engine, restarts, and verifies each
+// value reads back correctly from a second engine.
+bool test_filesystem_multiple_properties_persistence() {
+    string temp_dir = create_temp_dir();
+    
+    try {
+        // Phase 1: Create document with multiple properties
+        var dbm1 = new FilesystemDbm(temp_dir);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        
+        // Shared loop: every async call quits it from its callback.
+        engine1.get_root_async.begin((obj, res) => {
+            try {
+                root1 = engine1.get_root_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || root1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)root1).create_document_async.begin("multidoc", "MultiType", (obj, res) => {
+            try {
+                doc1 = ((!)root1).create_document_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc1 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Set multiple properties of different types
+        ((!)doc1).set_entity_property_async.begin("name", new Invercargill.NativeElement<string>("Test Name"), (obj, res) => {
+            try {
+                ((!)doc1).set_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc1).set_entity_property_async.begin("count", new Invercargill.NativeElement<int64?>(42), (obj, res) => {
+            try {
+                ((!)doc1).set_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc1).set_entity_property_async.begin("active", new Invercargill.NativeElement<bool?>(true), (obj, res) => {
+            try {
+                ((!)doc1).set_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc1).set_entity_property_async.begin("score", new Invercargill.NativeElement<double?>(3.14159), (obj, res) => {
+            try {
+                ((!)doc1).set_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine1.shutdown();
+        
+        // Phase 2: Verify all properties persist
+        var dbm2 = new FilesystemDbm(temp_dir);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? doc2 = null;
+        Invercargill.Element? name = null;
+        Invercargill.Element? count = null;
+        Invercargill.Element? active = null;
+        Invercargill.Element? score = null;
+        
+        engine2.get_entity_async.begin(new EntityPath("/multidoc"), (obj, res) => {
+            try {
+                doc2 = engine2.get_entity_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || doc2 == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc2).get_entity_property_async.begin("name", (obj, res) => {
+            try {
+                name = ((!)doc2).get_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || name == null || ((!)name).as<string>() != "Test Name") {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc2).get_entity_property_async.begin("count", (obj, res) => {
+            try {
+                count = ((!)doc2).get_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || count == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        int64? count_val = ((!)count).as<int64?>();
+        if (count_val == null || (!)count_val != 42) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc2).get_entity_property_async.begin("active", (obj, res) => {
+            try {
+                active = ((!)doc2).get_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || active == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        bool? active_val = ((!)active).as<bool?>();
+        if (active_val == null || (!)active_val != true) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        ((!)doc2).get_entity_property_async.begin("score", (obj, res) => {
+            try {
+                score = ((!)doc2).get_entity_property_async.end(res);
+            } catch (Error e) {
+                error = e;
+            }
+            loop.quit();
+        });
+        loop.run();
+        
+        if (error != null || score == null) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        double? score_val = ((!)score).as<double?>();
+        // Compare the double with a small tolerance window rather than
+        // exact equality, allowing for serialization rounding.
+        if (score_val == null || (!)score_val < 3.14158 || (!)score_val > 3.14160) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        engine2.shutdown();
+        cleanup_dir(temp_dir);
+        return true;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// ============================================================================
+// GDBM Persistence Tests
+// ============================================================================
+
// Test: Basic root persistence with GDBM
/**
 * Verifies that the root entity materialised by one GDBM-backed engine
 * is still present when the same database file is reopened by a second
 * engine.
 *
 * @return true when the root exists and is retrievable after reopen
 *
 * Unlike the naive early-return style, engine/DBM resources are released
 * on every exit path, so the database file is never left open or locked
 * when the temp directory is removed.
 */
bool test_gdbm_basic_persistence() {
    string temp_dir = create_temp_dir("implexus_gdbm_persist_");
    string db_path = Path.build_filename(temp_dir, "test.db");
    bool passed = false;

    try {
        // Phase 1: create an engine and materialise the root.
        var dbm1 = new GdbmDbm();
        dbm1.open(db_path, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        // Always release phase-1 resources, even when the lookup failed.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen the same database and verify the root survived.
            var dbm2 = new GdbmDbm();
            dbm2.open(db_path, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            Entity? root2 = null;
            bool exists = false;

            engine2.entity_exists_async.begin(new EntityPath("/"), (obj, res) => {
                try {
                    exists = engine2.entity_exists_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && exists;

            if (ok) {
                engine2.get_root_async.begin((obj, res) => {
                    try {
                        root2 = engine2.get_root_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && root2 != null;
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
// Test: Container persistence with GDBM
/**
 * Verifies that a container created under the root by one GDBM-backed
 * engine is found again — with EntityType.CONTAINER and the expected
 * name — when the database file is reopened by a second engine.
 *
 * @return true when the container round-trips intact
 *
 * Engine/DBM resources are released on every exit path so the database
 * file is never left open when the temp directory is removed.
 */
bool test_gdbm_container_persistence() {
    string temp_dir = create_temp_dir("implexus_gdbm_persist_");
    string db_path = Path.build_filename(temp_dir, "test.db");
    bool passed = false;

    try {
        // Phase 1: create "/test_container".
        var dbm1 = new GdbmDbm();
        dbm1.open(db_path, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Entity? container1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        if (ok) {
            ((!)root1).create_container_async.begin("test_container", (obj, res) => {
                try {
                    container1 = ((!)root1).create_container_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && container1 != null;
        }

        // Always release phase-1 resources, even on failure.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen and verify the container survived.
            var dbm2 = new GdbmDbm();
            dbm2.open(db_path, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            bool exists = false;
            Entity? container2 = null;

            engine2.entity_exists_async.begin(new EntityPath("/test_container"), (obj, res) => {
                try {
                    exists = engine2.entity_exists_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && exists;

            if (ok) {
                engine2.get_entity_async.begin(new EntityPath("/test_container"), (obj, res) => {
                    try {
                        container2 = engine2.get_entity_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && container2 != null
                    && ((!)container2).entity_type == EntityType.CONTAINER
                    && ((!)container2).name == "test_container";
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
// Test: Document persistence with GDBM
/**
 * Verifies that a document created at "/docs/mydoc" by one GDBM-backed
 * engine is retrievable — with EntityType.DOCUMENT, the expected name
 * and the "TestType" type label — after the database file is reopened
 * by a second engine.
 *
 * @return true when the document round-trips intact
 *
 * Engine/DBM resources are released on every exit path so the database
 * file is never left open when the temp directory is removed.
 */
bool test_gdbm_document_persistence() {
    string temp_dir = create_temp_dir("implexus_gdbm_persist_");
    string db_path = Path.build_filename(temp_dir, "test.db");
    bool passed = false;

    try {
        // Phase 1: create "/docs" and then "/docs/mydoc".
        var dbm1 = new GdbmDbm();
        dbm1.open(db_path, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Entity? container1 = null;
        Entity? doc1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        if (ok) {
            ((!)root1).create_container_async.begin("docs", (obj, res) => {
                try {
                    container1 = ((!)root1).create_container_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && container1 != null;
        }

        if (ok) {
            ((!)container1).create_document_async.begin("mydoc", "TestType", (obj, res) => {
                try {
                    doc1 = ((!)container1).create_document_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && doc1 != null;
        }

        // Always release phase-1 resources, even on failure.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen and verify the document survived.
            var dbm2 = new GdbmDbm();
            dbm2.open(db_path, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            Entity? doc2 = null;

            engine2.get_entity_async.begin(new EntityPath("/docs/mydoc"), (obj, res) => {
                try {
                    doc2 = engine2.get_entity_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();

            passed = error == null && doc2 != null
                && ((!)doc2).entity_type == EntityType.DOCUMENT
                && ((!)doc2).name == "mydoc"
                && ((!)doc2).type_label == "TestType";

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
// Test: Property persistence with GDBM
/**
 * Verifies that a string property ("email") set on "/docs/mydoc" by one
 * GDBM-backed engine is readable with the same value after the database
 * file is reopened by a second engine.
 *
 * @return true when the property round-trips intact
 *
 * Engine/DBM resources are released on every exit path so the database
 * file is never left open when the temp directory is removed.
 */
bool test_gdbm_property_persistence() {
    string temp_dir = create_temp_dir("implexus_gdbm_persist_");
    string db_path = Path.build_filename(temp_dir, "test.db");
    bool passed = false;

    try {
        // Phase 1: create "/docs/mydoc" and set its "email" property.
        var dbm1 = new GdbmDbm();
        dbm1.open(db_path, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Entity? container1 = null;
        Entity? doc1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        if (ok) {
            ((!)root1).create_container_async.begin("docs", (obj, res) => {
                try {
                    container1 = ((!)root1).create_container_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && container1 != null;
        }

        if (ok) {
            ((!)container1).create_document_async.begin("mydoc", "TestType", (obj, res) => {
                try {
                    doc1 = ((!)container1).create_document_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && doc1 != null;
        }

        if (ok) {
            ((!)doc1).set_entity_property_async.begin("email", new Invercargill.NativeElement<string>("test@example.com"), (obj, res) => {
                try {
                    ((!)doc1).set_entity_property_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null;
        }

        // Always release phase-1 resources, even on failure.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen and verify the property survived.
            var dbm2 = new GdbmDbm();
            dbm2.open(db_path, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            Entity? doc2 = null;
            Invercargill.Element? email = null;

            engine2.get_entity_async.begin(new EntityPath("/docs/mydoc"), (obj, res) => {
                try {
                    doc2 = engine2.get_entity_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && doc2 != null;

            if (ok) {
                ((!)doc2).get_entity_property_async.begin("email", (obj, res) => {
                    try {
                        email = ((!)doc2).get_entity_property_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && email != null && !((!)email).is_null()
                    && ((!)email).as<string>() == "test@example.com";
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
// Test: Nested structure persistence with GDBM
/**
 * Verifies that a nested hierarchy "/level1/level2/deepdoc" created by
 * one GDBM-backed engine persists in full: both containers and the
 * document exist after reopen, and the document keeps its "DeepType"
 * label.
 *
 * @return true when the whole hierarchy round-trips intact
 *
 * Engine/DBM resources are released on every exit path so the database
 * file is never left open when the temp directory is removed.
 */
bool test_gdbm_nested_structure_persistence() {
    string temp_dir = create_temp_dir("implexus_gdbm_persist_");
    string db_path = Path.build_filename(temp_dir, "test.db");
    bool passed = false;

    try {
        // Phase 1: build /level1/level2/deepdoc.
        var dbm1 = new GdbmDbm();
        dbm1.open(db_path, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Entity? level1 = null;
        Entity? level2 = null;
        Entity? doc1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        if (ok) {
            ((!)root1).create_container_async.begin("level1", (obj, res) => {
                try {
                    level1 = ((!)root1).create_container_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && level1 != null;
        }

        if (ok) {
            ((!)level1).create_container_async.begin("level2", (obj, res) => {
                try {
                    level2 = ((!)level1).create_container_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && level2 != null;
        }

        if (ok) {
            ((!)level2).create_document_async.begin("deepdoc", "DeepType", (obj, res) => {
                try {
                    doc1 = ((!)level2).create_document_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && doc1 != null;
        }

        // Always release phase-1 resources, even on failure.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen and verify every level of the hierarchy.
            var dbm2 = new GdbmDbm();
            dbm2.open(db_path, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            // Check each path in turn; any failure short-circuits via `ok`.
            string[] paths = { "/level1", "/level1/level2", "/level1/level2/deepdoc" };
            foreach (var p in paths) {
                if (!ok) {
                    break;
                }
                bool exists = false;
                engine2.entity_exists_async.begin(new EntityPath(p), (obj, res) => {
                    try {
                        exists = engine2.entity_exists_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && exists;
            }

            if (ok) {
                Entity? doc2 = null;
                engine2.get_entity_async.begin(new EntityPath("/level1/level2/deepdoc"), (obj, res) => {
                    try {
                        doc2 = engine2.get_entity_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && doc2 != null
                    && ((!)doc2).type_label == "DeepType";
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
// Test: Multiple properties persistence with GDBM
/**
 * Verifies that string, int64, bool and double properties set on
 * "/multidoc" by one GDBM-backed engine all survive a reopen with their
 * original values (the double is checked against a tolerance window
 * rather than exact equality).
 *
 * @return true when every property round-trips intact
 *
 * Engine/DBM resources are released on every exit path so the database
 * file is never left open when the temp directory is removed.
 */
bool test_gdbm_multiple_properties_persistence() {
    string temp_dir = create_temp_dir("implexus_gdbm_persist_");
    string db_path = Path.build_filename(temp_dir, "test.db");
    bool passed = false;

    try {
        // Phase 1: create "/multidoc" and set four typed properties.
        var dbm1 = new GdbmDbm();
        dbm1.open(db_path, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Entity? doc1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        if (ok) {
            ((!)root1).create_document_async.begin("multidoc", "MultiType", (obj, res) => {
                try {
                    doc1 = ((!)root1).create_document_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && doc1 != null;
        }

        if (ok) {
            ((!)doc1).set_entity_property_async.begin("name", new Invercargill.NativeElement<string>("Test Name"), (obj, res) => {
                try {
                    ((!)doc1).set_entity_property_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null;
        }

        if (ok) {
            ((!)doc1).set_entity_property_async.begin("count", new Invercargill.NativeElement<int64?>(42), (obj, res) => {
                try {
                    ((!)doc1).set_entity_property_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null;
        }

        if (ok) {
            ((!)doc1).set_entity_property_async.begin("active", new Invercargill.NativeElement<bool?>(true), (obj, res) => {
                try {
                    ((!)doc1).set_entity_property_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null;
        }

        if (ok) {
            ((!)doc1).set_entity_property_async.begin("score", new Invercargill.NativeElement<double?>(3.14159), (obj, res) => {
                try {
                    ((!)doc1).set_entity_property_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null;
        }

        // Always release phase-1 resources, even on failure.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen and verify every property and its value.
            var dbm2 = new GdbmDbm();
            dbm2.open(db_path, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            Entity? doc2 = null;
            Invercargill.Element? name = null;
            Invercargill.Element? count = null;
            Invercargill.Element? active = null;
            Invercargill.Element? score = null;

            engine2.get_entity_async.begin(new EntityPath("/multidoc"), (obj, res) => {
                try {
                    doc2 = engine2.get_entity_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && doc2 != null;

            if (ok) {
                ((!)doc2).get_entity_property_async.begin("name", (obj, res) => {
                    try {
                        name = ((!)doc2).get_entity_property_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && name != null
                    && ((!)name).as<string>() == "Test Name";
            }

            if (ok) {
                ((!)doc2).get_entity_property_async.begin("count", (obj, res) => {
                    try {
                        count = ((!)doc2).get_entity_property_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && count != null;
                if (ok) {
                    int64? count_val = ((!)count).as<int64?>();
                    ok = count_val != null && (!)count_val == 42;
                }
            }

            if (ok) {
                ((!)doc2).get_entity_property_async.begin("active", (obj, res) => {
                    try {
                        active = ((!)doc2).get_entity_property_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && active != null;
                if (ok) {
                    bool? active_val = ((!)active).as<bool?>();
                    ok = active_val != null && (!)active_val == true;
                }
            }

            if (ok) {
                ((!)doc2).get_entity_property_async.begin("score", (obj, res) => {
                    try {
                        score = ((!)doc2).get_entity_property_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && score != null;
                if (ok) {
                    // Tolerance window instead of exact float comparison.
                    double? score_val = ((!)score).as<double?>();
                    ok = score_val != null
                        && (!)score_val >= 3.14158 && (!)score_val <= 3.14160;
                }
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
+// ============================================================================
+// LMDB Persistence Tests
+// ============================================================================
+
// Test: Basic root persistence with LMDB
/**
 * Verifies that the root entity materialised by one LMDB-backed engine
 * is still present when the same environment directory is reopened by a
 * second engine.
 *
 * @return true when the root exists and is retrievable after reopen
 *
 * Engine/DBM resources are released on every exit path, so the LMDB
 * environment is never left open when the temp directory is removed.
 */
bool test_lmdb_basic_persistence() {
    string temp_dir = create_temp_dir("implexus_lmdb_persist_");
    bool passed = false;

    try {
        // Phase 1: create an engine and materialise the root.
        var dbm1 = new LmdbDbm();
        dbm1.open(temp_dir, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        // Always release phase-1 resources, even when the lookup failed.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen the same environment and verify the root survived.
            var dbm2 = new LmdbDbm();
            dbm2.open(temp_dir, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            Entity? root2 = null;
            bool exists = false;

            engine2.entity_exists_async.begin(new EntityPath("/"), (obj, res) => {
                try {
                    exists = engine2.entity_exists_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && exists;

            if (ok) {
                engine2.get_root_async.begin((obj, res) => {
                    try {
                        root2 = engine2.get_root_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && root2 != null;
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
// Test: Container persistence with LMDB
/**
 * Verifies that a container created under the root by one LMDB-backed
 * engine is found again — with EntityType.CONTAINER and the expected
 * name — when the environment directory is reopened by a second engine.
 *
 * @return true when the container round-trips intact
 *
 * Engine/DBM resources are released on every exit path so the LMDB
 * environment is never left open when the temp directory is removed.
 */
bool test_lmdb_container_persistence() {
    string temp_dir = create_temp_dir("implexus_lmdb_persist_");
    bool passed = false;

    try {
        // Phase 1: create "/test_container".
        var dbm1 = new LmdbDbm();
        dbm1.open(temp_dir, false);
        var storage1 = new BasicStorage(dbm1);
        var config1 = new StorageConfiguration(storage1);
        var engine1 = new EmbeddedEngine(config1);

        Entity? root1 = null;
        Entity? container1 = null;
        Error? error = null;
        var loop = new MainLoop();

        engine1.get_root_async.begin((obj, res) => {
            try {
                root1 = engine1.get_root_async.end(res);
            } catch (Error e) {
                error = e;
            }
            loop.quit();
        });
        loop.run();

        bool ok = error == null && root1 != null;

        if (ok) {
            ((!)root1).create_container_async.begin("test_container", (obj, res) => {
                try {
                    container1 = ((!)root1).create_container_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && container1 != null;
        }

        // Always release phase-1 resources, even on failure.
        engine1.shutdown();
        dbm1.close();

        if (ok) {
            // Phase 2: reopen and verify the container survived.
            var dbm2 = new LmdbDbm();
            dbm2.open(temp_dir, false);
            var storage2 = new BasicStorage(dbm2);
            var config2 = new StorageConfiguration(storage2);
            var engine2 = new EmbeddedEngine(config2);

            bool exists = false;
            Entity? container2 = null;

            engine2.entity_exists_async.begin(new EntityPath("/test_container"), (obj, res) => {
                try {
                    exists = engine2.entity_exists_async.end(res);
                } catch (Error e) {
                    error = e;
                }
                loop.quit();
            });
            loop.run();
            ok = error == null && exists;

            if (ok) {
                engine2.get_entity_async.begin(new EntityPath("/test_container"), (obj, res) => {
                    try {
                        container2 = engine2.get_entity_async.end(res);
                    } catch (Error e) {
                        error = e;
                    }
                    loop.quit();
                });
                loop.run();
                ok = error == null && container2 != null
                    && ((!)container2).entity_type == EntityType.CONTAINER
                    && ((!)container2).name == "test_container";
            }

            passed = ok;

            // Always release phase-2 resources before cleanup.
            engine2.shutdown();
            dbm2.close();
        }
    } catch (Error e) {
        passed = false;
    }

    cleanup_dir(temp_dir);
    return passed;
}
+
+// Test: Document persistence with LMDB
+//
+// Phase 1 creates /docs/mydoc in a fresh LMDB environment and shuts the
+// engine down; Phase 2 reopens the same directory with a brand-new engine
+// and verifies the document (entity_type, name, type_label) was persisted.
+bool test_lmdb_document_persistence() {
+    string temp_dir = create_temp_dir("implexus_lmdb_persist_");
+    
+    try {
+        // Phase 1: Create document
+        var dbm1 = new LmdbDbm();
+        dbm1.open(temp_dir, false);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? container1 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        bool ok = false;
+        
+        // Single-exit phase body: `break` on any failure so the engine and
+        // LMDB handles below are always released before returning. (The
+        // original early returns skipped shutdown()/close() on failure,
+        // leaking the LMDB environment handle.)
+        do {
+            engine1.get_root_async.begin((obj, res) => {
+                try {
+                    root1 = engine1.get_root_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || root1 == null) break;
+            
+            ((!)root1).create_container_async.begin("docs", (obj, res) => {
+                try {
+                    container1 = ((!)root1).create_container_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || container1 == null) break;
+            
+            ((!)container1).create_document_async.begin("mydoc", "TestType", (obj, res) => {
+                try {
+                    doc1 = ((!)container1).create_document_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc1 == null) break;
+            
+            ok = true;
+        } while (false);
+        
+        // Always release phase-1 resources, success or not.
+        engine1.shutdown();
+        dbm1.close();
+        
+        if (!ok) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Phase 2: Verify document persists in a brand-new engine instance
+        var dbm2 = new LmdbDbm();
+        dbm2.open(temp_dir, false);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? doc2 = null;
+        ok = false;
+        
+        do {
+            engine2.get_entity_async.begin(new EntityPath("/docs/mydoc"), (obj, res) => {
+                try {
+                    doc2 = engine2.get_entity_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc2 == null) break;
+            
+            // The reloaded entity must carry the original metadata.
+            if (((!)doc2).entity_type != EntityType.DOCUMENT) break;
+            if (((!)doc2).name != "mydoc") break;
+            if (((!)doc2).type_label != "TestType") break;
+            
+            ok = true;
+        } while (false);
+        
+        // Phase-2 handles released on every path before cleanup.
+        engine2.shutdown();
+        dbm2.close();
+        cleanup_dir(temp_dir);
+        return ok;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Property persistence with LMDB
+//
+// Phase 1 creates /docs/mydoc and sets an "email" property, then shuts the
+// engine down; Phase 2 reopens the same LMDB directory and verifies the
+// property value survived the restart.
+bool test_lmdb_property_persistence() {
+    string temp_dir = create_temp_dir("implexus_lmdb_persist_");
+    
+    try {
+        // Phase 1: Create document with property
+        var dbm1 = new LmdbDbm();
+        dbm1.open(temp_dir, false);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? container1 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        bool ok = false;
+        
+        // Single-exit phase body: break on failure so engine/dbm handles are
+        // always released (the original early returns skipped shutdown() and
+        // close(), leaking the LMDB environment on failure paths).
+        do {
+            engine1.get_root_async.begin((obj, res) => {
+                try {
+                    root1 = engine1.get_root_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || root1 == null) break;
+            
+            ((!)root1).create_container_async.begin("docs", (obj, res) => {
+                try {
+                    container1 = ((!)root1).create_container_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || container1 == null) break;
+            
+            ((!)container1).create_document_async.begin("mydoc", "TestType", (obj, res) => {
+                try {
+                    doc1 = ((!)container1).create_document_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc1 == null) break;
+            
+            // Set property
+            ((!)doc1).set_entity_property_async.begin("email", new Invercargill.NativeElement<string>("test@example.com"), (obj, res) => {
+                try {
+                    ((!)doc1).set_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null) break;
+            
+            ok = true;
+        } while (false);
+        
+        // Always release phase-1 resources, success or not.
+        engine1.shutdown();
+        dbm1.close();
+        
+        if (!ok) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Phase 2: Verify property persists
+        var dbm2 = new LmdbDbm();
+        dbm2.open(temp_dir, false);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? doc2 = null;
+        Invercargill.Element? email = null;
+        ok = false;
+        
+        do {
+            engine2.get_entity_async.begin(new EntityPath("/docs/mydoc"), (obj, res) => {
+                try {
+                    doc2 = engine2.get_entity_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc2 == null) break;
+            
+            ((!)doc2).get_entity_property_async.begin("email", (obj, res) => {
+                try {
+                    email = ((!)doc2).get_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || email == null || ((!)email).is_null()) break;
+            
+            if (((!)email).as<string>() != "test@example.com") break;
+            
+            ok = true;
+        } while (false);
+        
+        // Phase-2 handles released on every path before cleanup.
+        engine2.shutdown();
+        dbm2.close();
+        cleanup_dir(temp_dir);
+        return ok;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Nested structure persistence with LMDB
+//
+// Phase 1 builds /level1/level2/deepdoc and shuts the engine down; Phase 2
+// reopens the same LMDB directory and checks every level of the hierarchy
+// still exists and that the leaf document kept its type label.
+bool test_lmdb_nested_structure_persistence() {
+    string temp_dir = create_temp_dir("implexus_lmdb_persist_");
+    
+    try {
+        // Phase 1: Create nested structure
+        var dbm1 = new LmdbDbm();
+        dbm1.open(temp_dir, false);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? level1 = null;
+        Entity? level2 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        bool ok = false;
+        
+        // Single-exit phase body: break on failure so engine/dbm handles are
+        // always released (the original early returns skipped shutdown() and
+        // close(), leaking the LMDB environment on failure paths).
+        do {
+            engine1.get_root_async.begin((obj, res) => {
+                try {
+                    root1 = engine1.get_root_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || root1 == null) break;
+            
+            ((!)root1).create_container_async.begin("level1", (obj, res) => {
+                try {
+                    level1 = ((!)root1).create_container_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || level1 == null) break;
+            
+            ((!)level1).create_container_async.begin("level2", (obj, res) => {
+                try {
+                    level2 = ((!)level1).create_container_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || level2 == null) break;
+            
+            ((!)level2).create_document_async.begin("deepdoc", "DeepType", (obj, res) => {
+                try {
+                    doc1 = ((!)level2).create_document_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc1 == null) break;
+            
+            ok = true;
+        } while (false);
+        
+        // Always release phase-1 resources, success or not.
+        engine1.shutdown();
+        dbm1.close();
+        
+        if (!ok) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Phase 2: Verify nested structure persists
+        var dbm2 = new LmdbDbm();
+        dbm2.open(temp_dir, false);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        bool exists = false;
+        Entity? doc2 = null;
+        ok = false;
+        
+        do {
+            // Every ancestor and the leaf must be reachable by path.
+            string[] paths = { "/level1", "/level1/level2", "/level1/level2/deepdoc" };
+            bool missing = false;
+            foreach (var path in paths) {
+                exists = false;
+                engine2.entity_exists_async.begin(new EntityPath(path), (obj, res) => {
+                    try {
+                        exists = engine2.entity_exists_async.end(res);
+                    } catch (Error e) {
+                        error = e;
+                    }
+                    loop.quit();
+                });
+                loop.run();
+                if (error != null || !exists) {
+                    missing = true;
+                    break;
+                }
+            }
+            if (missing) break;
+            
+            engine2.get_entity_async.begin(new EntityPath("/level1/level2/deepdoc"), (obj, res) => {
+                try {
+                    doc2 = engine2.get_entity_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc2 == null) break;
+            
+            if (((!)doc2).type_label != "DeepType") break;
+            
+            ok = true;
+        } while (false);
+        
+        // Phase-2 handles released on every path before cleanup.
+        engine2.shutdown();
+        dbm2.close();
+        cleanup_dir(temp_dir);
+        return ok;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}
+
+// Test: Multiple properties persistence with LMDB
+//
+// Phase 1 creates /multidoc and sets four properties of different native
+// types (string, int64, bool, double); Phase 2 reopens the LMDB directory
+// and verifies every value survived the engine restart.
+bool test_lmdb_multiple_properties_persistence() {
+    string temp_dir = create_temp_dir("implexus_lmdb_persist_");
+    
+    try {
+        // Phase 1: Create document with multiple properties
+        var dbm1 = new LmdbDbm();
+        dbm1.open(temp_dir, false);
+        var storage1 = new BasicStorage(dbm1);
+        var config1 = new StorageConfiguration(storage1);
+        var engine1 = new EmbeddedEngine(config1);
+        
+        Entity? root1 = null;
+        Entity? doc1 = null;
+        Error? error = null;
+        var loop = new MainLoop();
+        bool ok = false;
+        
+        // Single-exit phase body: break on failure so engine/dbm handles are
+        // always released (the original early returns skipped shutdown() and
+        // close(), leaking the LMDB environment on failure paths).
+        do {
+            engine1.get_root_async.begin((obj, res) => {
+                try {
+                    root1 = engine1.get_root_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || root1 == null) break;
+            
+            ((!)root1).create_document_async.begin("multidoc", "MultiType", (obj, res) => {
+                try {
+                    doc1 = ((!)root1).create_document_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc1 == null) break;
+            
+            // Set multiple properties of different types
+            ((!)doc1).set_entity_property_async.begin("name", new Invercargill.NativeElement<string>("Test Name"), (obj, res) => {
+                try {
+                    ((!)doc1).set_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null) break;
+            
+            ((!)doc1).set_entity_property_async.begin("count", new Invercargill.NativeElement<int64?>(42), (obj, res) => {
+                try {
+                    ((!)doc1).set_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null) break;
+            
+            ((!)doc1).set_entity_property_async.begin("active", new Invercargill.NativeElement<bool?>(true), (obj, res) => {
+                try {
+                    ((!)doc1).set_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null) break;
+            
+            ((!)doc1).set_entity_property_async.begin("score", new Invercargill.NativeElement<double?>(3.14159), (obj, res) => {
+                try {
+                    ((!)doc1).set_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null) break;
+            
+            ok = true;
+        } while (false);
+        
+        // Always release phase-1 resources, success or not.
+        engine1.shutdown();
+        dbm1.close();
+        
+        if (!ok) {
+            cleanup_dir(temp_dir);
+            return false;
+        }
+        
+        // Phase 2: Verify all properties persist
+        var dbm2 = new LmdbDbm();
+        dbm2.open(temp_dir, false);
+        var storage2 = new BasicStorage(dbm2);
+        var config2 = new StorageConfiguration(storage2);
+        var engine2 = new EmbeddedEngine(config2);
+        
+        Entity? doc2 = null;
+        Invercargill.Element? name = null;
+        Invercargill.Element? count = null;
+        Invercargill.Element? active = null;
+        Invercargill.Element? score = null;
+        ok = false;
+        
+        do {
+            engine2.get_entity_async.begin(new EntityPath("/multidoc"), (obj, res) => {
+                try {
+                    doc2 = engine2.get_entity_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || doc2 == null) break;
+            
+            // name: exact string match
+            ((!)doc2).get_entity_property_async.begin("name", (obj, res) => {
+                try {
+                    name = ((!)doc2).get_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || name == null) break;
+            if (((!)name).as<string>() != "Test Name") break;
+            
+            // count: nullable int64 with explicit null check
+            ((!)doc2).get_entity_property_async.begin("count", (obj, res) => {
+                try {
+                    count = ((!)doc2).get_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || count == null) break;
+            int64? count_val = ((!)count).as<int64?>();
+            if (count_val == null || (!)count_val != 42) break;
+            
+            // active: nullable bool with explicit null check
+            ((!)doc2).get_entity_property_async.begin("active", (obj, res) => {
+                try {
+                    active = ((!)doc2).get_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || active == null) break;
+            bool? active_val = ((!)active).as<bool?>();
+            if (active_val == null || (!)active_val != true) break;
+            
+            // score: tolerance comparison, floats are not round-trip exact
+            ((!)doc2).get_entity_property_async.begin("score", (obj, res) => {
+                try {
+                    score = ((!)doc2).get_entity_property_async.end(res);
+                } catch (Error e) {
+                    error = e;
+                }
+                loop.quit();
+            });
+            loop.run();
+            if (error != null || score == null) break;
+            double? score_val = ((!)score).as<double?>();
+            if (score_val == null || (!)score_val < 3.14158 || (!)score_val > 3.14160) break;
+            
+            ok = true;
+        } while (false);
+        
+        // Phase-2 handles released on every path before cleanup.
+        engine2.shutdown();
+        dbm2.close();
+        cleanup_dir(temp_dir);
+        return ok;
+    } catch (Error e) {
+        cleanup_dir(temp_dir);
+        return false;
+    }
+}

+ 556 - 0
tests/Storage/ElementSerializerTest.vala

@@ -0,0 +1,556 @@
+/**
+ * ElementSerializerTest - Unit tests for Element serialization
+ */
+using Implexus.Storage;
+
+// Static (targetless) signature shared by every test entry point, so the
+// functions can be stored in a plain array for a table-driven runner.
+[CCode (has_target = false)]
+delegate bool ElementSerializerTestFunc ();
+
+/**
+ * Test harness entry point.
+ *
+ * Runs every serializer test in declaration order, printing "PASS: <name>"
+ * or "FAIL: <name>" per test, then a summary line. Returns 1 if any test
+ * failed, 0 otherwise. Output is byte-identical to the original per-test
+ * if/else blocks; the table just removes fifteen copies of the same code.
+ */
+public static int main(string[] args) {
+    // Names and functions are parallel arrays; keep them in sync.
+    string[] names = {
+        "test_null_element",
+        "test_bool_element",
+        "test_int64_element",
+        "test_uint64_element",
+        "test_double_element",
+        "test_string_element",
+        "test_binary_element",
+        "test_array_element",
+        "test_dictionary_element",
+        "test_round_trip",
+        "test_nested_structure",
+        "test_empty_array",
+        "test_empty_dictionary",
+        "test_large_integers",
+        "test_special_strings"
+    };
+    ElementSerializerTestFunc[] tests = {
+        test_null_element,
+        test_bool_element,
+        test_int64_element,
+        test_uint64_element,
+        test_double_element,
+        test_string_element,
+        test_binary_element,
+        test_array_element,
+        test_dictionary_element,
+        test_round_trip,
+        test_nested_structure,
+        test_empty_array,
+        test_empty_dictionary,
+        test_large_integers,
+        test_special_strings
+    };
+
+    int passed = 0;
+    int failed = 0;
+    for (int i = 0; i < tests.length; i++) {
+        // Report progressively, as the original did, so a crash mid-run
+        // still shows which tests completed.
+        if (tests[i]()) {
+            passed++;
+            stdout.printf("PASS: %s\n", names[i]);
+        } else {
+            failed++;
+            stdout.printf("FAIL: %s\n", names[i]);
+        }
+    }
+
+    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
+    return failed > 0 ? 1 : 0;
+}
+
+// Test 1: Null element
+// Serializes an explicit null and checks that it reads back as null.
+bool test_null_element() {
+    try {
+        var w = new ElementWriter();
+        w.write_null();
+        
+        var r = new ElementReader(w.to_binary_data());
+        return r.read_element().is_null();
+    } catch (StorageError e) {
+        return false;
+    }
+}
+
+// Test 2: Boolean elements
+// Round-trips both true and false through the serializer.
+bool test_bool_element() {
+    try {
+        bool[] values = { true, false };
+        
+        foreach (var val in values) {
+            var writer = new ElementWriter();
+            writer.write_element(new Invercargill.NativeElement<bool?>(val));
+            
+            var reader = new ElementReader(writer.to_binary_data());
+            var elem = reader.read_element();
+            if (elem.is_null()) return false;
+            // Read back as nullable and null-check explicitly, matching the
+            // int64/uint64 tests; the original unboxed bool? straight into
+            // bool, which would crash rather than fail the test if the
+            // deserialized value were unexpectedly null.
+            bool? read_val = elem.as<bool?>();
+            if (read_val == null || (!) read_val != val) return false;
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 3: Int64 elements
+// Round-trips a spread of signed values including both extremes.
+bool test_int64_element() {
+    try {
+        int64[] samples = { 0, 1, -1, 42, -42, int64.MAX, int64.MIN };
+        
+        for (int i = 0; i < samples.length; i++) {
+            int64 expected = samples[i];
+            var w = new ElementWriter();
+            w.write_element(new Invercargill.NativeElement<int64?>(expected));
+            
+            var elem = new ElementReader(w.to_binary_data()).read_element();
+            if (elem.is_null()) {
+                return false;
+            }
+            int64? actual = elem.as<int64?>();
+            if (actual == null || (!) actual != expected) {
+                return false;
+            }
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 4: UInt64 elements
+// Round-trips a spread of unsigned values including the maximum.
+bool test_uint64_element() {
+    try {
+        uint64[] samples = { 0, 1, 42, uint64.MAX };
+        
+        for (int i = 0; i < samples.length; i++) {
+            uint64 expected = samples[i];
+            var w = new ElementWriter();
+            w.write_element(new Invercargill.NativeElement<uint64?>(expected));
+            
+            var elem = new ElementReader(w.to_binary_data()).read_element();
+            if (elem.is_null()) {
+                return false;
+            }
+            uint64? actual = elem.as<uint64?>();
+            if (actual == null || (!) actual != expected) {
+                return false;
+            }
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 5: Double elements
+// Round-trips representative doubles; compares with a small tolerance
+// since exact equality on floating point is brittle.
+bool test_double_element() {
+    try {
+        double[] samples = { 0.0, 1.0, -1.0, 3.14159, -3.14159, double.MAX, double.MIN };
+        
+        for (int i = 0; i < samples.length; i++) {
+            double expected = samples[i];
+            var w = new ElementWriter();
+            w.write_element(new Invercargill.NativeElement<double?>(expected));
+            
+            var elem = new ElementReader(w.to_binary_data()).read_element();
+            if (elem.is_null()) {
+                return false;
+            }
+            double? actual = elem.as<double?>();
+            if (actual == null) {
+                return false;
+            }
+            if (Math.fabs((!) actual - expected) > 0.0001) {
+                return false;
+            }
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 6: String elements
+// Round-trips empty, ASCII, punctuation, non-Latin and multi-line strings.
+bool test_string_element() {
+    try {
+        string[] samples = { "", "hello", "Hello, World!", "日本語", "line1\nline2" };
+        
+        for (int i = 0; i < samples.length; i++) {
+            var w = new ElementWriter();
+            w.write_element(new Invercargill.NativeElement<string>(samples[i]));
+            
+            var elem = new ElementReader(w.to_binary_data()).read_element();
+            if (elem.is_null()) {
+                return false;
+            }
+            if (elem.as<string>() != samples[i]) {
+                return false;
+            }
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 7: Binary elements
+// Round-trips a raw byte buffer and compares it byte for byte.
+bool test_binary_element() {
+    try {
+        uint8[] raw = { 0x00, 0x01, 0x02, 0xFF, 0xFE, 0xFD };
+        var buffer = new Invercargill.DataStructures.ByteBuffer.from_byte_array(raw);
+        
+        var w = new ElementWriter();
+        w.write_element(new Invercargill.NativeElement<Invercargill.BinaryData>(buffer));
+        
+        var elem = new ElementReader(w.to_binary_data()).read_element();
+        if (elem.is_null()) {
+            return false;
+        }
+        
+        var round_tripped = elem.as<Invercargill.BinaryData>().to_bytes();
+        if (round_tripped.length != raw.length) {
+            return false;
+        }
+        for (int i = 0; i < raw.length; i++) {
+            if (round_tripped.get_data()[i] != raw[i]) {
+                return false;
+            }
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 8: Array elements
+// Serializes a vector of three integers and verifies order and values.
+bool test_array_element() {
+    try {
+        var source = new Invercargill.DataStructures.Vector<Invercargill.Element>();
+        for (int64 i = 1; i <= 3; i++) {
+            source.add(new Invercargill.NativeElement<int64?>(i));
+        }
+        
+        var w = new ElementWriter();
+        w.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(source));
+        
+        var elem = new ElementReader(w.to_binary_data()).read_element();
+        if (elem.is_null()) {
+            return false;
+        }
+        
+        var round_tripped = elem.as<Invercargill.Enumerable<Invercargill.Element>>();
+        if (round_tripped.count() != 3) {
+            return false;
+        }
+        
+        // Elements must come back in insertion order: 1, 2, 3.
+        int64 next = 1;
+        foreach (var item in round_tripped) {
+            if (item.is_null()) {
+                return false;
+            }
+            int64? val = item.as<int64?>();
+            if (val == null || (!) val != next) {
+                return false;
+            }
+            next++;
+        }
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 9: Dictionary/Properties elements
+// Round-trips a three-entry property dictionary and verifies each value.
+bool test_dictionary_element() {
+    try {
+        var props = new Invercargill.DataStructures.PropertyDictionary();
+        props.set("name", new Invercargill.NativeElement<string>("John"));
+        props.set("age", new Invercargill.NativeElement<int64?>(30));
+        props.set("active", new Invercargill.NativeElement<bool?>(true));
+        
+        var writer = new ElementWriter();
+        writer.write_element(new Invercargill.NativeElement<Invercargill.Properties>(props));
+        
+        var reader = new ElementReader(writer.to_binary_data());
+        var elem = reader.read_element();
+        if (elem.is_null()) return false;
+        
+        var read_props = elem.as<Invercargill.Properties>();
+        if (read_props.count() != 3) return false;
+        
+        // Check name
+        var name_elem = read_props.get("name");
+        if (name_elem == null || ((!) name_elem).is_null()) return false;
+        string name = ((!) name_elem).as<string>();
+        if (name != "John") return false;
+        
+        // Check age
+        var age_elem = read_props.get("age");
+        if (age_elem == null || ((!) age_elem).is_null()) return false;
+        int64? age = ((!) age_elem).as<int64?>();
+        if (age == null || (!) age != 30) return false;
+        
+        // Check active: read as nullable and null-check explicitly, matching
+        // the age check above (the original unboxed bool? straight into bool,
+        // which would crash rather than fail the test on an unexpected null).
+        var active_elem = read_props.get("active");
+        if (active_elem == null || ((!) active_elem).is_null()) return false;
+        bool? active = ((!) active_elem).as<bool?>();
+        if (active == null || (!) active != true) return false;
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
+// Test 10: Round-trip all types
+// Writes three elements of different types into one buffer and reads them
+// back in order, verifying the stream keeps element boundaries intact.
+bool test_round_trip() {
+    try {
+        // Write multiple elements
+        var writer = new ElementWriter();
+        
+        writer.write_element(new Invercargill.NativeElement<bool?>(true));
+        writer.write_element(new Invercargill.NativeElement<int64?>(42));
+        writer.write_element(new Invercargill.NativeElement<string>("test"));
+        
+        var data = writer.to_binary_data();
+        var reader = new ElementReader(data);
+        
+        // Boolean: read as nullable and null-check explicitly, matching the
+        // int64 check below (the original unboxed bool? straight into bool,
+        // which would crash rather than fail the test on an unexpected null).
+        var elem1 = reader.read_element();
+        if (elem1.is_null()) return false;
+        bool? val1 = elem1.as<bool?>();
+        if (val1 == null || (!) val1 != true) return false;
+        
+        var elem2 = reader.read_element();
+        if (elem2.is_null()) return false;
+        int64? val2 = elem2.as<int64?>();
+        if (val2 == null || (!) val2 != 42) return false;
+        
+        var elem3 = reader.read_element();
+        if (elem3.is_null()) return false;
+        string val3 = elem3.as<string>();
+        if (val3 != "test") return false;
+        
+        return true;
+    } catch (Error e) {
+        return false;
+    }
+}
+
// Test 11: Complex nested structure
//
// Round-trips an array whose single item is a dictionary, verifying that
// both container layers survive serialization.
bool test_nested_structure() {
    try {
        // Build: [ { "key": "value" } ]
        var payload = new Invercargill.DataStructures.PropertyDictionary();
        payload.set("key", new Invercargill.NativeElement<string>("value"));

        var list = new Invercargill.DataStructures.Vector<Invercargill.Element>();
        list.add(new Invercargill.NativeElement<Invercargill.Properties>(payload));

        var writer = new ElementWriter();
        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(list));

        var reader = new ElementReader(writer.to_binary_data());
        var root = reader.read_element();
        if (root.is_null()) return false;

        var decoded = root.as<Invercargill.Enumerable<Invercargill.Element>>();
        if (decoded.count() != 1) return false;

        foreach (var entry in decoded) {
            if (entry.is_null()) return false;
            var dict = entry.as<Invercargill.Properties>();
            var stored = dict.get("key");
            if (stored == null || ((!) stored).is_null()) return false;
            if (((!) stored).as<string>() != "value") return false;
        }

        return true;
    } catch (Error e) {
        return false;
    }
}
+
// Test 12: Empty array
//
// An empty container must round-trip as a non-null element with zero items.
bool test_empty_array() {
    try {
        var empty = new Invercargill.DataStructures.Vector<Invercargill.Element>();

        var writer = new ElementWriter();
        writer.write_element(new Invercargill.NativeElement<Invercargill.Enumerable<Invercargill.Element>>(empty));

        var reader = new ElementReader(writer.to_binary_data());
        var root = reader.read_element();
        if (root.is_null()) return false;

        return root.as<Invercargill.Enumerable<Invercargill.Element>>().count() == 0;
    } catch (Error e) {
        return false;
    }
}
+
// Test 13: Empty dictionary
//
// An empty property set must round-trip as a non-null element with no keys.
bool test_empty_dictionary() {
    try {
        var empty = new Invercargill.DataStructures.PropertyDictionary();

        var writer = new ElementWriter();
        writer.write_element(new Invercargill.NativeElement<Invercargill.Properties>(empty));

        var reader = new ElementReader(writer.to_binary_data());
        var root = reader.read_element();
        if (root.is_null()) return false;

        return root.as<Invercargill.Properties>().count() == 0;
    } catch (Error e) {
        return false;
    }
}
+
// Test 14: Large integers
//
// int64.MAX and int64.MIN must survive the round trip without clipping.
bool test_large_integers() {
    try {
        int64[] bounds = { int64.MAX, int64.MIN };

        var writer = new ElementWriter();
        foreach (var bound in bounds) {
            writer.write_element(new Invercargill.NativeElement<int64?>(bound));
        }

        var reader = new ElementReader(writer.to_binary_data());
        foreach (var bound in bounds) {
            var elem = reader.read_element();
            if (elem.is_null()) return false;
            int64? decoded = elem.as<int64?>();
            if (decoded == null || (!) decoded != bound) return false;
        }

        return true;
    } catch (Error e) {
        return false;
    }
}
+
// Test 15: Special string values
//
// Round-trips a set of edge-case strings through ElementWriter/ElementReader
// and checks each comes back unchanged.
//
// NOTE(review): Vala string literals are NUL-terminated C strings, so
// "a\0b" is effectively just "a" by the time it reaches the writer — the
// embedded-NUL case is not actually exercised. TODO: use a byte-array based
// element if embedded NULs must be covered.
bool test_special_strings() {
    try {
        string[] special = {
            "",                          // Empty
            " ",                         // Space
            "\t\n\r",                    // Whitespace
            "null",                      // Literal "null"
            "\"quoted\"",               // Quotes
            "back\\slash",              // Backslash
            "日本語",                    // Unicode
            "🎉",                        // Emoji
            "a\0b",                      // Null byte in string
        };
        
        // Each string gets its own writer/reader pair so one failure cannot
        // mask another.
        foreach (var str in special) {
            var writer = new ElementWriter();
            writer.write_element(new Invercargill.NativeElement<string>(str));
            
            var reader = new ElementReader(writer.to_binary_data());
            var elem = reader.read_element();
            if (elem.is_null()) return false;
            string read = elem.as<string>();
            if (read != str) return false;
        }
        
        return true;
    } catch (Error e) {
        return false;
    }
}

+ 608 - 0
tests/Storage/FilesystemDbmTest.vala

@@ -0,0 +1,608 @@
+/**
+ * FilesystemDbmTest - Unit tests for FilesystemDbm storage
+ */
+using Implexus.Storage;
+
/**
 * Test harness entry point for FilesystemDbm.
 *
 * Runs every test case in declaration order, prints one PASS/FAIL line per
 * test followed by a summary, and exits 0 only when everything passed.
 */
public static int main(string[] args) {
    string[] names = {
        "test_basic_set_get",
        "test_get_nonexistent",
        "test_delete_key",
        "test_has_key",
        "test_keys_iteration",
        "test_transaction_commit",
        "test_transaction_rollback",
        "test_transaction_delete",
        "test_overwrite_value",
        "test_special_key_names",
        "test_binary_data",
        "test_empty_key",
        "test_large_value",
        "test_multiple_operations",
        "test_persistence"
    };

    // results[i] corresponds to names[i]; the tests print nothing themselves,
    // so running them all up front keeps the output identical.
    var results = new bool[names.length];
    results[0] = test_basic_set_get();
    results[1] = test_get_nonexistent();
    results[2] = test_delete_key();
    results[3] = test_has_key();
    results[4] = test_keys_iteration();
    results[5] = test_transaction_commit();
    results[6] = test_transaction_rollback();
    results[7] = test_transaction_delete();
    results[8] = test_overwrite_value();
    results[9] = test_special_key_names();
    results[10] = test_binary_data();
    results[11] = test_empty_key();
    results[12] = test_large_value();
    results[13] = test_multiple_operations();
    results[14] = test_persistence();

    int passed = 0;
    int failed = 0;
    for (int i = 0; i < names.length; i++) {
        if (results[i]) {
            passed++;
            stdout.puts(@"PASS: $(names[i])\n");
        } else {
            failed++;
            stdout.puts(@"FAIL: $(names[i])\n");
        }
    }

    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
    return failed > 0 ? 1 : 0;
}
+
// Creates a fresh, uniquely-named directory for a single test to use.
// The caller owns the directory and must remove it via cleanup_dir().
string create_temp_dir() {
    return DirUtils.mkdtemp("implexus_test_XXXXXX");
}
+
// Wraps a string's bytes (plus a trailing NUL) in BinaryData so the value
// can be cast straight back to a C string when read out of storage.
Invercargill.BinaryData string_to_binary(string str) {
    uint8[] buf = new uint8[str.length + 1];
    for (int i = 0; i < str.length; i++) {
        buf[i] = str.data[i];
    }
    buf[str.length] = '\0'; // terminator consumed by binary_to_string()
    return new Invercargill.DataStructures.ByteBuffer.from_byte_array(buf);
}
+
// Helper to convert BinaryData to string
// NOTE(review): the cast relies on the stored bytes ending with a NUL
// terminator — string_to_binary() above always appends one. A value written
// without a terminator would make this read past the buffer; confirm every
// producer appends it.
string binary_to_string(Invercargill.BinaryData data) {
    var bytes = data.to_bytes();
    return (string) bytes.get_data();
}
+
// Test 1: Basic set and get
//
// Fixed: the early `return false` paths previously skipped cleanup_dir(),
// leaking the temporary directory. All exits now run cleanup.
bool test_basic_set_get() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("test_key", string_to_binary("test_value"));

        var value = dbm.get("test_key");
        ok = value != null && binary_to_string((!) value) == "test_value";
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 2: Get non-existent key
//
// Fixed: errors from construction/lookup were previously unhandled (this was
// the only test without try/catch); now mirrors the sibling tests and the
// GdbmDbm counterpart, and still cleans up on failure.
bool test_get_nonexistent() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        ok = dbm.get("nonexistent") == null;
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 3: Delete key
//
// Fixed: the early `return false` paths leaked the temp directory; all
// exits now run cleanup_dir(). Delete is only attempted once the key is
// confirmed present, as before.
bool test_delete_key() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("key1", string_to_binary("value1"));

        if (dbm.has_key("key1")) {
            dbm.delete("key1");
            ok = !dbm.has_key("key1");
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 4: Has key
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_has_key() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);

        // key1 must not exist before it is written.
        if (!dbm.has_key("key1")) {
            dbm.set("key1", string_to_binary("value1"));
            // key1 now exists; key2 never did.
            ok = dbm.has_key("key1") && !dbm.has_key("key2");
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 5: Keys iteration
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_keys_iteration() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("key1", string_to_binary("value1"));
        dbm.set("key2", string_to_binary("value2"));
        dbm.set("key3", string_to_binary("value3"));

        var keys = dbm.keys;
        if (keys.count() == 3) {
            // Iteration order is unspecified, so collect into a set.
            var seen = new Invercargill.DataStructures.HashSet<string>();
            foreach (var key in keys) {
                seen.add(key);
            }
            ok = seen.has("key1") && seen.has("key2") && seen.has("key3");
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 6: Transaction commit
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir(). Commit is only issued once the buffered write is
// confirmed visible, as before.
bool test_transaction_commit() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("key1", string_to_binary("value1"));

        dbm.begin_transaction();
        dbm.set("key2", string_to_binary("value2"));
        dbm.set("key3", string_to_binary("value3"));

        // Before commit, the value must already be readable from the
        // transaction buffer.
        if (dbm.get("key2") != null) {
            dbm.commit_transaction();
            // After commit, everything must be persisted.
            ok = dbm.has_key("key1") && dbm.has_key("key2") && dbm.has_key("key3");
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 7: Transaction rollback
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_transaction_rollback() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("key1", string_to_binary("value1"));

        dbm.begin_transaction();
        dbm.set("key2", string_to_binary("value2"));

        // The buffered write must be visible inside the transaction.
        if (dbm.has_key("key2")) {
            dbm.rollback_transaction();
            // key2 is discarded by the rollback; key1 predates it.
            ok = !dbm.has_key("key2") && dbm.has_key("key1");
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 8: Transaction with delete
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_transaction_delete() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("key1", string_to_binary("value1"));
        dbm.set("key2", string_to_binary("value2"));

        dbm.begin_transaction();
        dbm.delete("key1");

        // The delete must already be visible inside the transaction.
        if (!dbm.has_key("key1")) {
            dbm.commit_transaction();
            // key1 stays deleted after commit; key2 is untouched.
            ok = !dbm.has_key("key1") && dbm.has_key("key2");
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 9: Overwrite value
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_overwrite_value() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("key1", string_to_binary("value1"));

        var value = dbm.get("key1");
        if (value != null && binary_to_string((!) value) == "value1") {
            // A second set on the same key must replace the value.
            dbm.set("key1", string_to_binary("value2"));
            value = dbm.get("key1");
            ok = value != null && binary_to_string((!) value) == "value2";
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 10: Special key names
//
// Fixed: `return false` inside the loop leaked the temp directory; all
// exits now run cleanup_dir().
//
// NOTE(review): Vala string literals are NUL-terminated, so
// "key\0with\0nulls" is effectively just "key" — the embedded-NUL case is
// not actually exercised. TODO: cover real NUL keys via byte arrays if the
// backend is meant to support them.
bool test_special_key_names() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);

        string[] special_keys = {
            "key/with/slashes",
            "key:with:colons",
            "key.with.dots",
            "key with spaces",
            "key\nwith\nnewlines",
            "日本語キー",
            "key\0with\0nulls"
        };

        ok = true;
        foreach (var key in special_keys) {
            dbm.set(key, string_to_binary(@"value for $key"));

            var value = dbm.get(key);
            if (value == null || binary_to_string((!) value) != @"value for $key") {
                ok = false;
                break;
            }
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 11: Binary data
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_binary_data() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);

        // A value containing every possible byte, 0x00-0xFF.
        var data = new uint8[256];
        for (int i = 0; i < 256; i++) {
            data[i] = (uint8) i;
        }

        dbm.set("binary_key", new Invercargill.DataStructures.ByteBuffer.from_byte_array(data));

        var value = dbm.get("binary_key");
        if (value != null) {
            var read_data = ((!) value).to_bytes();
            if (read_data.length == 256) {
                ok = true;
                for (int i = 0; i < 256 && ok; i++) {
                    if (read_data.get_data()[i] != (uint8) i) {
                        ok = false;
                    }
                }
            }
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 12: Empty key
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_empty_key() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);
        dbm.set("", string_to_binary("empty_key_value"));

        var value = dbm.get("");
        ok = value != null && binary_to_string((!) value) == "empty_key_value";
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 13: Large value
//
// Fixed: early `return false` paths leaked the temp directory (1MB of it);
// all exits now run cleanup_dir().
bool test_large_value() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);

        // 1MB value with a repeating byte pattern.
        var data = new uint8[1024 * 1024];
        for (int i = 0; i < data.length; i++) {
            data[i] = (uint8) (i % 256);
        }

        dbm.set("large_key", new Invercargill.DataStructures.ByteBuffer.from_byte_array(data));

        var value = dbm.get("large_key");
        if (value != null) {
            var read_data = ((!) value).to_bytes();
            if (read_data.length == data.length) {
                ok = true;
                for (int i = 0; i < data.length && ok; i++) {
                    if (read_data.get_data()[i] != data[i]) {
                        ok = false;
                    }
                }
            }
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 14: Multiple operations
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir(). As before, the delete phase only runs if the
// write/verify phase succeeded.
bool test_multiple_operations() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        var dbm = new FilesystemDbm(temp_dir);

        // Write and verify 100 keys.
        for (int i = 0; i < 100; i++) {
            dbm.set(@"key$i", string_to_binary(@"value$i"));
        }
        ok = true;
        for (int i = 0; i < 100 && ok; i++) {
            var value = dbm.get(@"key$i");
            if (value == null || binary_to_string((!) value) != @"value$i") {
                ok = false;
            }
        }

        if (ok) {
            // Delete the first half; the second half must survive.
            for (int i = 0; i < 50; i++) {
                dbm.delete(@"key$i");
            }
            for (int i = 0; i < 50 && ok; i++) {
                if (dbm.has_key(@"key$i")) {
                    ok = false;
                }
            }
            for (int i = 50; i < 100 && ok; i++) {
                if (!dbm.has_key(@"key$i")) {
                    ok = false;
                }
            }
        }
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 15: Persistence
//
// Fixed: early `return false` paths leaked the temp directory; all exits
// now run cleanup_dir().
bool test_persistence() {
    string temp_dir = create_temp_dir();
    bool ok = false;
    try {
        // Write through one instance...
        var dbm1 = new FilesystemDbm(temp_dir);
        dbm1.set("persistent_key", string_to_binary("persistent_value"));

        // ...and read through a second instance over the same directory.
        var dbm2 = new FilesystemDbm(temp_dir);
        var value = dbm2.get("persistent_key");
        ok = value != null && binary_to_string((!) value) == "persistent_value";
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Cleanup helper: removes a test directory and everything beneath it.
//
// Fixed: the original only unlinked the directory's immediate children, so
// any subdirectory (e.g. created by a backend mapping keys with '/' to
// nested paths — see test_special_key_names) made the final DirUtils.remove
// fail silently and leaked the tree. Now recurses into subdirectories.
// Cleanup remains best-effort: errors are ignored.
void cleanup_dir(string path) {
    try {
        Dir dir = Dir.open(path, 0);
        string? name;
        while ((name = dir.read_name()) != null) {
            string child = Path.build_filename(path, (!) name);
            // Recurse into real directories; unlink files and symlinks
            // (never follow a symlink into a directory we do not own).
            if (FileUtils.test(child, FileTest.IS_DIR)
                && !FileUtils.test(child, FileTest.IS_SYMLINK)) {
                cleanup_dir(child);
            } else {
                FileUtils.unlink(child);
            }
        }
    } catch (FileError e) {
        // Ignore errors
    }
    DirUtils.remove(path);
}

+ 868 - 0
tests/Storage/GdbmDbmTest.vala

@@ -0,0 +1,868 @@
+/**
+ * GdbmDbmTest - Unit tests for GdbmDbm storage
+ */
+using Implexus.Storage;
+
/**
 * Test harness entry point for GdbmDbm.
 *
 * Runs every test case in declaration order, prints one PASS/FAIL line per
 * test followed by a summary, and exits 0 only when everything passed.
 */
public static int main(string[] args) {
    string[] names = {
        "test_basic_set_get",
        "test_get_nonexistent",
        "test_delete_key",
        "test_has_key",
        "test_keys_iteration",
        "test_transaction_commit",
        "test_transaction_rollback",
        "test_transaction_delete",
        "test_overwrite_value",
        "test_special_key_names",
        "test_binary_data",
        "test_empty_key",
        "test_large_value",
        "test_multiple_operations",
        "test_persistence",
        "test_open_close"
    };

    // results[i] corresponds to names[i]; the tests print nothing themselves,
    // so running them all up front keeps the output identical.
    var results = new bool[names.length];
    results[0] = test_basic_set_get();
    results[1] = test_get_nonexistent();
    results[2] = test_delete_key();
    results[3] = test_has_key();
    results[4] = test_keys_iteration();
    results[5] = test_transaction_commit();
    results[6] = test_transaction_rollback();
    results[7] = test_transaction_delete();
    results[8] = test_overwrite_value();
    results[9] = test_special_key_names();
    results[10] = test_binary_data();
    results[11] = test_empty_key();
    results[12] = test_large_value();
    results[13] = test_multiple_operations();
    results[14] = test_persistence();
    results[15] = test_open_close();

    int passed = 0;
    int failed = 0;
    for (int i = 0; i < names.length; i++) {
        if (results[i]) {
            passed++;
            stdout.puts(@"PASS: $(names[i])\n");
        } else {
            failed++;
            stdout.puts(@"FAIL: $(names[i])\n");
        }
    }

    stdout.printf("\nResults: %d passed, %d failed\n", passed, failed);
    return failed > 0 ? 1 : 0;
}
+
// Creates a unique scratch directory and returns a database path inside it.
// The caller removes the directory via cleanup_dir(get_dir_from_path(path)).
string create_temp_db_path() {
    var scratch = DirUtils.mkdtemp("implexus_gdbm_test_XXXXXX");
    return Path.build_filename(scratch, "test.db");
}
+
// Returns the directory component of a database path (the scratch directory
// that create_temp_db_path() made).
string get_dir_from_path(string db_path) {
    var parent = Path.get_dirname(db_path);
    return parent;
}
+
// Wraps a string's bytes (plus a trailing NUL) in BinaryData so the value
// can be cast straight back to a C string when read out of storage.
Invercargill.BinaryData string_to_binary(string str) {
    uint8[] buf = new uint8[str.length + 1];
    for (int i = 0; i < str.length; i++) {
        buf[i] = str.data[i];
    }
    buf[str.length] = '\0'; // terminator consumed by binary_to_string()
    return new Invercargill.DataStructures.ByteBuffer.from_byte_array(buf);
}
+
// Helper to convert BinaryData to string
// NOTE(review): the cast relies on the stored bytes ending with a NUL
// terminator — string_to_binary() above always appends one. A value written
// without a terminator would make this read past the buffer; confirm every
// producer appends it.
string binary_to_string(Invercargill.BinaryData data) {
    var bytes = data.to_bytes();
    return (string) bytes.get_data();
}
+
// Test 1: Basic set and get
//
// Single-exit rewrite: the database is closed once the outcome is known and
// the scratch directory is removed on every path, exactly as before.
bool test_basic_set_get() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    bool ok = false;
    try {
        var dbm = new GdbmDbm();
        dbm.open(db_path, false);

        dbm.set("test_key", string_to_binary("test_value"));
        var value = dbm.get("test_key");
        ok = value != null && binary_to_string((!) value) == "test_value";

        dbm.close();
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 2: Get non-existent key
//
// A lookup on a fresh database must yield null.
bool test_get_nonexistent() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    bool ok = false;
    try {
        var dbm = new GdbmDbm();
        dbm.open(db_path, false);

        ok = dbm.get("nonexistent") == null;

        dbm.close();
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 3: Delete key
//
// Delete is only attempted once the key is confirmed present; the database
// is closed and the scratch directory removed on every path, as before.
bool test_delete_key() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    bool ok = false;
    try {
        var dbm = new GdbmDbm();
        dbm.open(db_path, false);

        dbm.set("key1", string_to_binary("value1"));
        if (dbm.has_key("key1")) {
            dbm.delete("key1");
            ok = !dbm.has_key("key1");
        }

        dbm.close();
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 4: Has key
//
// key1 must be absent before the write, present after it; key2 must never
// appear. Cleanup runs on every path, as before.
bool test_has_key() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    bool ok = false;
    try {
        var dbm = new GdbmDbm();
        dbm.open(db_path, false);

        if (!dbm.has_key("key1")) {
            dbm.set("key1", string_to_binary("value1"));
            ok = dbm.has_key("key1") && !dbm.has_key("key2");
        }

        dbm.close();
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 5: Keys iteration
//
// Three writes must yield exactly three keys; order is unspecified, so the
// keys are collected into a set before checking membership.
bool test_keys_iteration() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    bool ok = false;
    try {
        var dbm = new GdbmDbm();
        dbm.open(db_path, false);

        dbm.set("key1", string_to_binary("value1"));
        dbm.set("key2", string_to_binary("value2"));
        dbm.set("key3", string_to_binary("value3"));

        var keys = dbm.keys;
        if (keys.count() == 3) {
            var seen = new Invercargill.DataStructures.HashSet<string>();
            foreach (var key in keys) {
                seen.add(key);
            }
            ok = seen.has("key1") && seen.has("key2") && seen.has("key3");
        }

        dbm.close();
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 6: Transaction commit
//
// Buffered writes must be readable before commit and persisted afterwards.
// Commit is only issued once the buffered write is confirmed visible, and
// cleanup runs on every path, as before.
bool test_transaction_commit() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    bool ok = false;
    try {
        var dbm = new GdbmDbm();
        dbm.open(db_path, false);

        dbm.set("key1", string_to_binary("value1"));

        dbm.begin_transaction();
        dbm.set("key2", string_to_binary("value2"));
        dbm.set("key3", string_to_binary("value3"));

        if (dbm.get("key2") != null) {
            dbm.commit_transaction();
            ok = dbm.has_key("key1") && dbm.has_key("key2") && dbm.has_key("key3");
        }

        dbm.close();
    } catch (Error e) {
        ok = false;
    }
    cleanup_dir(temp_dir);
    return ok;
}
+
// Test 7: Transaction rollback
// A rolled-back write must disappear, while data written before the
// transaction must survive.
bool test_transaction_rollback() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        db.set("key1", string_to_binary("value1"));

        db.begin_transaction();
        db.set("key2", string_to_binary("value2"));

        // The uncommitted write must be visible inside the transaction.
        bool ok = db.has_key("key2");

        db.rollback_transaction();

        // After rollback: key2 gone, pre-transaction key1 intact.
        ok = ok && !db.has_key("key2") && db.has_key("key1");

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 8: Transaction with delete
// A delete inside a transaction must hide the key immediately and remove
// it permanently once committed; unrelated keys are untouched.
bool test_transaction_delete() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        db.set("key1", string_to_binary("value1"));
        db.set("key2", string_to_binary("value2"));

        db.begin_transaction();
        db.delete("key1");

        // The pending delete must already hide key1 inside the transaction.
        bool ok = !db.has_key("key1");

        db.commit_transaction();

        // After commit: key1 permanently gone, key2 untouched.
        ok = ok && !db.has_key("key1") && db.has_key("key2");

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 9: Overwrite value
// Setting an existing key must replace its value in place.
bool test_overwrite_value() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        db.set("key1", string_to_binary("value1"));
        var first = db.get("key1");
        bool ok = first != null && binary_to_string((!) first) == "value1";

        if (ok) {
            db.set("key1", string_to_binary("value2"));
            var second = db.get("key1");
            ok = second != null && binary_to_string((!) second) == "value2";
        }

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 10: Special key names
// Keys containing separators, whitespace, newlines and non-ASCII text must
// round-trip unchanged.
bool test_special_key_names() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        string[] special_keys = {
            "key/with/slashes",
            "key:with:colons",
            "key.with.dots",
            "key with spaces",
            "key\nwith\nnewlines",
            "日本語キー"
        };

        bool ok = true;
        foreach (var key in special_keys) {
            db.set(key, string_to_binary(@"value for $key"));

            var value = db.get(key);
            if (value == null || binary_to_string((!) value) != @"value for $key") {
                ok = false;
                break;
            }
        }

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 11: Binary data
// A value containing every possible byte (0x00-0xFF) must round-trip
// byte-for-byte — no NUL truncation or encoding mangling.
bool test_binary_data() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        // Build a buffer covering all 256 byte values.
        var data = new uint8[256];
        for (int i = 0; i < 256; i++) {
            data[i] = (uint8) i;
        }

        var binary = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
        db.set("binary_key", binary);

        var value = db.get("binary_key");
        bool ok = value != null;

        if (ok) {
            var read_data = ((!) value).to_bytes();
            ok = read_data.length == 256;

            // Hoist get_data() out of the loop — the original called it on
            // every iteration.
            unowned uint8[] bytes = read_data.get_data();
            for (int i = 0; ok && i < 256; i++) {
                ok = bytes[i] == (uint8) i;
            }
        }

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 12: Empty key
// The empty string must be a valid key that stores and retrieves normally.
bool test_empty_key() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        db.set("", string_to_binary("empty_key_value"));

        var value = db.get("");
        bool ok = value != null && binary_to_string((!) value) == "empty_key_value";

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 13: Large value
// A 1 MiB value must round-trip intact.
bool test_large_value() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        // 1 MiB of a repeating 0-255 byte pattern.
        var data = new uint8[1024 * 1024];
        for (int i = 0; i < data.length; i++) {
            data[i] = (uint8) (i % 256);
        }

        var binary = new Invercargill.DataStructures.ByteBuffer.from_byte_array(data);
        db.set("large_key", binary);

        var value = db.get("large_key");
        bool ok = value != null;

        if (ok) {
            var read_data = ((!) value).to_bytes();
            ok = read_data.length == data.length;

            // Hoist get_data() out of the loop — the original called it a
            // million times during verification.
            unowned uint8[] bytes = read_data.get_data();
            for (int i = 0; ok && i < data.length; i++) {
                ok = bytes[i] == data[i];
            }
        }

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 14: Multiple operations
// Bulk-writes 100 keys, verifies them, deletes the first half, and checks
// that exactly the expected keys remain.
bool test_multiple_operations() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;
        db.open(db_path, false);

        // Phase 1: write 100 keys.
        for (int i = 0; i < 100; i++) {
            db.set(@"key$i", string_to_binary(@"value$i"));
        }

        // Phase 2: verify every key round-trips (stop at first failure).
        bool ok = true;
        for (int i = 0; ok && i < 100; i++) {
            var value = db.get(@"key$i");
            ok = value != null && binary_to_string((!) value) == @"value$i";
        }

        // Phase 3: delete the first half.
        if (ok) {
            for (int i = 0; i < 50; i++) {
                db.delete(@"key$i");
            }
        }

        // Phase 4: deleted keys must be gone, the rest must remain.
        for (int i = 0; ok && i < 50; i++) {
            ok = !db.has_key(@"key$i");
        }
        for (int i = 50; ok && i < 100; i++) {
            ok = db.has_key(@"key$i");
        }

        db.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 15: Persistence
// Data written through one handle must be readable through a fresh handle
// opened on the same path after the first is closed.
bool test_persistence() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    // Tracks whichever handle is currently open, so the error path can
    // release it. The original leaked dbm1/dbm2 when an exception was thrown.
    GdbmDbm? open_handle = null;

    try {
        // Write through the first handle, then release it.
        var writer = new GdbmDbm();
        open_handle = writer;
        writer.open(db_path, false);
        writer.set("persistent_key", string_to_binary("persistent_value"));
        writer.close();
        open_handle = null;

        // Re-open the same path with a fresh instance.
        var reader = new GdbmDbm();
        open_handle = reader;
        reader.open(db_path, false);

        var value = reader.get("persistent_key");
        bool ok = value != null && binary_to_string((!) value) == "persistent_value";

        reader.close();
        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        if (open_handle != null && ((!) open_handle).is_open) {
            try {
                ((!) open_handle).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Test 16: Open and close
// is_open must be false before open(), true after, and false again after
// close().
bool test_open_close() {
    string db_path = create_temp_db_path();
    string temp_dir = get_dir_from_path(db_path);
    GdbmDbm? dbm = null;

    try {
        var db = new GdbmDbm();
        dbm = db;

        bool ok = !db.is_open;

        db.open(db_path, false);
        ok = ok && db.is_open;

        db.close();
        ok = ok && !db.is_open;

        cleanup_dir(temp_dir);
        return ok;
    } catch (Error e) {
        // Fix: the original leaked the open database handle when an
        // exception escaped after open(); close it best-effort here.
        if (dbm != null && ((!) dbm).is_open) {
            try {
                ((!) dbm).close();
            } catch (Error close_error) {
                // Best effort only — the test is already failing.
            }
        }
        cleanup_dir(temp_dir);
        return false;
    }
}
+
// Cleanup helper: recursively deletes `path` and everything beneath it.
// Errors are deliberately ignored — this is best-effort teardown of
// temporary test directories.
void cleanup_dir(string path) {
    try {
        Dir dir = Dir.open(path, 0);
        string? name;
        while ((name = dir.read_name()) != null) {
            string file_path = Path.build_filename(path, name);
            if (FileUtils.test(file_path, FileTest.IS_SYMLINK)) {
                // Fix: FileTest.IS_DIR follows symlinks, so the original
                // could recurse through a link and delete files OUTSIDE the
                // temp tree. Remove the link itself instead.
                FileUtils.unlink(file_path);
            } else if (FileUtils.test(file_path, FileTest.IS_DIR)) {
                cleanup_dir(file_path);
            } else {
                FileUtils.unlink(file_path);
            }
        }
    } catch (FileError e) {
        // Ignore — the directory may already be gone or partially removed.
    }
    DirUtils.remove(path);
}

Некоторые файлы не были показаны из-за большого количества измененных файлов