Browse Source

feat(compression): add multi-algorithm compression support

Add support for Gzip, Brotli, and Zstd compression as pluggable pipeline
components. Each compressor handles content negotiation via Accept-Encoding
headers and supports streaming compression.

This commit also includes several related improvements:

Router enhancements:
- Add catch-all route support with /** pattern for matching subpaths
- Add route validation to prevent invalid wildcard patterns
- Return 404 result instead of throwing for unmatched routes

New endpoint utilities:
- Add FastResource for high-performance static content serving
- Add FilesystemResource for directory serving with compression support

Core changes:
- Add HttpResultFlag enum for controlling compression/chunking behavior
- Add HttpEmptyResult for responses without body content
- Replace ConverterAsyncOutput with simpler BufferAsyncOutput
- Add NOT_MODIFIED (304) status code

BREAKING CHANGE: Compression component replaced with individual GzipCompressor,
BrotliCompressor, and ZstdCompressor classes. Pipeline configurations must be
updated to use the new compressor classes.
Billy Barrow committed 3 weeks ago
parent
commit
eee17a6f75

+ 199 - 0
examples/FastResources.vala

@@ -0,0 +1,199 @@
+using Astralis;
+using Invercargill;
+using Invercargill.DataStructures;
+
+/**
+ * FastResources Example
+ * 
+ * Demonstrates the FastResource endpoint which provides high-performance
+ * static content serving with pre-loaded data. FastResource loads content
+ * into memory at startup, making it ideal for small static files that
+ * need to be served quickly.
+ * 
+ * This example shows three ways to create FastResource endpoints:
+ *   1. from_string - For serving string content (like HTML)
+ *   2. from_byte_array - For serving binary data (like images)
+ *   3. Default constructor - For loading files from the filesystem
+ * 
+ * Usage: fast-resources [port]
+ * 
+ * Examples:
+ *   fast-resources
+ *   fast-resources 8080
+ */
+
+// Simple 1x1 pixel PNG image (transparent) as a byte array
+private const uint8[] TRANSPARENT_PIXEL = {
+    0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A,  // PNG signature
+    0x00, 0x00, 0x00, 0x0D, 0x49, 0x48, 0x44, 0x52,  // IHDR chunk
+    0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
+    0x08, 0x06, 0x00, 0x00, 0x00, 0x1F, 0x15, 0xCA,
+    0x4B, 0x00, 0x00, 0x00, 0x0A, 0x49, 0x44, 0x41,  // IDAT chunk
+    0x54, 0x78, 0x9C, 0x63, 0x60, 0x00, 0x00, 0x00,
+    0x02, 0x00, 0x01, 0xE5, 0x27, 0xDE, 0xFC, 0x00,  // IEND chunk
+    0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4E, 0x44,
+    0xAE, 0x42, 0x60, 0x82
+};
+
+void main(string[] args) {
+    int port = args.length > 1 ? int.parse(args[1]) : 8080;
+    
+    // Get the path to the currently running binary (argv[0])
+    string binary_path = args[0];
+    
+    // Resolve to absolute path if needed
+    var binary_file = File.new_for_path(binary_path);
+    if (!binary_file.is_native() || !Path.is_absolute(binary_path)) {
+        binary_path = binary_file.get_path();
+    }
+    
+    print("╔══════════════════════════════════════════════════════════════╗\n");
+    print("║                Astralis FastResources Example                ║\n");
+    print("╠══════════════════════════════════════════════════════════════╣\n");
+    print(@"║  Port: $port");
+    for (int i = 0; i < 50 - port.to_string().length - 7; i++) print(" ");
+    print(" ║\n");
+    print("╠══════════════════════════════════════════════════════════════╣\n");
+    print("║  Endpoints:                                                  ║\n");
+    print("║    /           - Home page (from_string)                     ║\n");
+    print("║    /pixel.png  - 1x1 transparent pixel (from_byte_array)     ║\n");
+    print("║    /binary     - This executable (filesystem load)           ║\n");
+    print("╠══════════════════════════════════════════════════════════════╣\n");
+    print(@"║  Binary: $(binary_path)");
+    int path_len = binary_path.length;
+    if (path_len < 54) {
+        for (int i = 0; i < 54 - path_len; i++) print(" ");
+    }
+    print(" ║\n");
+    print("╚══════════════════════════════════════════════════════════════╝\n");
+    print("\nPress Ctrl+C to stop the server\n\n");
+    
+    // Create endpoints
+    try {
+        // 1. Home page using FastResource.from_string
+        // This is ideal for serving static HTML, CSS, or other text content
+        // that you want to embed directly in your application
+        var home_page = new FastResource.from_string("/", """
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>FastResources Example</title>
+    <style>
+        body {
+            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+            max-width: 800px;
+            margin: 0 auto;
+            padding: 20px;
+            background: #f5f5f5;
+        }
+        .card {
+            background: white;
+            border-radius: 8px;
+            padding: 20px;
+            margin: 10px 0;
+            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+        }
+        h1 { color: #333; }
+        code {
+            background: #e8e8e8;
+            padding: 2px 6px;
+            border-radius: 4px;
+        }
+        .endpoint {
+            display: flex;
+            align-items: center;
+            padding: 10px;
+            background: #fafafa;
+            margin: 5px 0;
+            border-radius: 4px;
+        }
+        .method {
+            background: #4CAF50;
+            color: white;
+            padding: 4px 8px;
+            border-radius: 4px;
+            font-weight: bold;
+            margin-right: 10px;
+        }
+        a { color: #2196F3; text-decoration: none; }
+        a:hover { text-decoration: underline; }
+    </style>
+</head>
+<body>
+    <div class="card">
+        <h1>🚀 FastResources Example</h1>
+        <p>This example demonstrates the <code>FastResource</code> endpoint which provides
+        high-performance static content serving with pre-loaded data.</p>
+    </div>
+    
+    <div class="card">
+        <h2>Available Endpoints</h2>
+        
+        <div class="endpoint">
+            <span class="method">GET</span>
+            <a href="/">/</a>
+            <span style="margin-left: auto; color: #666;">This page (from_string)</span>
+        </div>
+        
+        <div class="endpoint">
+            <span class="method">GET</span>
+            <a href="/pixel.png">/pixel.png</a>
+            <span style="margin-left: auto; color: #666;">1x1 pixel image (from_byte_array)</span>
+        </div>
+        
+        <div class="endpoint">
+            <span class="method">GET</span>
+            <a href="/binary">/binary</a>
+            <span style="margin-left: auto; color: #666;">Running executable (filesystem)</span>
+        </div>
+    </div>
+    
+    <div class="card">
+        <h2>FastResource Constructors</h2>
+        <ul>
+            <li><code>FastResource.from_string(route, content)</code> - Load from string</li>
+            <li><code>FastResource.from_byte_array(route, bytes)</code> - Load from byte array</li>
+            <li><code>FastResource(route, path)</code> - Load from filesystem</li>
+        </ul>
+    </div>
+</body>
+</html>
+""").with_content_type("text/html; charset=utf-8").with_default_compressors();
+        
+        // 2. Image using FastResource.from_byte_array
+        // This is ideal for serving binary content like images, fonts, or other
+        // assets that you want to embed directly in your application binary
+        var pixel_image = new FastResource.from_byte_array("/pixel.png", TRANSPARENT_PIXEL)
+            .with_content_type("image/png")
+            .with_default_compressors();
+        
+        // 3. Running binary using FastResource default constructor
+        // This loads a file from the filesystem at startup
+        // Using argv[0] to serve the currently running executable
+        var binary_endpoint = new FastResource("/binary", binary_path)
+            .with_content_type("application/octet-stream")
+            .with_default_compressors();
+        
+        // Set up the router with all endpoints
+        var router = new EndpointRouter()
+            .add_endpoint(home_page)
+            .add_endpoint(pixel_image)
+            .add_endpoint(binary_endpoint);
+        
+        // Build the pipeline
+        var pipeline = new Pipeline()
+            .add_component(router);
+        
+        // Create and configure the server
+        var server = new Server(port, pipeline);
+        
+        // Run the server
+        server.run();
+        
+    } catch (Error e) {
+        printerr("Error: %s\n", e.message);
+        Process.exit(1);
+    }
+}

+ 148 - 0
examples/FileServer.vala

@@ -0,0 +1,148 @@
+using Astralis;
+using Invercargill;
+using Invercargill.DataStructures;
+
+/**
+ * FileServer Example
+ * 
+ * A static file server that serves files from a directory specified
+ * on the command line. Supports compression via gzip, brotli, and zstd.
+ * 
+ * Usage: file-server <directory> [port]
+ * 
+ * Examples:
+ *   file-server /var/www/html
+ *   file-server ./public 8080
+ */
+
+// Root endpoint that shows server info
+class ServerInfoEndpoint : Object, Endpoint {
+    private string served_directory;
+    private int port;
+    
+    public ServerInfoEndpoint(string served_directory, int port) {
+        this.served_directory = served_directory;
+        this.port = port;
+    }
+    
+    public string route { get { return "/__server_info"; } }
+    public Method[] methods { owned get { return new Method[] { Method.GET }; } }
+    
+    public async HttpResult handle_request(HttpContext http_context, RouteInformation route) throws Error {
+        var info = @"Astralis File Server
+        
+Serving: $served_directory
+Port: $port
+
+Available compression: gzip, brotli, zstd
+
+Endpoints:
+  /**                 - Serves files from the directory
+  /__server_info      - This information page
+
+Try:
+  curl http://localhost:$port/
+  curl --compressed http://localhost:$port/style.css
+";
+        return new HttpStringResult(info)
+            .set_header("Content-Type", "text/plain");
+    }
+}
+
+void main(string[] args) {
+    // Parse command line arguments
+    if (args.length < 2) {
+        printerr("Usage: %s <directory> [port]\n", args[0]);
+        printerr("\nServes files from the specified directory over HTTP.\n");
+        printerr("Supports compression: gzip, brotli, zstd\n");
+        printerr("\nExamples:\n");
+        printerr("  %s /var/www/html\n", args[0]);
+        printerr("  %s ./public 8080\n", args[0]);
+        Process.exit(1);
+    }
+    
+    string directory = args[1];
+    int port = args.length > 2 ? int.parse(args[2]) : 8080;
+    
+    // Validate directory exists
+    var dir_file = File.new_for_path(directory);
+    if (!dir_file.query_exists()) {
+        printerr("Error: Directory '%s' does not exist\n", directory);
+        Process.exit(1);
+    }
+    
+    // Check if it's actually a directory
+    try {
+        var info = dir_file.query_info("standard::type", FileQueryInfoFlags.NONE);
+        if (info.get_file_type() != FileType.DIRECTORY) {
+            printerr("Error: '%s' is not a directory\n", directory);
+            Process.exit(1);
+        }
+    } catch (Error e) {
+        printerr("Error checking directory: %s\n", e.message);
+        Process.exit(1);
+    }
+    
+    // Resolve to absolute path
+    string absolute_path = dir_file.get_path();
+    
+    // Create the filesystem resource with deep matching
+    FilesystemResource file_resource;
+    try {
+        file_resource = new FilesystemResource("/**", absolute_path);
+        file_resource.allow_directory_listing = true;
+        file_resource.index_file = "index.html";
+    } catch (FilesystemResourceError e) {
+        printerr("Error creating file resource: %s\n", e.message);
+        Process.exit(1);
+    }
+    
+    // Set up the router with file serving and server info endpoints
+    var router = new EndpointRouter()
+        .add_endpoint(new ServerInfoEndpoint(absolute_path, port))
+        .add_endpoint(file_resource);
+    
+    // Create compression pipeline components
+    // Order matters: we try brotli first (best compression), then zstd, then gzip (most compatible)
+    var brotli = new BrotliCompressor();
+    var zstd = new ZstdCompressor();
+    var gzip = new GzipCompressor();
+    
+    // Build the pipeline with compression support
+    var pipeline = new Pipeline()
+        .add_component(gzip)
+        .add_component(zstd)
+        .add_component(brotli)
+        .add_component(router);
+    
+    // Create and configure the server
+    var server = new Server(port, pipeline);
+    
+    // Print startup information
+    print("╔══════════════════════════════════════════════════════════════╗\n");
+    print("║                  Astralis File Server                        ║\n");
+    print("╠══════════════════════════════════════════════════════════════╣\n");
+    print(@"║  Serving: $(absolute_path)");
+    if (absolute_path.length < 50) {
+        for (int i = 0; i < 50 - absolute_path.length; i++) print(" ");
+    }
+    print(" ║\n");
+    print(@"║  Port:    $port");
+    for (int i = 0; i < 50 - port.to_string().length; i++) print(" ");
+    print(" ║\n");
+    print("╠══════════════════════════════════════════════════════════════╣\n");
+    print("║  Compression: gzip, brotli, zstd                             ║\n");
+    print("╠══════════════════════════════════════════════════════════════╣\n");
+    print("║  Try:                                                        ║\n");
+    print(@"║    http://localhost:$port/");
+    for (int i = 0; i < 50 - 21 - port.to_string().length; i++) print(" ");
+    print(" ║\n");
+    print(@"║    http://localhost:$port/__server_info");
+    for (int i = 0; i < 50 - 35 - port.to_string().length; i++) print(" ");
+    print(" ║\n");
+    print("╚══════════════════════════════════════════════════════════════╝\n");
+    print("\nPress Ctrl+C to stop the server\n\n");
+    
+    // Run the server
+    server.run();
+}

+ 8 - 2
examples/FormData.vala

@@ -13,8 +13,10 @@ using Invercargill.DataStructures;
 class FormPageEndpoint : Object, Endpoint {
     public string route { get { return "/"; } }
     public Method[] methods { owned get { return { Method.GET }; } }
+
     public async HttpResult handle_request(HttpContext context, RouteInformation route) throws Error {
-        return new HttpStringResult("""<!DOCTYPE html>
+        
+        var res = new HttpStringResult("""<!DOCTYPE html>
 <html>
 <head>
     <title>Form Data Example</title>
@@ -121,6 +123,8 @@ class FormPageEndpoint : Object, Endpoint {
 </body>
 </html>""")
             .set_header("Content-Type", "text/html");
+
+        return res;
     }
 }
 
@@ -489,7 +493,9 @@ void main() {
         .add_endpoint(new FormDebugEndpoint());
     
     var pipeline = new Pipeline()
-        .add_component(new Compression())
+        .add_component(new GzipCompressor())
+        .add_component(new ZstdCompressor())
+        .add_component(new BrotliCompressor())
         .add_component(router);
 
     var server = new Server(8084, pipeline);

+ 14 - 0
examples/meson.build

@@ -67,3 +67,17 @@ executable('remote-address',
     dependencies: [astralis_dep, invercargill_dep, json_glib_dep],
     install: false
 )
+
+# File Server Example - serves a directory with compression support
+executable('file-server',
+    'FileServer.vala',
+    dependencies: [astralis_dep, invercargill_dep],
+    install: false
+)
+
+# FastResources Example - demonstrates FastResource with different constructors
+executable('fast-resources',
+    'FastResources.vala',
+    dependencies: [astralis_dep, invercargill_dep],
+    install: false
+)

+ 3 - 0
meson.build

@@ -11,6 +11,9 @@ mhd_dep = dependency('libmicrohttpd')
 invercargill_dep = dependency('invercargill-1')
 json_glib_dep = dependency('json-glib-1.0')
 invercargill_json_dep = dependency('invercargill-json')
+zlib_dep = dependency('zlib')
+brotli_dep = dependency('libbrotlienc')
+zstd_dep = dependency('libzstd')
 
 # VAPI Directory
 add_project_arguments(['--vapidir', join_paths(meson.current_source_dir(), 'vapi')], language: 'vala')

+ 266 - 0
src/Components/BrotliCompressor.vala

@@ -0,0 +1,266 @@
+using Invercargill;
+using Invercargill.DataStructures;
+
+namespace Astralis {
+
+    public class BrotliCompressor : Compressor {
+
+        private int _quality;
+
+        public BrotliCompressor(int quality = 9, uint64 max_buffer_size = 1024 * 1024 * 16) {
+            base(max_buffer_size);
+            this._quality = quality.clamp(Brotli.MIN_QUALITY, Brotli.MAX_QUALITY);
+        }
+
+        public override string encoding_token { get { return "br"; } }
+
+        /// Picks an EncoderMode based on MIME type for optimal compression
+        private static Brotli.EncoderMode pick_encoder_mode(string? content_type) {
+            if (content_type == null) {
+                return Brotli.EncoderMode.GENERIC;
+            }
+
+            string mime = content_type.down();
+
+            // Text mode for text-based content types
+            if (mime.has_prefix("text/")) {
+                return Brotli.EncoderMode.TEXT;
+            }
+
+            // Text mode for application text formats
+            if (mime.has_prefix("application/json") ||
+                mime.has_prefix("application/xml") ||
+                mime.has_prefix("application/javascript") ||
+                mime.has_prefix("application/x-javascript") ||
+                mime.has_prefix("application/xhtml+xml")) {
+                return Brotli.EncoderMode.TEXT;
+            }
+
+            // Font mode for web fonts
+            if (mime.has_prefix("font/") ||
+                mime.has_prefix("application/font-") ||
+                mime.has_prefix("application/x-font-") ||
+                mime.has_suffix("woff") ||
+                mime.has_suffix("woff2") ||
+                mime.has_suffix("ttf") ||
+                mime.has_suffix("otf") ||
+                mime.has_suffix("eot")) {
+                return Brotli.EncoderMode.FONT;
+            }
+
+            // Default to generic for everything else
+            return Brotli.EncoderMode.GENERIC;
+        }
+
+        public override ByteBuffer compress_buffer(ByteBuffer data, string? content_type) throws Error {
+            // Get maximum compressed size
+            size_t max_output_size = Brotli.encoder_max_compressed_size((size_t) data.length);
+            if (max_output_size == 0) {
+                throw new IOError.FAILED("Brotli Error");
+            }
+
+            // Prepare input
+            uint8[] input_bytes = data.to_array();
+
+            // Allocate output buffer
+            var output_buffer = new uint8[max_output_size];
+            size_t encoded_size = max_output_size;
+
+            // Pick encoder mode based on content type
+            var mode = pick_encoder_mode(content_type);
+
+            // Compress in one shot (using pointers for the C API)
+            Brotli.Bool result = Brotli.encoder_compress(
+                _quality,
+                Brotli.DEFAULT_WINDOW,
+                mode,
+                input_bytes.length,
+                input_bytes,
+                ref encoded_size,
+                output_buffer
+            );
+            
+            if (result != Brotli.TRUE) {
+                throw new IOError.FAILED("Brotli Error");
+            }
+            
+            // Return compressed data (base class handles size comparison)
+            return new ByteBuffer.from_byte_array(output_buffer[0:encoded_size]);
+        }
+
+        public override HttpResult compress_chunked(HttpResult inner_result) {
+            // Get content type for compression optimization
+            string? content_type = null;
+            if (inner_result.headers.has("Content-Type")) {
+                content_type = inner_result.headers["Content-Type"];
+            }
+            var mode = pick_encoder_mode(content_type);
+
+            var streaming_result = new StreamingBrotliResult(inner_result, _quality, mode);
+            return streaming_result;
+        }
+
+        /// Streaming compression result that compresses data on-the-fly using brotli
+        private class StreamingBrotliResult : HttpResult {
+            private HttpResult inner_result;
+            private int quality;
+            private Brotli.EncoderMode mode;
+
+            public StreamingBrotliResult(HttpResult result, int quality, Brotli.EncoderMode mode) {
+                base(result.status, null); // No content length for streaming
+                copy_headers(result, this, {"content-length", "content-encoding"});
+                this.inner_result = result;
+                this.quality = quality;
+                this.mode = mode;
+            }
+
+            public async override void send_body(AsyncOutput output) throws Error {
+                // Create a brotli output stream wrapper that compresses on-the-fly
+                var brotli_output = new BrotliAsyncOutput(output, quality, mode);
+
+                // Send the body through the brotli wrapper
+                yield inner_result.send_body(brotli_output);
+
+                // Finish the compression stream
+                yield brotli_output.finish();
+            }
+        }
+
+        /// An AsyncOutput that compresses data on-the-fly using brotli format
+        private class BrotliAsyncOutput : Object, AsyncOutput {
+            private AsyncOutput downstream;
+            private Brotli.EncoderState encoder;
+            private bool finished = false;
+            private uint8[] output_buffer;
+
+            public BrotliAsyncOutput(AsyncOutput downstream, int quality, Brotli.EncoderMode mode) throws Error {
+                this.downstream = downstream;
+
+                // Initialize brotli encoder
+                this.encoder = new Brotli.EncoderState(null, null, null);
+
+                // Set encoder parameters
+                this.encoder.set_parameter(Brotli.EncoderParameter.MODE, (uint32) mode);
+                this.encoder.set_parameter(Brotli.EncoderParameter.QUALITY, (uint32) quality);
+                this.encoder.set_parameter(Brotli.EncoderParameter.LGWIN, (uint32) Brotli.DEFAULT_WINDOW);
+
+                // Allocate output buffer (16KB should be plenty for compressed chunks)
+                this.output_buffer = new uint8[16384];
+            }
+
+            public async void write(BinaryData data) throws Error {
+                if (finished) {
+                    throw new IOError.FAILED("Cannot write to finished brotli stream");
+                }
+                
+                uint8[] input_bytes = data.to_array();
+                if (input_bytes.length == 0) {
+                    return;
+                }
+                
+                // Set up input
+                size_t available_in = input_bytes.length;
+                uint8* next_in = input_bytes;
+                
+                // Compress with PROCESS operation (don't flush yet)
+                while (available_in > 0) {
+                    size_t available_out = output_buffer.length;
+                    uint8* next_out = output_buffer;
+                    size_t total_out = 0;
+                    
+                    Brotli.Bool result = encoder.compress_stream(
+                        Brotli.EncoderOperation.PROCESS,
+                        ref available_in,
+                        ref next_in,
+                        ref available_out,
+                        ref next_out,
+                        out total_out
+                    );
+                    
+                    if (result != Brotli.TRUE) {
+                        throw new IOError.FAILED("Brotli compression stream error");
+                    }
+                    
+                    // Calculate how much compressed data was produced
+                    size_t compressed_size = output_buffer.length - available_out;
+                    if (compressed_size > 0) {
+                        yield downstream.write(new ByteBuffer.from_byte_array(output_buffer[0:compressed_size]));
+                    }
+                }
+                
+                // Flush any pending output
+                yield flush_pending();
+            }
+
+            public async void write_stream(InputStream stream) throws Error {
+                uint8[] chunk = new uint8[8192];
+                while (true) {
+                    ssize_t bytes_read = yield stream.read_async(chunk);
+                    if (bytes_read <= 0) {
+                        break;
+                    }
+                    yield write(new ByteBuffer.from_byte_array(chunk[0:bytes_read]));
+                }
+            }
+            
+            /// Flush any pending output from the encoder
+            private async void flush_pending() throws Error {
+                // Check if there's more output available
+                while (encoder.has_more_output() == Brotli.TRUE) {
+                    size_t size = output_buffer.length;
+                    unowned uint8* taken = encoder.take_output(ref size);
+                    
+                    if (size > 0) {
+                        // Copy data from taken pointer to array
+                        uint8[] data = new uint8[size];
+                        Memory.copy(data, taken, size);
+                        yield downstream.write(new ByteBuffer.from_byte_array(data));
+                    }
+                }
+            }
+            
+            /// Finish the compression stream
+            public async void finish() throws Error {
+                if (finished) {
+                    return;
+                }
+                finished = true;
+                
+                // Finish the stream
+                while (encoder.is_finished() != Brotli.TRUE) {
+                    size_t available_in = 0;
+                    uint8* next_in = null;
+                    size_t available_out = output_buffer.length;
+                    uint8* next_out = output_buffer;
+                    size_t total_out = 0;
+                    
+                    Brotli.Bool result = encoder.compress_stream(
+                        Brotli.EncoderOperation.FINISH,
+                        ref available_in,
+                        ref next_in,
+                        ref available_out,
+                        ref next_out,
+                        out total_out
+                    );
+                    
+                    if (result != Brotli.TRUE) {
+                        throw new IOError.FAILED("Brotli compression stream error during finish");
+                    }
+                    
+                    // Write any compressed data produced
+                    size_t compressed_size = output_buffer.length - available_out;
+                    if (compressed_size > 0) {
+                        yield downstream.write(new ByteBuffer.from_byte_array(output_buffer[0:compressed_size]));
+                    }
+                    
+                    // Also take any remaining output
+                    yield flush_pending();
+                }
+                
+                // Encoder will be cleaned up by the free_function when the object is finalized
+            }
+        }
+
+    }
+
+}

+ 118 - 0
src/Components/Compressor.vala

@@ -0,0 +1,118 @@
+using Invercargill;
+using Invercargill.DataStructures;
+
+namespace Astralis {
+
+    public abstract class Compressor : Object, PipelineComponent {
+
+        protected uint64 max_buffer_size;
+
+        protected Compressor(uint64 max_buffer_size = 1024 * 1024 * 16) {
+            this.max_buffer_size = max_buffer_size;
+        }
+
+        // The encoding token that identifies this compression format (e.g., "gzip")
+        public abstract string encoding_token { get; }
+
+        public async HttpResult process_request(HttpContext http_context, PipelineContext pipeline_context) throws Error {
+            var result = yield pipeline_context.next();
+            
+            // Check existing encoding on http result (to avoid double encoding)
+            // Do not compress if: DO_NOT_COMPRESS flag set, or `Content-Encoding` is set and is not "identity"
+            var existing_encoding = http_context.request.headers.get_any_or_default("Content-Encoding");
+            if (result.flag_is_set(HttpResultFlag.DO_NOT_COMPRESS) || (existing_encoding != null && existing_encoding != "identity")) {
+                return result;
+            }
+            
+            var accept_encoding = http_context.request.headers.get_any_or_default("Accept-Encoding");
+            
+            // Check if the client accepts this encoding
+            if (accept_encoding == null || !accept_encoding.contains(encoding_token)) {
+                return result;
+            }
+
+            // Case 1: Content length known and within threshold -> buffer and compress
+            if (result.content_length != null && result.content_length <= max_buffer_size) {
+                var buffered_result = yield buffer_and_compress(result);
+                if (buffered_result != null) {
+                    buffered_result.set_header("Content-Encoding", encoding_token);
+                    buffered_result.set_header("Vary", "Accept-Encoding");
+                    return buffered_result;
+                }
+                return result;
+            }
+
+            // Case 2: Content length known and above threshold, and `DO_NOT_CHUNK` is set -> do nothing
+            else if(result.content_length != null && result.flag_is_set(HttpResultFlag.DO_NOT_CHUNK)) {
+                return result;
+            }
+
+            // Case 3: Content length above threshold or is unknown -> send chunked response (no Content-Length header)
+            var streaming_result = compress_chunked(result);
+            streaming_result.set_header("Content-Encoding", encoding_token);
+            streaming_result.set_header("Vary", "Accept-Encoding");
+            return streaming_result;
+
+        }
+
+        /// Compress a ByteBuffer of data, returning null if compression doesn't reduce size
+        /// @param data The data to compress
+        /// @param content_type Optional content type hint for compression optimization
+        /// @return Compressed data as ByteBuffer, or null if compression doesn't reduce size
+        public abstract ByteBuffer compress_buffer(ByteBuffer data, string? content_type) throws Error;
+
+        /// Create a streaming compression result for the given inner result, or null if not compressible
+        public abstract HttpResult compress_chunked(HttpResult inner_result) throws Error;
+
+        /// Buffer and compress the result, returning null if compression doesn't reduce size
+        private async HttpResult? buffer_and_compress(HttpResult inner_result) throws Error {
+            // Buffer the input data
+            var input_buffer = new BufferAsyncOutput();
+            yield inner_result.send_body(input_buffer);
+            var input_data = input_buffer.get_buffer();
+            
+            // Get content type for potential compression optimization
+            string? content_type = null;
+            if (inner_result.headers.has("Content-Type")) {
+                content_type = inner_result.headers["Content-Type"];
+            }
+            
+            // Compress the data
+            ByteBuffer? compressed_data = compress_buffer(input_data, content_type);
+            if (compressed_data == null) {
+                return null;
+            }
+            
+            // Check if compressed size is larger than or equal to original
+            if (compressed_data.length >= input_data.length) {
+                return null;
+            }
+            
+            // Construct HttpDataResult and copy headers
+            var http_result = new HttpDataResult(compressed_data, inner_result.status);
+            copy_headers(inner_result, http_result, {"content-length", "content-encoding"});
+            return http_result;
+        }
+
+        protected static void copy_headers(HttpResult? source, HttpResult dest, string[] skip_headers) {
+            if (source == null) {
+                return;
+            }
+            foreach (var header in source.headers) {
+                var lower_key = header.key.down();
+                bool skip = false;
+                foreach (var skip_key in skip_headers) {
+                    if (lower_key == skip_key) {
+                        skip = true;
+                        break;
+                    }
+                }
+                if (!skip) {
+                    dest.set_header(header.key, header.value);
+                }
+            }
+        }
+
+    }
+
+}

+ 122 - 43
src/Components/EndpointRouter.vala

@@ -11,15 +11,16 @@ namespace Astralis {
             var path_components = http_context.request.path_components.to_vector();
             var endpoint_attempt = endpoint_sources
                 .attempt_select<Endpoint>(s => s.get_endpoint())
+                .to_series() // TODO: fix problems some day so we don't need this!
                 .where(e => Wrap.array<Method>(e.methods).any(m => m == http_context.request.method))
                 .where(e => matches_endpoint(path_components, e))
                 .first_or_default();
 
             if(endpoint_attempt == null) {
-                throw new EndpointError.ROUTE_NOT_FOUND(@"No route found for /$(path_components.to_string(null, "/"))");
+                return new HttpStringResult("Not Found", StatusCode.NOT_FOUND);
             }
 
-            var endpoint = endpoint_attempt.unwrap();
+            var endpoint = endpoint_attempt;
             var info = new RouteInformation() {
                 path_components = path_components,
                 named_components = extract_named_components(path_components, endpoint)
@@ -27,7 +28,8 @@ namespace Astralis {
             return yield endpoint.handle_request(http_context, info);
         }
 
-        public EndpointRouter add_endpoint(Endpoint endpoint) {
+        public EndpointRouter add_endpoint(Endpoint endpoint) throws RouterError {
+            validate_route(endpoint.route);
             this.endpoint_sources.add(new SingletonEndpointSource(endpoint));
             return this;
         }
@@ -37,12 +39,59 @@ namespace Astralis {
             return this;
         }
 
+        /**
+         * Rejects route patterns that misuse the "/**" catch-all token.
+         *
+         * "/**" is only legal as the final component of a route and may occur
+         * at most once; any other placement raises INVALID_ROUTE_DEFINITION.
+         * Routes without "/**" are accepted unchanged.
+         */
+        private void validate_route(string route) throws RouterError {
+            // Check if "/**" appears anywhere except at the end
+            if (route.contains("/**")) {
+                // Route must end with "/**" and "/**" must not appear elsewhere
+                if (!route.has_suffix("/**")) {
+                    throw new RouterError.INVALID_ROUTE_DEFINITION(
+                        "\"/**\" can only appear at the end of a route"
+                    );
+                }
+                // Check that "/**" doesn't appear multiple times
+                // (splitting on "/**" yields at most 2 parts for one occurrence)
+                var parts = route.split("/**");
+                if (parts.length > 2) {
+                    throw new RouterError.INVALID_ROUTE_DEFINITION(
+                        "\"/**\" can only appear at the end of a route"
+                    );
+                }
+            }
+        }
+
         private bool matches_endpoint(ReadOnlyAddressable<string> path_components, Endpoint endpoint) {
             // "*" (with no preceding slash) is match all.
             if(endpoint.route == "*") {
                 return true;
             }
-            print(@"Checking route $(endpoint.route)\n");
+
+            // Check for "/**" catch-all subroutes
+            if(endpoint.route.has_suffix("/**")) {
+                string prefix_route = endpoint.route.substring(0, endpoint.route.length - 3);
+                var prefix_components = Wrap.array<string>(prefix_route.split("/"))
+                    .where(c => c.length != 0)
+                    .to_immutable_buffer();
+
+                // If prefix is empty (route is "/**"), always match
+                if(prefix_components.length == 0) {
+                    return true;
+                }
+
+                // Check that all prefix components match
+                // We need at least as many path components as prefix components
+                // (or exactly equal if we want to match the base path too)
+                return prefix_components
+                    .pair_up<string>(path_components)
+                    .all(p => {
+                        if(!p.value1_is_set) {
+                            return true;  // Prefix exhausted - OK, remaining captured by **
+                        }
+                        if(!p.value2_is_set) {
+                            return false; // Path exhausted but prefix remains
+                        }
+                        return component_matches(p.value1, p.value2);
+                    });
+            }
+
             var endpoint_components = Wrap.array<string>(endpoint.route.split("/")).where(c => c.length != 0);
             return endpoint_components
                 .pair_up<string>(path_components)
@@ -50,42 +99,79 @@ namespace Astralis {
                     if(p.value1_is_set != p.value2_is_set) {
                         return false;
                     }
-                    print(@"$(p.value1) - $(p.value2)\n");
-                    if(p.value1.has_prefix("{") && p.value1.has_suffix("}")) {
-                        return true;
-                    }
-                    if(p.value1 == "*") {
-                        return true;
+                    return component_matches(p.value1, p.value2);
+                });
+        }
+
+        private bool component_matches(string pattern, string value) {
+            if(pattern.has_prefix("{") && pattern.has_suffix("}")) {
+                return true;  // Named parameter matches anything
+            }
+            if(pattern == "*") {
+                return true;  // Single wildcard
+            }
+            if(pattern.contains("*")) {
+                var parts = pattern.split("*");
+                if(!value.has_prefix(parts[0]) || !value.has_suffix(parts[parts.length-1])) {
+                    return false;
+                }
+                if(parts.length == 2) {
+                    return true;
+                }
+                var str_pos = parts[0].length;
+                for(int i = 1; i < parts.length-1; i++) {
+                    var index = value.index_of(parts[i], str_pos);
+                    if(index < 0) {
+                        return false;
                     }
-                    if(p.value1.contains("*")) {
-                        var parts = p.value1.split("*");
-                        if(!p.value2.has_prefix(parts[0]) || !p.value2.has_suffix(parts[parts.length-1])) {
-                            return false;
-                        }
-                        if(parts.length == 2) {
-                            return true;
-                        }
-                        var str_pos = parts[0].length;
-                        for(int i = 1; i < parts.length-1; i++) {
-                            var index = p.value2.index_of(parts[i], str_pos);
-                            if(index < 0) {
-                                return false;
-                            }
-                            str_pos += index + parts[i].length;
+                    str_pos += index + parts[i].length;
+                }
+                return true;
+            }
+            return pattern == value;
+        }
+
+        private Dictionary<string, string> extract_named_components(ReadOnlyAddressable<string> path_components, Endpoint endpoint) throws IndexError, RouterError {
+            var dictionary = new Dictionary<string, string>();
+
+            // Handle "/**" catch-all - capture remaining path as "**"
+            if(endpoint.route.has_suffix("/**")) {
+                string prefix_route = endpoint.route.substring(0, endpoint.route.length - 3);
+                var prefix_components = Wrap.array<string>(prefix_route.split("/"))
+                    .where(c => c.length != 0)
+                    .to_immutable_buffer();
+
+                // Extract named components from the prefix
+                for(int i = 0; i < prefix_components.length; i++) {
+                    if(prefix_components[i].has_prefix("{") && prefix_components[i].has_suffix("}")) {
+                        if(i < path_components.length) {
+                            dictionary.add(prefix_components[i][1:-1], path_components[i]);
                         }
-                        return true;
                     }
-                    return p.value1 == p.value2;
-                });
-        }
+                }
 
-        private Dictionary<string, string> extract_named_components(ReadOnlyAddressable<string> path_components, Endpoint endpoint) throws IndexError {
-            print(@"Extracting from $(endpoint.route)\n");
+                // Capture remaining path components as "**"
+                int remaining_start = (int)prefix_components.length;
+                
+                // Build path manually to avoid string.join issues
+                var builder = new StringBuilder();
+                bool first = true;
+                for(int i = remaining_start; i < path_components.length; i++) {
+                    if (!first) {
+                        builder.append("/");
+                    }
+                    builder.append(path_components[i]);
+                    first = false;
+                }
+                dictionary.add("**", builder.str);
+                return dictionary;
+            }
+
+            // Standard named component extraction
             var endpoint_components = Wrap.array<string>(endpoint.route.split("/"))
                 .where(c => c.length != 0)
                 .to_immutable_buffer();
-                
-            var dictionary = new Dictionary<string, string>();
+
             for(int i = 0; i < endpoint_components.length; i++) {
                 if(!endpoint_components[i].has_prefix("{") || !endpoint_components[i].has_suffix("}")) {
                     continue;
@@ -104,14 +190,6 @@ namespace Astralis {
 
     }
 
-    public interface Endpoint : Object {
-
-        public abstract string route { get; }
-        public abstract Method[] methods { owned get; }
-        public abstract async HttpResult handle_request(HttpContext http_context, RouteInformation route_context) throws Error;
-
-    }
-
     public interface EndpointSource : Object {
         public abstract Endpoint get_endpoint() throws Error;
     }
@@ -128,9 +206,10 @@ namespace Astralis {
         }
     }
 
-    public errordomain EndpointError {
+    public errordomain RouterError {
         ROUTE_NOT_FOUND,
-        METHOD_NOT_ALLOWED
+        METHOD_NOT_ALLOWED,
+        INVALID_ROUTE_DEFINITION
     }
 
 

+ 178 - 0
src/Components/GzipCompressor.vala

@@ -0,0 +1,178 @@
+using Invercargill;
+using Invercargill.DataStructures;
+
+namespace Astralis {
+
+    /**
+     * Gzip compression pipeline component.
+     *
+     * Provides the "gzip" Content-Encoding via zlib's deflate with the gzip
+     * wrapper (windowBits = 15 + 16, RFC 1952). Supports one-shot buffer
+     * compression and on-the-fly streaming compression of chunked bodies.
+     */
+    public class GzipCompressor : Compressor {
+
+        // zlib compression level (0-9), used for both buffered and streaming mode.
+        private int _compression_level;
+
+        /**
+         * @param compression_level zlib level 0-9; 7 trades speed for ratio
+         * @param max_buffer_size largest body buffered for one-shot compression
+         *        (enforced by the Compressor base class)
+         */
+        public GzipCompressor(int compression_level = 7, uint64 max_buffer_size = 1024 * 1024 * 16) {
+            base(max_buffer_size);
+            this._compression_level = compression_level;
+        }
+
+        public override string encoding_token { get { return "gzip"; } }
+
+        /**
+         * Compresses a complete buffer in a single deflate pass.
+         *
+         * content_type is currently unused; kept for parity with compressors
+         * that may tune settings per media type.
+         */
+        public override ByteBuffer compress_buffer(ByteBuffer data, string? content_type) throws Error {
+            // Compress synchronously using zlib
+            var stream = ZLib.DeflateStream.full(
+                (ZLib.Level) _compression_level,
+                ZLib.Algorithm.DEFLATED,
+                31  // 15 + 16 for gzip wrapper
+            );
+            
+            // Prepare input
+            uint8[] input_bytes = data.to_array();
+            stream.next_in = input_bytes;
+            stream.avail_in = (uint) input_bytes.length;
+            
+            // Worst-case deflate output grows ~5 bytes per 16 KiB stored block
+            // plus the gzip header/trailer, so a fixed +1024 slack is too small
+            // for multi-megabyte incompressible input. Scale the slack with the
+            // input length (approximates zlib's deflateBound()).
+            var output_buffer = new uint8[input_bytes.length + (input_bytes.length / 1000) + 1024];
+            stream.next_out = output_buffer;
+            stream.avail_out = (uint) output_buffer.length;
+            
+            // Compress in one go
+            int result = stream.deflate((int) ZLib.Flush.FINISH);
+            
+            if (result == ZLib.Status.STREAM_ERROR) {
+                throw new IOError.FAILED("ZLib Stream Error");
+            }
+            
+            // A single FINISH pass must end the stream; anything else means the
+            // output buffer was exhausted and the result would be truncated.
+            if (result != ZLib.Status.STREAM_END) {
+                throw new IOError.FAILED("ZLib output buffer exhausted during compression");
+            }
+            
+            size_t compressed_size = output_buffer.length - stream.avail_out;
+            
+            // Return compressed data (base class handles size comparison)
+            return new ByteBuffer.from_byte_array(output_buffer[0:compressed_size]);
+        }
+
+        /// Wraps inner_result so its body is gzip-compressed while streaming.
+        public override HttpResult compress_chunked(HttpResult inner_result) {
+            var streaming_result = new StreamingGzipResult(inner_result, _compression_level);
+            return streaming_result;
+        }
+
+        /// Streaming compression result that compresses data on-the-fly using gzip
+        private class StreamingGzipResult : HttpResult {
+            private HttpResult inner_result;
+            private int compression_level;
+
+            public StreamingGzipResult(HttpResult result, int compression_level) {
+                base(result.status, null); // No content length for streaming
+                copy_headers(result, this, {"content-length", "content-encoding"});
+                this.inner_result = result;
+                this.compression_level = compression_level;
+            }
+
+            public async override void send_body(AsyncOutput output) throws Error {
+                // Create a gzip output stream wrapper that compresses on-the-fly
+                var gzip_output = new GzipAsyncOutput(output, compression_level);
+                
+                // Send the body through the gzip wrapper
+                yield inner_result.send_body(gzip_output);
+                
+                // Finish the compression stream (writes gzip trailer)
+                yield gzip_output.finish();
+            }
+        }
+
+        /// An AsyncOutput that compresses data on-the-fly using gzip format
+        private class GzipAsyncOutput : Object, AsyncOutput {
+            private AsyncOutput downstream;
+            private ZLib.DeflateStream stream;
+            private bool finished = false;
+            private uint8[] output_buffer;
+            
+            public GzipAsyncOutput(AsyncOutput downstream, int compression_level) throws Error {
+                this.downstream = downstream;
+                
+                // Initialize deflate stream with gzip format
+                // windowBits = 15 + 16 = 31 for gzip encoding (RFC1952)
+                this.stream = ZLib.DeflateStream.full(
+                    (ZLib.Level) compression_level,
+                    ZLib.Algorithm.DEFLATED,
+                    31  // 15 + 16 for gzip wrapper
+                );
+                
+                // Allocate output buffer (16KB should be plenty for compressed chunks)
+                this.output_buffer = new uint8[16384];
+            }
+
+            public async void write(BinaryData data) throws Error {
+                if (finished) {
+                    throw new IOError.FAILED("Cannot write to finished gzip stream");
+                }
+                
+                uint8[] input_bytes = data.to_array();
+                if (input_bytes.length == 0) {
+                    return;
+                }
+                
+                // Set up input
+                stream.next_in = input_bytes;
+                stream.avail_in = (uint) input_bytes.length;
+                
+                // Compress with SYNC_FLUSH to ensure output is produced immediately
+                // (important for streaming - NO_FLUSH may buffer internally)
+                bool done = false;
+                while (!done) {
+                    stream.next_out = output_buffer;
+                    stream.avail_out = (uint) output_buffer.length;
+                    
+                    int result = stream.deflate((int) ZLib.Flush.SYNC_FLUSH);
+                    
+                    if (result == ZLib.Status.STREAM_ERROR) {
+                        throw new IOError.FAILED("ZLib compression stream error");
+                    }
+                    
+                    // Calculate how much compressed data was produced
+                    size_t compressed_size = output_buffer.length - stream.avail_out;
+                    if (compressed_size > 0) {
+                        yield downstream.write(new ByteBuffer.from_byte_array(output_buffer[0:compressed_size]));
+                    }
+                    
+                    // Done only when all input is consumed AND the output buffer
+                    // was not filled to capacity: if avail_out reached 0, zlib
+                    // may still hold flushed bytes, so run another iteration.
+                    done = (stream.avail_in == 0 && stream.avail_out > 0);
+                }
+            }
+
+            /// Reads stream to EOF, feeding each chunk through write().
+            public async void write_stream(InputStream stream) throws Error {
+                uint8[] chunk = new uint8[8192];
+                while (true) {
+                    ssize_t bytes_read = yield stream.read_async(chunk);
+                    if (bytes_read <= 0) {
+                        break;
+                    }
+                    yield write(new ByteBuffer.from_byte_array(chunk[0:bytes_read]));
+                }
+            }
+            
+            /// Finish the compression stream, writing the gzip trailer
+            public async void finish() throws Error {
+                if (finished) {
+                    return;
+                }
+                finished = true;
+                
+                // Flush remaining data and write gzip trailer
+                int result = ZLib.Status.OK;
+                while (result != ZLib.Status.STREAM_END) {
+                    stream.next_out = output_buffer;
+                    stream.avail_out = (uint) output_buffer.length;
+                    
+                    result = stream.deflate((int) ZLib.Flush.FINISH);
+                    
+                    if (result == ZLib.Status.STREAM_ERROR) {
+                        throw new IOError.FAILED("ZLib compression stream error during finish");
+                    }
+                    
+                    // Write any compressed data produced
+                    size_t compressed_size = output_buffer.length - stream.avail_out;
+                    if (compressed_size > 0) {
+                        yield downstream.write(new ByteBuffer.from_byte_array(output_buffer[0:compressed_size]));
+                    }
+                }
+            }
+        }
+
+    }
+
+}

+ 189 - 0
src/Components/ZstdCompressor.vala

@@ -0,0 +1,189 @@
+using Invercargill;
+using Invercargill.DataStructures;
+
+namespace Astralis {
+
+    /**
+     * Zstandard compression pipeline component.
+     *
+     * Provides the "zstd" Content-Encoding. Uses the one-shot ZSTD_compress
+     * path for buffered bodies and a ZSTD_CCtx streaming context for chunked
+     * bodies. All calls go through a project Zstd VAPI not visible here.
+     */
+    public class ZstdCompressor : Compressor {
+
+        // Compression level, clamped to the library-supported range at construction.
+        private int _compression_level;
+
+        public ZstdCompressor(int compression_level = 14, uint64 max_buffer_size = 1024 * 1024 * 16) {
+            base(max_buffer_size);
+            this._compression_level = compression_level.clamp(Zstd.min_c_level(), Zstd.max_c_level());
+        }
+
+        public override string encoding_token { get { return "zstd"; } }
+
+        public override ByteBuffer compress_buffer(ByteBuffer data, string? content_type) throws Error {
+            // Get maximum compressed size
+            size_t max_output_size = Zstd.compress_bound((size_t) data.length);
+            if (Zstd.is_error(max_output_size) != 0) {
+                throw new IOError.FAILED("Zstd Error");
+            }
+            
+            // Prepare input
+            uint8[] input_bytes = data.to_array();
+            
+            // Allocate output buffer sized by compress_bound, so the one-shot
+            // call below cannot run out of space.
+            var output_buffer = new uint8[max_output_size];
+            
+            // Compress in one shot (using pointers for the C API)
+            size_t compressed_size = Zstd.compress(
+                output_buffer,
+                max_output_size,
+                input_bytes,
+                input_bytes.length,
+                _compression_level
+            );
+            
+            if (Zstd.is_error(compressed_size) != 0) {
+                throw new IOError.FAILED("Zstd Error");
+            }
+            
+            // Return compressed data (base class handles size comparison)
+            return new ByteBuffer.from_byte_array(output_buffer[0:compressed_size]);
+        }
+
+        public override HttpResult compress_chunked(HttpResult inner_result) {
+            var streaming_result = new StreamingZstdResult(inner_result, _compression_level);
+            return streaming_result;
+        }
+
+        /// Streaming compression result that compresses data on-the-fly using zstd
+        private class StreamingZstdResult : HttpResult {
+            private HttpResult inner_result;
+            private int compression_level;
+
+            public StreamingZstdResult(HttpResult result, int compression_level) {
+                base(result.status, null); // No content length for streaming
+                copy_headers(result, this, {"content-length", "content-encoding"});
+                this.inner_result = result;
+                this.compression_level = compression_level;
+            }
+
+            public async override void send_body(AsyncOutput output) throws Error {
+                // Create a zstd output stream wrapper that compresses on-the-fly
+                var zstd_output = new ZstdAsyncOutput(output, compression_level);
+                
+                // Send the body through the zstd wrapper
+                yield inner_result.send_body(zstd_output);
+                
+                // Finish the compression stream
+                yield zstd_output.finish();
+            }
+        }
+
+        /// An AsyncOutput that compresses data on-the-fly using zstd format
+        private class ZstdAsyncOutput : Object, AsyncOutput {
+            private AsyncOutput downstream;
+            private Zstd.CCtx encoder;
+            private bool finished = false;
+            private uint8[] output_buffer;
+            
+            public ZstdAsyncOutput(AsyncOutput downstream, int compression_level) throws Error {
+                this.downstream = downstream;
+                
+                // Initialize zstd encoder
+                this.encoder = new Zstd.CCtx();
+                
+                // Set compression level
+                size_t result = encoder.set_parameter(Zstd.CParameter.compressionLevel, compression_level);
+                if (Zstd.is_error(result) != 0) {
+                    throw new IOError.FAILED("Failed to set zstd compression level: %s".printf(Zstd.get_error_name(result)));
+                }
+                
+                // Allocate output buffer (use recommended size)
+                this.output_buffer = new uint8[Zstd.cstream_out_size()];
+            }
+
+            public async void write(BinaryData data) throws Error {
+                if (finished) {
+                    throw new IOError.FAILED("Cannot write to finished zstd stream");
+                }
+                
+                uint8[] input_bytes = data.to_array();
+                if (input_bytes.length == 0) {
+                    return;
+                }
+                
+                // Set up input buffer
+                Zstd.InBuffer input = Zstd.InBuffer() {
+                    src = input_bytes,
+                    size = input_bytes.length,
+                    pos = 0
+                };
+                
+                // Compress with continue operation (don't flush yet)
+                // NOTE(review): output produced here may stay buffered inside
+                // the encoder until finish() — acceptable for this use, but it
+                // means chunks are not guaranteed to reach downstream per write.
+                while (input.pos < input.size) {
+                    Zstd.OutBuffer output = Zstd.OutBuffer() {
+                        dst = output_buffer,
+                        size = output_buffer.length,
+                        pos = 0
+                    };
+                    
+                    // NOTE(review): `continue` is a reserved word in Vala —
+                    // confirm the VAPI actually exposes this member spelled
+                    // this way (vs. `@continue`/`CONTINUE`), and that
+                    // compress_stream2 takes raw pointers as written here.
+                    size_t result = encoder.compress_stream2(&output, &input, Zstd.EndDirective.continue);
+                    
+                    if (Zstd.is_error(result) != 0) {
+                        throw new IOError.FAILED("Zstd compression stream error: %s".printf(Zstd.get_error_name(result)));
+                    }
+                    
+                    // Write any compressed data produced
+                    if (output.pos > 0) {
+                        yield downstream.write(new ByteBuffer.from_byte_array(output_buffer[0:output.pos]));
+                    }
+                }
+            }
+
+            // Reads stream to EOF, feeding each chunk through write().
+            public async void write_stream(InputStream stream) throws Error {
+                uint8[] chunk = new uint8[8192];
+                while (true) {
+                    ssize_t bytes_read = yield stream.read_async(chunk);
+                    if (bytes_read <= 0) {
+                        break;
+                    }
+                    yield write(new ByteBuffer.from_byte_array(chunk[0:bytes_read]));
+                }
+            }
+            
+            /// Finish the compression stream
+            public async void finish() throws Error {
+                if (finished) {
+                    return;
+                }
+                finished = true;
+                
+                // Create empty input for finishing
+                Zstd.InBuffer input = Zstd.InBuffer() {
+                    src = null,
+                    size = 0,
+                    pos = 0
+                };
+                
+                // Finish the stream - keep calling until fully flushed
+                // (compress_stream2 with `end` returns the number of bytes
+                // still to be flushed; 0 means the frame is complete)
+                size_t remaining = 1;
+                while (remaining > 0) {
+                    Zstd.OutBuffer output = Zstd.OutBuffer() {
+                        dst = output_buffer,
+                        size = output_buffer.length,
+                        pos = 0
+                    };
+                    
+                    remaining = encoder.compress_stream2(&output, &input, Zstd.EndDirective.end);
+                    
+                    if (Zstd.is_error(remaining) != 0) {
+                        throw new IOError.FAILED("Zstd compression stream error during finish: %s".printf(Zstd.get_error_name(remaining)));
+                    }
+                    
+                    // Write any compressed data produced
+                    if (output.pos > 0) {
+                        yield downstream.write(new ByteBuffer.from_byte_array(output_buffer[0:output.pos]));
+                    }
+                }
+                
+                // Encoder will be cleaned up by the free_function when the object is finalized
+            }
+        }
+
+    }
+
+}

+ 15 - 57
src/Core/AsyncOutput.vala

@@ -10,71 +10,29 @@ namespace Astralis {
 
     }
 
-    public class ConverterAsyncOutput : Object, AsyncOutput {
+    /// An AsyncOutput that buffers all written data in memory
+    public class BufferAsyncOutput : Object, AsyncOutput {
+        private ByteArray buffer = new ByteArray();
 
-        private AsyncOutput inner;
-        private Converter converter;
-
-        public ConverterAsyncOutput(AsyncOutput output, Converter converter) {
-            this.inner = output;
-            this.converter = converter;
+        public async void write(BinaryData data) throws Error {
+            uint8[] bytes = data.to_array();
+            buffer.append(bytes);
         }
 
-        public async void write(Invercargill.BinaryData data) throws Error {
-            var in_buffer = data.to_array();
-            size_t in_offset = 0;
-            const size_t BUFFER_SIZE = 4096;
-            
-            while (in_offset < in_buffer.length) {
-                uint8[] out_buffer = new uint8[BUFFER_SIZE];
-                size_t bytes_read = 0;
-                size_t bytes_written = 0;
-                
-                ConverterFlags flags = ConverterFlags.NONE;
-                if (in_offset + bytes_read >= in_buffer.length) {
-                    flags |= ConverterFlags.INPUT_AT_END;
-                }
-                
-                ConverterResult result;
-                try {
-                    result = converter.convert(
-                        in_buffer[in_offset:in_buffer.length],
-                        out_buffer,
-                        flags,
-                        out bytes_read,
-                        out bytes_written
-                    );
-                } catch (IOError.NO_SPACE e) {
-                    // Need larger output buffer, try again with bigger buffer
-                    out_buffer = new uint8[BUFFER_SIZE * 2];
-                    result = converter.convert(
-                        in_buffer[in_offset:in_buffer.length],
-                        out_buffer,
-                        flags,
-                        out bytes_read,
-                        out bytes_written
-                    );
-                }
-                
-                in_offset += bytes_read;
-                
-                if (bytes_written > 0) {
-                    yield inner.write(new ByteBuffer.from_byte_array(out_buffer[0:bytes_written]));
-                }
-                
-                if (result == ConverterResult.FINISHED || result == ConverterResult.FLUSHED) {
+        public async void write_stream(InputStream stream) throws Error {
+            uint8[] chunk = new uint8[8192];
+            while (true) {
+                ssize_t bytes_read = yield stream.read_async(chunk);
+                if (bytes_read <= 0) {
                     break;
                 }
+                buffer.append(chunk[0:bytes_read]);
             }
         }
 
-        public async void write_stream(GLib.InputStream stream) throws Error {
-            var converter_stream = new ConverterInputStream(stream, converter);
-            yield inner.write_stream(converter_stream);
+        public ByteBuffer get_buffer() {
+            return new ByteBuffer.from_byte_array(buffer.steal());
         }
-
-        
-
     }
 
-}
+}

+ 54 - 5
src/Core/HttpResult.vala

@@ -6,15 +6,40 @@ namespace Astralis {
     public abstract class HttpResult : Object {
         public Dictionary<string, string> headers { get; private set; }
         public StatusCode status { get; set; }
-        public uint64? content_length { get; set; }
+        // Bit flags controlling downstream processing (compression/chunking).
+        public HttpResultFlag flags { get; set; }
+        // Content length in bytes; the setter also maintains the
+        // "Content-Length" header (set when a value is assigned, cleared on null).
+        public uint64? content_length { get {
+            return _content_length;
+        }
+        set {
+            _content_length = value;
+            if(value == null) {
+                clear_header("Content-Length");
+            }
+            else {
+                set_header("Content-Length", value.to_string());
+            }
+        }}
+
+        // Backing store for content_length.
+        private uint64? _content_length = null;
 
         protected HttpResult(StatusCode status, uint64? content_length = null) {
             headers = new Dictionary<string, string>();
             this.status = status;
             this.content_length = content_length;
-            if(content_length != null) {
-                headers["Content-Length"] = content_length.to_string();
-            }
+        }
+
+        /// Sets flag on this result; returns this for chaining.
+        public HttpResult set_flag(HttpResultFlag flag) {
+            flags |= flag;
+            return this;
+        }
+
+        /// Clears flag on this result; returns this for chaining.
+        public HttpResult clear_flag(HttpResultFlag flag) {
+            flags &= ~flag;
+            return this;
+        }
+
+        /// Returns true if flag is currently set.
+        public bool flag_is_set(HttpResultFlag flag) {
+            return (flags & flag) != 0;
         }
 
         public HttpResult set_header(string header, string value) {
@@ -22,6 +47,11 @@ namespace Astralis {
             return this;
         }
 
+        public HttpResult clear_header(string header) {
+            headers.remove(header);
+            return this;
+        }
+
         public HttpResult set_all_headers(Enumerable<KeyValuePair<string, string>> headers) {
             foreach(var header in headers) {
                 this.headers[header.key] = header.value;
@@ -37,6 +67,19 @@ namespace Astralis {
         public abstract async void send_body(AsyncOutput output) throws Error;
     }
 
+    /**
+     * An HTTP result with no body (Content-Length: 0).
+     *
+     * send_body is a no-op and the result is flagged DO_NOT_COMPRESS since
+     * there is nothing to compress — intended for statuses such as
+     * NO_CONTENT (204) or NOT_MODIFIED (304).
+     */
+    public class HttpEmptyResult : HttpResult {
+
+        public HttpEmptyResult(StatusCode status) {
+            base(status, 0);
+            set_flag(HttpResultFlag.DO_NOT_COMPRESS);
+        }
+
+        public async override void send_body(AsyncOutput output) {
+            // No-op
+        }
+
+    }
+
     public class HttpDataResult : HttpResult {
 
         private BinaryData bytes;
@@ -64,8 +107,8 @@ namespace Astralis {
         private InputStream stream;
 
         public HttpStreamResult(InputStream stream, uint64? content_length = null, StatusCode status = StatusCode.OK) {
-            this.stream = stream;
             base(status, content_length);
+            this.stream = stream;
         }
 
         public async override void send_body(AsyncOutput output) throws Error {
@@ -74,4 +117,10 @@ namespace Astralis {
 
     }
 
+    /// Flags controlling how the pipeline may post-process a result.
+    /// [Flags] assigns each member its own bit, so values combine with
+    /// bitwise OR and are tested with flag_is_set().
+    [Flags]
+    public enum HttpResultFlag {
+        DO_NOT_COMPRESS,  // Skip response-body compression for this result
+        DO_NOT_CHUNK      // Skip chunked transfer encoding for this result
+    }
+
 }

+ 1 - 0
src/Core/HttpValues.vala

@@ -43,6 +43,7 @@ namespace Astralis {
         OK = 200,
         CREATED = 201,
         NO_CONTENT = 204,
+        NOT_MODIFIED = 304,
         BAD_REQUEST = 400,
         UNAUTHORIZED = 401,
         FORBIDDEN = 403,

+ 3 - 2
src/Core/Pipeline.vala

@@ -19,7 +19,8 @@ namespace Astralis {
         public async HttpResult run(HttpContext context) throws Error {
             var components = component_sources
                 .attempt_select<PipelineComponent>(s => s.get_component())
-                .to_buffer();
+                .to_vector();
+                //  .to_buffer();
 
             if(components.length == 0) {
                 throw new PipelineError.NO_COMPONENTS("No components in pipeline");
@@ -30,7 +31,7 @@ namespace Astralis {
                 current_context = new PipelineContext(context, components[i-1], current_context); 
             }
 
-            var result = yield components[0].process_request(context, current_context);
+            var result = yield current_context.next();
             return result;
         }
     }

+ 11 - 0
src/Endpoints/Endpoint.vala

@@ -0,0 +1,11 @@
+namespace Astralis {
+
+    /// A request handler bound to a single route pattern.
+    public interface Endpoint : Object {
+
+        /// The route pattern this endpoint is registered under.
+        public abstract string route { get; }
+        /// The HTTP methods this endpoint responds to.
+        public abstract Method[] methods { owned get; }
+        /// Handles one request that the router matched to this endpoint.
+        public abstract async HttpResult handle_request(HttpContext http_context, RouteInformation route_context) throws Error;
+
+    }
+
+}

+ 95 - 0
src/Endpoints/FastResource.vala

@@ -0,0 +1,95 @@
+using Invercargill;
+using Invercargill.DataStructures;
+
+namespace Astralis {
+
+    /// Serves a single static resource from memory.
+    ///
+    /// The resource body (and any pre-compressed variants) is prepared once at
+    /// construction time, so each request is answered without touching the
+    /// filesystem or running a compressor. A strong ETag (SHA-256 of the
+    /// identity body) supports conditional GET via If-None-Match.
+    public class FastResource : Object, Endpoint {
+        public string route { get { return _route; } }
+        public Astralis.Method[] methods { owned get { return { Method.GET };} }
+
+        private string _route;
+        private Dictionary<string, string> headers;
+        // Response bodies keyed by content-coding token; "identity" is always present.
+        private Dictionary<string, ByteBuffer> encodings;
+        // Encoding tokens ordered smallest payload first, for content negotiation.
+        private Series<string> encoding_priority;
+        private string e_tag;
+
+        /// Loads the resource body from a file on disk at construction time.
+        public FastResource(string route, string server_path) throws Error {
+            this._route = route;
+            uint8[] data;
+            FileUtils.get_data (server_path, out data);
+            shared_setup(new ByteBuffer.from_byte_array(data));
+        }
+
+        /// Serves the given byte array.
+        public FastResource.from_byte_array(string route, uint8[] byte_array) throws Error {
+            this._route = route;
+            shared_setup(new ByteBuffer.from_byte_array(byte_array));
+        }
+
+        /// Serves the given binary data.
+        public FastResource.from_data(string route, BinaryData data) throws Error {
+            this._route = route;
+            shared_setup(data.to_byte_buffer());
+        }
+
+        /// Serves the given string's raw bytes.
+        public FastResource.from_string(string route, string response_string) throws Error {
+            this._route = route;
+            shared_setup(new ByteBuffer.from_byte_array(response_string.data));
+        }
+
+        /// Sets the Content-Type header sent with every response. Returns this for chaining.
+        public FastResource with_content_type(string content_type) {
+            headers["Content-Type"] = content_type;
+            return this;
+        }
+
+        /// Pre-compresses the identity body with the given compressor. The
+        /// encoded variant is kept only if it is actually smaller.
+        public FastResource with_compressor(Compressor compressor) throws Error {
+            var encoded = compressor.compress_buffer(encodings["identity"], headers.get_or_default("Content-Type"));
+            if(encoded.length >= encodings["identity"].length) {
+                return this; // Skip, since the compressed version would be larger
+            }
+
+            encodings[compressor.encoding_token] = encoded;
+            // Re-rank all known encodings so negotiation prefers the smallest body.
+            encoding_priority = encodings
+                .order_by<uint>(e => e.value.length, (a, b) => (int)a - (int)b)
+                .select<string>(e => e.key)
+                .to_series();
+
+            return this;
+        }
+
+        /// Pre-compresses with gzip, brotli and zstd at their strongest levels.
+        public FastResource with_default_compressors() throws Error {
+            with_compressor(new GzipCompressor(9));
+            with_compressor(new BrotliCompressor(11));
+            with_compressor(new ZstdCompressor(19));
+            return this;
+        }
+
+        private void shared_setup(ByteBuffer data) throws Error {
+            encodings = new Dictionary<string, ByteBuffer>();
+            encodings["identity"] = data;
+            headers = new Dictionary<string, string>();
+            // Fix: encoding_priority must be constructed before use; the
+            // previous code called add() on an uninitialised reference.
+            encoding_priority = new Series<string>();
+            encoding_priority.add("identity");
+
+            var checksum = new Checksum(ChecksumType.SHA256);
+            var byte_array = data.to_array();
+            checksum.update(byte_array, byte_array.length);
+            e_tag = @"\"$(checksum.get_string())\"";
+        }
+
+        public async HttpResult handle_request (HttpContext http_context, RouteInformation route_context) throws Error {
+            // Conditional GET: a matching ETag short-circuits to 304.
+            var if_none_match = http_context.request.get_header("If-None-Match");
+            if(if_none_match != null && if_none_match == e_tag) {
+                return new HttpEmptyResult(StatusCode.NOT_MODIFIED);
+            }
+
+            // Content negotiation: pick the smallest variant the client accepts.
+            // Fix: a request without an Accept-Encoding header previously
+            // dereferenced null; fall back to identity in that case.
+            var accepts = http_context.request.get_header("Accept-Encoding");
+            var encoding = accepts == null
+                ? "identity"
+                : encoding_priority.first_or_default(e => accepts.contains(e)) ?? "identity";
+
+            return new HttpDataResult (encodings[encoding])
+                .set_all_headers(headers)
+                .set_header("Content-Encoding", encoding)
+                .set_header("ETag", e_tag)
+                // The body is already encoded and has a known length, so the
+                // pipeline must neither re-compress nor chunk it.
+                .set_flag(HttpResultFlag.DO_NOT_COMPRESS | HttpResultFlag.DO_NOT_CHUNK);
+        }
+
+    }
+
+}

+ 418 - 0
src/Endpoints/FilesystemResource.vala

@@ -0,0 +1,418 @@
+using Invercargill;
+using Invercargill.DataStructures;
+
+namespace Astralis {
+
+    /// A resource that serves files from the filesystem.
+    /// 
+    /// Route patterns:
+    /// - Exact match (e.g., "/robots.txt"): Serves a specific file
+    /// - Shallow match (e.g., "/assets/*"): Serves files one level deep from the directory
+    /// - Deep match (e.g., "/static/**"): Serves files recursively from the directory
+    public class FilesystemResource : Object, Endpoint {
+        
+        /// The filesystem path to serve files from
+        public string server_path { get; private set; }
+        
+        /// The route pattern for this resource
+        public string route { get { return _route; } }
+        
+        /// Only GET and HEAD methods are supported for file serving.
+        /// NOTE(review): handle_request always produces a full body result;
+        /// assumes the server layer suppresses the body for HEAD — confirm.
+        public Method[] methods { owned get {
+            return new Method[] { Method.GET, Method.HEAD };
+        }}
+        
+        /// Whether to allow directory listings (only for /* and /** routes)
+        public bool allow_directory_listing { get; set; default = false; }
+        
+        /// Default index file to look for in directories (e.g., "index.html")
+        public string? index_file { get; set; default = "index.html"; }
+        
+        private string _route;
+        private MatchType _match_type;
+        private string[] _route_components;
+        private string _route_prefix; // For wildcard routes, the prefix before the wildcard
+        
+        public FilesystemResource(string route, string server_path) throws FilesystemResourceError {
+            // Validate and parse the route
+            _route = route;
+            _route_components = parse_route_components(route);
+            _match_type = determine_match_type(route);
+            _route_prefix = extract_route_prefix(route, _match_type);
+            
+            // Validate server path exists
+            var file = File.new_for_path(server_path);
+            if (!file.query_exists()) {
+                throw new FilesystemResourceError.PATH_NOT_FOUND(
+                    "Server path does not exist: %s".printf(server_path)
+                );
+            }
+            
+            this.server_path = server_path;
+        }
+        
+        public async HttpResult handle_request(HttpContext http_context, RouteInformation route_context) throws Error {
+            // Get the remaining path components after the route prefix
+            string relative_path = get_relative_path(route_context);
+            debug("Serving relative path: %s", relative_path);
+            
+            // Security fix: canonicalize_path() does NOT resolve ".." segments,
+            // so "a/../../etc/passwd" would survive the prefix check below.
+            // Reject parent-directory components outright.
+            foreach (var component in relative_path.split("/")) {
+                if (component == "..") {
+                    return new HttpStringResult("Forbidden", StatusCode.FORBIDDEN);
+                }
+            }
+            
+            // Build the full filesystem path
+            string full_path = Path.build_filename(server_path, relative_path);
+            
+            // Security: Ensure the resolved path is within server_path (prevent path traversal)
+            string resolved_path = canonicalize_path(full_path);
+            string resolved_server_path = canonicalize_path(server_path);
+            
+            // Security fix: require an exact match or a true subpath. A bare
+            // has_prefix() check would accept sibling paths such as
+            // "/srv/www-private" when serving "/srv/www".
+            if (resolved_path != resolved_server_path &&
+                !resolved_path.has_prefix(resolved_server_path + Path.DIR_SEPARATOR_S)) {
+                return new HttpStringResult("Forbidden", StatusCode.FORBIDDEN);
+            }
+            
+            var file = File.new_for_path(resolved_path);
+            
+            // Check if file exists
+            if (!file.query_exists()) {
+                return new HttpStringResult("Not Found", StatusCode.NOT_FOUND);
+            }
+            
+            // Get file info
+            var info = yield file.query_info_async(
+                "standard::type,standard::content-type,standard::size",
+                FileQueryInfoFlags.NONE
+            );
+            
+            var file_type = info.get_file_type();
+            
+            // Handle directories
+            if (file_type == FileType.DIRECTORY) {
+                // Try index file first
+                if (index_file != null) {
+                    var index_path = Path.build_filename(resolved_path, index_file);
+                    var index_file_obj = File.new_for_path(index_path);
+                    if (index_file_obj.query_exists()) {
+                        return yield serve_file(index_file_obj, http_context);
+                    }
+                }
+                
+                // Directory listing if enabled
+                if (allow_directory_listing) {
+                    return yield serve_directory_listing(file, relative_path, http_context);
+                }
+                
+                return new HttpStringResult("Forbidden", StatusCode.FORBIDDEN);
+            }
+            
+            // Serve regular file
+            return yield serve_file(file, http_context);
+        }
+        
+        /// Streams a single regular file with an appropriate Content-Type.
+        private async HttpResult serve_file(File file, HttpContext http_context) throws Error {
+            var info = yield file.query_info_async(
+                "standard::content-type,standard::size",
+                FileQueryInfoFlags.NONE
+            );
+            
+            uint64 size = info.get_size();
+            string? content_type = info.get_content_type();
+            
+            // Open file for reading
+            var stream = yield file.read_async();
+            
+            var result = new HttpStreamResult(stream, size);
+            
+            // Set content type based on file extension or detected type
+            string mime_type = get_mime_type(file.get_basename(), content_type);
+            result.set_header("Content-Type", mime_type);
+            
+            // TODO(review): already-compressed formats (images, video, archives)
+            // gain nothing from pipeline compression; consider restoring the
+            // DO_NOT_COMPRESS flag for those MIME types.
+            
+            return result;
+        }
+        
+        /// Renders a minimal HTML listing of a directory's entries.
+        private async HttpResult serve_directory_listing(File directory, string relative_path, HttpContext http_context) throws Error {
+            var builder = new StringBuilder();
+            builder.append("<!DOCTYPE html>\n");
+            // Security fix: escape the path before embedding it in HTML; a
+            // directory name containing markup would otherwise be injected
+            // verbatim into the page.
+            string safe_path = Markup.escape_text(relative_path);
+            builder.append("<html><head><title>Directory: /%s</title></head>\n".printf(safe_path));
+            builder.append("<body><h1>Directory: /%s</h1><ul>\n".printf(safe_path));
+            
+            // Parent directory link (if not at root)
+            if (relative_path.length > 0 && relative_path != ".") {
+                builder.append("<li><a href=\"../\">../</a></li>\n");
+            }
+            
+            var enumerator = yield directory.enumerate_children_async(
+                "standard::name,standard::type",
+                FileQueryInfoFlags.NONE
+            );
+            
+            var entries = new Series<string>();
+            
+            List<FileInfo>? infos;
+            while ((infos = yield enumerator.next_files_async(1)) != null) {
+                foreach (var info in infos) {
+                    string name = info.get_name();
+                    FileType type = info.get_file_type();
+                    
+                    // Use relative links - just the filename, since the browser is already at the directory URL
+                    // This avoids path duplication issues
+                    string link = Uri.escape_string(name);
+                    if (type == FileType.DIRECTORY) {
+                        link += "/";
+                    }
+                    
+                    string display = name;
+                    if (type == FileType.DIRECTORY) {
+                        display += "/";
+                    }
+                    
+                    entries.add("<li><a href=\"%s\">%s</a></li>\n".printf(
+                        link,
+                        Markup.escape_text(display)
+                    ));
+                }
+            }
+            
+            // Sort entries alphabetically (insertion sort; listings are small
+            // and the Series container has no built-in sort here)
+            var sorted = entries.to_array();
+            for (int i = 1; i < sorted.length; i++) {
+                string key = sorted[i];
+                int j = i - 1;
+                while (j >= 0 && strcmp(sorted[j], key) > 0) {
+                    sorted[j + 1] = sorted[j];
+                    j--;
+                }
+                sorted[j + 1] = key;
+            }
+            foreach (var entry in sorted) {
+                builder.append(entry);
+            }
+            
+            builder.append("</ul></body></html>");
+            
+            var result = new HttpStringResult(builder.str);
+            result.set_header("Content-Type", "text/html; charset=utf-8");
+            return result;
+        }
+        
+        private string get_relative_path(RouteInformation route_context) {
+            // For exact match, use the last component of the route
+            if (_match_type == MatchType.EXACT) {
+                return _route_components.length > 0 ? _route_components[_route_components.length - 1] : "";
+            }
+            
+            // For wildcard matches, get the remaining path from the named components.
+            // NOTE(review): assumes the router exposes the wildcard remainder
+            // under the "**" key for both /* and /** routes — confirm this
+            // holds for shallow (/*) matches.
+            string? remaining = null;
+            route_context.named_components.try_get("**", out remaining);
+            if (remaining != null && remaining.length > 0) {
+                return remaining;
+            }
+            
+            // If no remaining path, serve from root of server_path
+            return ".";
+        }
+        
+        private string canonicalize_path(string path) {
+            // Normalise to an absolute path. NOTE(review): File.get_path()
+            // does not resolve ".." segments or symlinks, which is why
+            // handle_request rejects ".." components explicitly before the
+            // containment check.
+            var file = File.new_for_path(path);
+            return file.get_path() ?? path;
+        }
+        
+        /// Returns the literal (non-wildcard, non-empty) components of a route.
+        private static string[] parse_route_components(string route) {
+            return Wrap.array<string>(route.split("/"))
+                .where(c => c.length > 0 && c != "*" && c != "**")
+                .to_array();
+        }
+        
+        /// Classifies a route as exact, shallow (/*) or deep (/**).
+        private static MatchType determine_match_type(string route) {
+            if (route.has_suffix("/**")) {
+                return MatchType.DEEP;
+            } else if (route.has_suffix("/*")) {
+                return MatchType.SHALLOW;
+            } else {
+                return MatchType.EXACT;
+            }
+        }
+        
+        /// Strips the trailing wildcard ("/*" or "/**") from a route pattern.
+        private static string extract_route_prefix(string route, MatchType match_type) {
+            switch (match_type) {
+                case MatchType.DEEP:
+                    return route.substring(0, route.length - 3);
+                case MatchType.SHALLOW:
+                    return route.substring(0, route.length - 2);
+                default:
+                    return route;
+            }
+        }
+        
+        /// Chooses a MIME type: extension table first, then GLib's detected
+        /// content type, finally application/octet-stream.
+        private static string get_mime_type(string filename, string? detected_type) {
+            // Get extension
+            int dot_pos = filename.last_index_of_char('.');
+            if (dot_pos >= 0 && dot_pos < filename.length - 1) {
+                string ext = filename.substring(dot_pos + 1).down();
+                string? mime = get_mime_type_for_extension(ext);
+                if (mime != null) {
+                    return mime;
+                }
+            }
+            
+            // Fall back to detected type
+            if (detected_type != null) {
+                // Convert GLib content type to MIME type if needed
+                if (!detected_type.contains("/")) {
+                    var mime_type = ContentType.get_mime_type(detected_type);
+                    if (mime_type != null) {
+                        return mime_type;
+                    }
+                }
+                return detected_type;
+            }
+            
+            return "application/octet-stream";
+        }
+        
+        /// Lookup table of well-known file extensions (lower-case, no dot)
+        /// to MIME types; returns null for unknown extensions.
+        private static string? get_mime_type_for_extension(string ext) {
+            switch (ext) {
+                // Text
+                case "html":
+                case "htm":
+                    return "text/html";
+                case "css":
+                    return "text/css";
+                case "js":
+                    return "application/javascript";
+                case "json":
+                    return "application/json";
+                case "xml":
+                    return "application/xml";
+                case "txt":
+                    return "text/plain";
+                case "md":
+                    return "text/markdown";
+                case "csv":
+                    return "text/csv";
+                case "svg":
+                    return "image/svg+xml";
+                
+                // Images
+                case "png":
+                    return "image/png";
+                case "jpg":
+                case "jpeg":
+                    return "image/jpeg";
+                case "gif":
+                    return "image/gif";
+                case "ico":
+                    return "image/x-icon";
+                case "webp":
+                    return "image/webp";
+                case "bmp":
+                    return "image/bmp";
+                case "tiff":
+                case "tif":
+                    return "image/tiff";
+                
+                // Audio
+                case "mp3":
+                    return "audio/mpeg";
+                case "wav":
+                    return "audio/wav";
+                case "ogg":
+                    return "audio/ogg";
+                case "flac":
+                    return "audio/flac";
+                case "m4a":
+                    return "audio/mp4";
+                
+                // Video
+                case "mp4":
+                    return "video/mp4";
+                case "webm":
+                    return "video/webm";
+                case "avi":
+                    return "video/x-msvideo";
+                case "mov":
+                    return "video/quicktime";
+                case "mkv":
+                    return "video/x-matroska";
+                
+                // Fonts
+                case "woff":
+                    return "font/woff";
+                case "woff2":
+                    return "font/woff2";
+                case "ttf":
+                    return "font/ttf";
+                case "otf":
+                    return "font/otf";
+                case "eot":
+                    return "application/vnd.ms-fontobject";
+                
+                // Documents
+                case "pdf":
+                    return "application/pdf";
+                case "doc":
+                    return "application/msword";
+                case "docx":
+                    return "application/vnd.openxmlformats-officedocument.wordprocessingml.document";
+                case "xls":
+                    return "application/vnd.ms-excel";
+                case "xlsx":
+                    return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
+                case "ppt":
+                    return "application/vnd.ms-powerpoint";
+                case "pptx":
+                    return "application/vnd.openxmlformats-officedocument.presentationml.presentation";
+                
+                // Archives
+                case "zip":
+                    return "application/zip";
+                case "gz":
+                    return "application/gzip";
+                case "tar":
+                    return "application/x-tar";
+                case "rar":
+                    return "application/vnd.rar";
+                case "7z":
+                    return "application/x-7z-compressed";
+                
+                // Other
+                case "wasm":
+                    return "application/wasm";
+                case "bin":
+                    return "application/octet-stream";
+                
+                default:
+                    return null;
+            }
+        }
+        
+        //  private static bool should_skip_compression(string mime_type) {
+        //      // Don't compress already-compressed formats
+        //      string type = mime_type.down();
+        //      return type.has_prefix("image/") && !type.contains("svg") ||
+        //             type.has_prefix("video/") ||
+        //             type.has_prefix("audio/") ||
+        //             type.contains("zip") ||
+        //             type.contains("gzip") ||
+        //             type.contains("x-rar") ||
+        //             type.contains("x-7z") ||
+        //             type.contains("pdf") ||
+        //             type.contains("wasm");
+        //  }
+    }
+    
+    /// How a FilesystemResource route pattern matches request paths.
+    private enum MatchType {
+        EXACT,    // No wildcard - exact file match
+        SHALLOW,  // /* - one level deep
+        DEEP      // /** - recursive
+    }
+    
+    /// Errors raised when constructing a FilesystemResource.
+    public errordomain FilesystemResourceError {
+        PATH_NOT_FOUND,   // The configured server path does not exist
+        INVALID_ROUTE     // The route pattern is not a valid file route
+    }
+
+}

+ 7 - 11
src/Server/Server.vala

@@ -240,17 +240,13 @@ namespace Astralis {
 
             // Create SocketAddress from native sockaddr structure
             // Use 128 as the maximum size for sockaddr (IPv6)
-            try {
-                var sock_addr = SocketAddress.from_native((void*)addr_ptr, 128);
-                
-                // Cast to InetSocketAddress to get the address
-                var inet_addr = (InetSocketAddress) sock_addr;
-                
-                // Get the InetAddress and convert to string
-                return inet_addr.address.to_string();
-            } catch (Error e) {
-                return null;
-            }
+            var sock_addr = SocketAddress.from_native((void*)addr_ptr, 128);
+            
+            // Cast to InetSocketAddress to get the address
+            var inet_addr = (InetSocketAddress) sock_addr;
+            
+            // Get the InetAddress and convert to string
+            return inet_addr.address.to_string();
         }
     }
 }

+ 9 - 3
src/meson.build

@@ -7,7 +7,13 @@ sources = files(
     'Core/Pipeline.vala',
     'Data/FormDataParser.vala',
     'Components/EndpointRouter.vala',
-    'Components/Compression.vala',
+    'Components/Compressor.vala',
+    'Components/GzipCompressor.vala',
+    'Components/BrotliCompressor.vala',
+    'Components/ZstdCompressor.vala',
+    'Endpoints/Endpoint.vala',
+    'Endpoints/FilesystemResource.vala',
+    'Endpoints/FastResource.vala',
     'Server/Server.vala',
     'Server/RequestContext.vala',
     'Server/ResponseContext.vala',
@@ -17,12 +23,12 @@ sources = files(
 
 libastralis = shared_library('astralis',
     sources,
-    dependencies: [glib_dep, gobject_dep, mhd_dep, gio_dep, gio_unix_dep, invercargill_dep, invercargill_json_dep, json_glib_dep],
+    dependencies: [glib_dep, gobject_dep, mhd_dep, gio_dep, gio_unix_dep, invercargill_dep, invercargill_json_dep, json_glib_dep, zlib_dep, brotli_dep, zstd_dep],
     install: true
 )
 
 astralis_dep = declare_dependency(
     link_with: libastralis,
     include_directories: include_directories('.'),
-    dependencies: [glib_dep, gobject_dep, invercargill_dep, invercargill_json_dep, mhd_dep, json_glib_dep] # Users of astralis need glib, gobject, invercargill and mhd
+    dependencies: [glib_dep, gobject_dep, invercargill_dep, invercargill_json_dep, mhd_dep, json_glib_dep, brotli_dep, zstd_dep] # Users of astralis need glib, gobject, invercargill, mhd, brotli, and zstd
 )

+ 135 - 0
vapi/libbrotlienc.vapi

@@ -0,0 +1,135 @@
+/* libbrotlienc Vala bindings
+ * 
+ * Bindings for the Brotli compression library (encoder only)
+ * Based on brotli encode.h and types.h
+ */
+
+[CCode (cprefix = "BROTLI_", lower_case_cprefix = "brotli_", cheader_filename = "brotli/encode.h,brotli/types.h")]
+namespace Brotli {
+
+    /* Boolean type (maps to the C int BROTLI_BOOL) */
+    [CCode (cname = "BROTLI_BOOL", has_type_id = false, default_value = "BROTLI_FALSE")]
+    public struct Bool : int {
+    }
+    
+    [CCode (cname = "BROTLI_TRUE")]
+    public const Bool TRUE;
+    [CCode (cname = "BROTLI_FALSE")]
+    public const Bool FALSE;
+    
+    [CCode (cname = "TO_BROTLI_BOOL")]
+    public Bool to_bool(int x);
+
+    /* Memory allocation callbacks */
+    [CCode (cname = "brotli_alloc_func", has_target = false)]
+    public delegate void* AllocFunc(void* opaque, size_t size);
+    
+    [CCode (cname = "brotli_free_func", has_target = false)]
+    public delegate void FreeFunc(void* opaque, void* address);
+
+    /* Constants from encode.h */
+    [CCode (cname = "BROTLI_MIN_WINDOW_BITS")]
+    public const int MIN_WINDOW_BITS;
+    [CCode (cname = "BROTLI_MAX_WINDOW_BITS")]
+    public const int MAX_WINDOW_BITS;
+    [CCode (cname = "BROTLI_LARGE_MAX_WINDOW_BITS")]
+    public const int LARGE_MAX_WINDOW_BITS;
+    [CCode (cname = "BROTLI_MIN_INPUT_BLOCK_BITS")]
+    public const int MIN_INPUT_BLOCK_BITS;
+    [CCode (cname = "BROTLI_MAX_INPUT_BLOCK_BITS")]
+    public const int MAX_INPUT_BLOCK_BITS;
+    [CCode (cname = "BROTLI_MIN_QUALITY")]
+    public const int MIN_QUALITY;
+    [CCode (cname = "BROTLI_MAX_QUALITY")]
+    public const int MAX_QUALITY;
+    [CCode (cname = "BROTLI_DEFAULT_QUALITY")]
+    public const int DEFAULT_QUALITY;
+    [CCode (cname = "BROTLI_DEFAULT_WINDOW")]
+    public const int DEFAULT_WINDOW;
+
+    /* Encoder mode */
+    [CCode (cname = "BrotliEncoderMode", has_type_id = false)]
+    public enum EncoderMode {
+        [CCode (cname = "BROTLI_MODE_GENERIC")]
+        GENERIC,
+        [CCode (cname = "BROTLI_MODE_TEXT")]
+        TEXT,
+        [CCode (cname = "BROTLI_MODE_FONT")]
+        FONT
+    }
+
+    /* Encoder operation for streaming */
+    [CCode (cname = "BrotliEncoderOperation", has_type_id = false)]
+    public enum EncoderOperation {
+        [CCode (cname = "BROTLI_OPERATION_PROCESS")]
+        PROCESS,
+        [CCode (cname = "BROTLI_OPERATION_FLUSH")]
+        FLUSH,
+        [CCode (cname = "BROTLI_OPERATION_FINISH")]
+        FINISH,
+        [CCode (cname = "BROTLI_OPERATION_EMIT_METADATA")]
+        EMIT_METADATA
+    }
+
+    /* Encoder parameters */
+    [CCode (cname = "BrotliEncoderParameter", has_type_id = false)]
+    public enum EncoderParameter {
+        [CCode (cname = "BROTLI_PARAM_MODE")]
+        MODE,
+        [CCode (cname = "BROTLI_PARAM_QUALITY")]
+        QUALITY,
+        [CCode (cname = "BROTLI_PARAM_LGWIN")]
+        LGWIN,
+        [CCode (cname = "BROTLI_PARAM_LGBLOCK")]
+        LGBLOCK,
+        [CCode (cname = "BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING")]
+        DISABLE_LITERAL_CONTEXT_MODELING,
+        [CCode (cname = "BROTLI_PARAM_SIZE_HINT")]
+        SIZE_HINT,
+        [CCode (cname = "BROTLI_PARAM_LARGE_WINDOW")]
+        LARGE_WINDOW,
+        [CCode (cname = "BROTLI_PARAM_NPOSTFIX")]
+        NPOSTFIX,
+        [CCode (cname = "BROTLI_PARAM_NDIRECT")]
+        NDIRECT,
+        [CCode (cname = "BROTLI_PARAM_STREAM_OFFSET")]
+        STREAM_OFFSET
+    }
+
+    /* Encoder state - opaque structure; destroyed automatically via the
+     * free_function when the Vala reference is dropped */
+    [CCode (cname = "BrotliEncoderState", free_function = "BrotliEncoderDestroyInstance")]
+    [Compact]
+    public class EncoderState {
+        [CCode (cname = "BrotliEncoderCreateInstance")]
+        public EncoderState(AllocFunc? alloc_func, FreeFunc? free_func, void* opaque);
+        
+        [CCode (cname = "BrotliEncoderSetParameter")]
+        public Bool set_parameter(EncoderParameter param, uint32 value);
+        
+        /* The ref parameters are advanced in place as input is consumed and
+         * output is produced */
+        [CCode (cname = "BrotliEncoderCompressStream")]
+        public Bool compress_stream(EncoderOperation op, ref size_t available_in, 
+            ref uint8* next_in, ref size_t available_out, ref uint8* next_out, 
+            out size_t total_out);
+        
+        [CCode (cname = "BrotliEncoderIsFinished")]
+        public Bool is_finished();
+        
+        [CCode (cname = "BrotliEncoderHasMoreOutput")]
+        public Bool has_more_output();
+        
+        /* Returned buffer is owned by the encoder (hence unowned); valid only
+         * until the next encoder call */
+        [CCode (cname = "BrotliEncoderTakeOutput")]
+        public unowned uint8* take_output(ref size_t size);
+    }
+
+    /* Encoder one-shot functions */
+    [CCode (cname = "BrotliEncoderMaxCompressedSize")]
+    public size_t encoder_max_compressed_size(size_t input_size);
+    
+    [CCode (cname = "BrotliEncoderCompress")]
+    public Bool encoder_compress(int quality, int lgwin, EncoderMode mode, 
+        size_t input_size, uint8* input_buffer, 
+        ref size_t encoded_size, uint8* encoded_buffer);
+    
+    [CCode (cname = "BrotliEncoderVersion")]
+    public uint32 encoder_version();
+}

+ 286 - 0
vapi/libzstd.vapi

@@ -0,0 +1,286 @@
+/* libzstd.vapi - Vala bindings for Zstandard compression library
+ * 
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * Vala bindings generated for use with the Astralis framework.
+ * 
+ * Based on zstd.h from libzstd 1.5.7
+ */
+
+[CCode (cprefix = "ZSTD_", lower_case_cprefix = "ZSTD_", cheader_filename = "zstd.h,zstd_errors.h")]
+namespace Zstd {
+    
+    /* Compile-time library version, mirrored from zstd.h. */
+    [CCode (cname = "ZSTD_VERSION_MAJOR")]
+    public const int VERSION_MAJOR;
+    [CCode (cname = "ZSTD_VERSION_MINOR")]
+    public const int VERSION_MINOR;
+    [CCode (cname = "ZSTD_VERSION_RELEASE")]
+    public const int VERSION_RELEASE;
+    /* Packed as major*10000 + minor*100 + release. */
+    [CCode (cname = "ZSTD_VERSION_NUMBER")]
+    public const int VERSION_NUMBER;
+    
+    /* Runtime version of the loaded library; may differ from the
+     * compile-time constants above when linked dynamically. */
+    [CCode (cname = "ZSTD_versionNumber")]
+    public uint version_number();
+    [CCode (cname = "ZSTD_versionString")]
+    public unowned string version_string();
+    
+    /* Default compression level (3 in current upstream zstd). */
+    [CCode (cname = "ZSTD_CLEVEL_DEFAULT")]
+    public const int CLEVEL_DEFAULT;
+    
+    /* Block size constants */
+    [CCode (cname = "ZSTD_BLOCKSIZELOG_MAX")]
+    public const int BLOCKSIZELOG_MAX;
+    [CCode (cname = "ZSTD_BLOCKSIZE_MAX")]
+    public const int BLOCKSIZE_MAX;
+    
+    /* Sentinel values returned by get_frame_content_size when the size is
+     * not recorded in the frame header, or the frame is invalid. Compare
+     * against these before treating the result as a byte count. */
+    [CCode (cname = "ZSTD_CONTENTSIZE_UNKNOWN")]
+    public const uint64 CONTENTSIZE_UNKNOWN;
+    [CCode (cname = "ZSTD_CONTENTSIZE_ERROR")]
+    public const uint64 CONTENTSIZE_ERROR;
+    
+    /* Match-finder strategies (ZSTD_strategy), ordered roughly from fastest
+     * to strongest compression. Member names intentionally stay lowercase so
+     * the ZSTD_ cprefix maps them onto the C identifiers (fast -> ZSTD_fast). */
+    [CCode (cname = "ZSTD_strategy", cprefix = "ZSTD_", has_type_id = false)]
+    public enum Strategy {
+        fast = 1,
+        dfast = 2,
+        greedy = 3,
+        lazy = 4,
+        lazy2 = 5,
+        btlazy2 = 6,
+        btopt = 7,
+        btultra = 8,
+        btultra2 = 9
+    }
+    
+    /* End directive for streaming compression (ZSTD_EndDirective); passed
+     * to CCtx.compress_stream2 to control flushing behaviour. */
+    [CCode (cname = "ZSTD_EndDirective", cprefix = "ZSTD_e_", has_type_id = false)]
+    public enum EndDirective {
+        /* `continue` is a reserved Vala keyword, so it must be escaped with
+         * `@`; the cname is pinned explicitly so the generated C still uses
+         * ZSTD_e_continue. Callers reference it as EndDirective.@continue. */
+        [CCode (cname = "ZSTD_e_continue")]
+        @continue = 0,
+        flush = 1,
+        end = 2
+    }
+    
+    /* Reset directive for CCtx.reset / DCtx.reset (ZSTD_ResetDirective):
+     * abort the in-flight frame, restore parameters to defaults, or both. */
+    [CCode (cname = "ZSTD_ResetDirective", cprefix = "ZSTD_reset_", has_type_id = false)]
+    public enum ResetDirective {
+        session_only = 1,
+        parameters = 2,
+        session_and_parameters = 3
+    }
+    
+    /* Error codes (ZSTD_ErrorCode) as reported by get_error_code. Member
+     * names mirror zstd_errors.h via the ZSTD_error_ cprefix; numeric gaps
+     * are reserved by upstream, and newer libraries may add codes. */
+    [CCode (cname = "ZSTD_ErrorCode", cprefix = "ZSTD_error_", has_type_id = false)]
+    public enum ErrorCode {
+        no_error = 0,
+        GENERIC = 1,
+        prefix_unknown = 10,
+        version_unsupported = 12,
+        frameParameter_unsupported = 14,
+        frameParameter_windowTooLarge = 16,
+        corruption_detected = 20,
+        checksum_wrong = 22,
+        literals_headerWrong = 24,
+        dictionary_corrupted = 30,
+        dictionary_wrong = 32,
+        dictionaryCreation_failed = 34,
+        parameter_unsupported = 40,
+        parameter_combination_unsupported = 41,
+        parameter_outOfBound = 42,
+        tableLog_tooLarge = 44,
+        maxSymbolValue_tooLarge = 46,
+        maxSymbolValue_tooSmall = 48,
+        cannotProduce_uncompressedBlock = 49,
+        stabilityCondition_notRespected = 50,
+        stage_wrong = 60,
+        init_missing = 62,
+        memory_allocation = 64,
+        workSpace_tooSmall = 66,
+        dstSize_tooSmall = 70,
+        srcSize_wrong = 72,
+        dstBuffer_null = 74,
+        noForwardProgress_destFull = 80,
+        noForwardProgress_inputEmpty = 82,
+        frameIndex_tooLarge = 100,
+        seekableIO = 102,
+        dstBuffer_wrong = 104,
+        srcBuffer_wrong = 105,
+        sequenceProducer_failed = 106,
+        externalSequences_invalid = 107,
+        maxCode = 120
+    }
+    
+    /* Parameter bounds as returned by cparam_get_bounds/dparam_get_bounds.
+     * Check `error` with is_error() before trusting the bounds. */
+    [CCode (cname = "ZSTD_bounds", has_type_id = false)]
+    public struct Bounds {
+        public size_t error;
+        public int lowerBound;
+        public int upperBound;
+    }
+    
+    /* Streaming input descriptor (ZSTD_inBuffer). The library advances
+     * `pos` as bytes of `src` are consumed; pos must stay <= size. */
+    [CCode (cname = "ZSTD_inBuffer", has_type_id = false)]
+    public struct InBuffer {
+        public uint8* src;
+        public size_t size;
+        public size_t pos;
+    }
+    
+    /* Streaming output descriptor (ZSTD_outBuffer). The library advances
+     * `pos` as bytes are produced into `dst`. */
+    [CCode (cname = "ZSTD_outBuffer", has_type_id = false)]
+    public struct OutBuffer {
+        public uint8* dst;
+        public size_t size;
+        public size_t pos;
+    }
+    
+    /* Compression context (ZSTD_CCtx). [Compact]: single-owner handle,
+     * released via ZSTD_freeCCtx. Reusing one context across many
+     * compressions is cheaper than recreating it per call. */
+    [CCode (cname = "ZSTD_CCtx", free_function = "ZSTD_freeCCtx", has_type_id = false)]
+    [Compact]
+    public class CCtx {
+        /* Binds ZSTD_createCCtx. */
+        [CCode (cname = "ZSTD_createCCtx")]
+        public CCtx();
+        
+        /* One-shot compression at an explicit level (ZSTD_compressCCtx).
+         * Returns the compressed size, or an error code - test with
+         * Zstd.is_error(). Size dst with Zstd.compress_bound(). */
+        [CCode (cname = "ZSTD_compressCCtx")]
+        public size_t compress(uint8* dst, size_t dstCapacity, uint8* src, size_t srcSize, int compressionLevel);
+        
+        /* One-shot compression using parameters previously applied via
+         * set_parameter (ZSTD_compress2). */
+        [CCode (cname = "ZSTD_compress2")]
+        public size_t compress2(uint8* dst, size_t dstCapacity, uint8* src, size_t srcSize);
+        
+        /* Sets one advanced parameter (ZSTD_CCtx_setParameter); values are
+         * sticky until reset with ResetDirective.parameters. */
+        [CCode (cname = "ZSTD_CCtx_setParameter")]
+        public size_t set_parameter(CParameter param, int value);
+        
+        /* Declares the total source size of the next frame so it can be
+         * recorded in the frame header (ZSTD_CCtx_setPledgedSrcSize). */
+        [CCode (cname = "ZSTD_CCtx_setPledgedSrcSize")]
+        public size_t set_pledged_src_size(uint64 pledgedSrcSize);
+        
+        /* Binds ZSTD_CCtx_reset; see ResetDirective for what is cleared. */
+        [CCode (cname = "ZSTD_CCtx_reset")]
+        public size_t reset(ResetDirective reset);
+        
+        /* Streaming compression (ZSTD_compressStream2). Consumes/produces
+         * through the buffers' pos fields; returns 0 when the requested
+         * flush/end completed, >0 while more calls are needed, or an error. */
+        [CCode (cname = "ZSTD_compressStream2")]
+        public size_t compress_stream2(OutBuffer* output, InBuffer* input, EndDirective endOp);
+        
+        /* Older streaming API (ZSTD_initCStream / ZSTD_compressStream /
+         * ZSTD_flushStream / ZSTD_endStream), kept for compatibility; new
+         * code should prefer compress_stream2. */
+        [CCode (cname = "ZSTD_initCStream")]
+        public size_t init_stream(int compressionLevel);
+        
+        [CCode (cname = "ZSTD_compressStream")]
+        public size_t compress_stream(OutBuffer* output, InBuffer* input);
+        
+        [CCode (cname = "ZSTD_flushStream")]
+        public size_t flush_stream(OutBuffer* output);
+        
+        [CCode (cname = "ZSTD_endStream")]
+        public size_t end_stream(OutBuffer* output);
+    }
+    
+    /* Decompression context (ZSTD_DCtx). [Compact]: single-owner handle,
+     * released via ZSTD_freeDCtx. */
+    [CCode (cname = "ZSTD_DCtx", free_function = "ZSTD_freeDCtx", has_type_id = false)]
+    [Compact]
+    public class DCtx {
+        /* Binds ZSTD_createDCtx. */
+        [CCode (cname = "ZSTD_createDCtx")]
+        public DCtx();
+        
+        /* One-shot decompression (ZSTD_decompressDCtx). dstCapacity must be
+         * at least the original content size; returns the number of bytes
+         * written or an error code - test with Zstd.is_error(). */
+        [CCode (cname = "ZSTD_decompressDCtx")]
+        public size_t decompress(uint8* dst, size_t dstCapacity, uint8* src, size_t srcSize);
+        
+        /* Sets one advanced decompression parameter (ZSTD_DCtx_setParameter). */
+        [CCode (cname = "ZSTD_DCtx_setParameter")]
+        public size_t set_parameter(DParameter param, int value);
+        
+        /* Binds ZSTD_DCtx_reset; see ResetDirective for what is cleared. */
+        [CCode (cname = "ZSTD_DCtx_reset")]
+        public size_t reset(ResetDirective reset);
+    }
+    
+    /* Compression stream (ZSTD_CStream). In modern zstd this is the same
+     * object as ZSTD_CCtx; prefer CCtx and its streaming methods. This
+     * class exists only to mirror the C type for completeness. */
+    [CCode (cname = "ZSTD_CStream", free_function = "ZSTD_freeCStream", has_type_id = false)]
+    [Compact]
+    public class CStream {
+        [CCode (cname = "ZSTD_createCStream")]
+        public CStream();
+    }
+    
+    /* Advanced compression parameters for CCtx.set_parameter
+     * (ZSTD_cParameter). Numeric gaps are reserved by upstream. The
+     * nbWorkers/jobSize/overlapLog group requires a libzstd built with
+     * multithreading support - TODO confirm against the linked library. */
+    [CCode (cname = "ZSTD_cParameter", cprefix = "ZSTD_c_", has_type_id = false)]
+    public enum CParameter {
+        compressionLevel = 100,
+        windowLog = 101,
+        hashLog = 102,
+        chainLog = 103,
+        searchLog = 104,
+        minMatch = 105,
+        targetLength = 106,
+        strategy = 107,
+        targetCBlockSize = 130,
+        enableLongDistanceMatching = 160,
+        ldmHashLog = 161,
+        ldmMinMatch = 162,
+        ldmBucketSizeLog = 163,
+        ldmHashRateLog = 164,
+        contentSizeFlag = 200,
+        checksumFlag = 201,
+        dictIDFlag = 202,
+        nbWorkers = 400,
+        jobSize = 401,
+        overlapLog = 402
+    }
+    
+    /* Advanced decompression parameters for DCtx.set_parameter
+     * (ZSTD_dParameter). Only the stable subset is bound here. */
+    [CCode (cname = "ZSTD_dParameter", cprefix = "ZSTD_d_", has_type_id = false)]
+    public enum DParameter {
+        windowLogMax = 100
+    }
+    
+    /* Simple API - one-shot compression (ZSTD_compress). Returns the
+     * compressed size or an error code; size dst with compress_bound(). */
+    [CCode (cname = "ZSTD_compress")]
+    public size_t compress(uint8* dst, size_t dstCapacity, uint8* src, size_t srcSize, int compressionLevel);
+    
+    /* Simple API - one-shot decompression (ZSTD_decompress). compressedSize
+     * must be the exact size of one or more complete frames. */
+    [CCode (cname = "ZSTD_decompress")]
+    public size_t decompress(uint8* dst, size_t dstCapacity, uint8* src, size_t compressedSize);
+    
+    /* Worst-case compressed size for a one-shot call on srcSize bytes. */
+    [CCode (cname = "ZSTD_compressBound")]
+    public size_t compress_bound(size_t srcSize);
+    
+    /* Error helpers: most zstd calls return a size_t that encodes either a
+     * byte count or an error; is_error() (nonzero = error) tells them apart. */
+    [CCode (cname = "ZSTD_isError")]
+    public uint is_error(size_t result);
+    
+    [CCode (cname = "ZSTD_getErrorCode")]
+    public ErrorCode get_error_code(size_t functionResult);
+    
+    [CCode (cname = "ZSTD_getErrorName")]
+    public unowned string get_error_name(size_t result);
+    
+    /* Valid compression-level range; negative levels trade ratio for speed. */
+    [CCode (cname = "ZSTD_minCLevel")]
+    public int min_c_level();
+    
+    [CCode (cname = "ZSTD_maxCLevel")]
+    public int max_c_level();
+    
+    [CCode (cname = "ZSTD_defaultCLevel")]
+    public int default_c_level();
+    
+    /* Reads the decompressed size recorded in a frame header. May return
+     * CONTENTSIZE_UNKNOWN or CONTENTSIZE_ERROR - compare before using the
+     * value as a byte count. */
+    [CCode (cname = "ZSTD_getFrameContentSize")]
+    public uint64 get_frame_content_size(uint8* src, size_t srcSize);
+    
+    /* Size in bytes of the first complete frame found at src. */
+    [CCode (cname = "ZSTD_findFrameCompressedSize")]
+    public size_t find_frame_compressed_size(uint8* src, size_t srcSize);
+    
+    /* Recommended buffer sizes for streaming compression. */
+    [CCode (cname = "ZSTD_CStreamInSize")]
+    public size_t cstream_in_size();
+    
+    [CCode (cname = "ZSTD_CStreamOutSize")]
+    public size_t cstream_out_size();
+    
+    /* Human-readable message for a specific ErrorCode. */
+    [CCode (cname = "ZSTD_getErrorString")]
+    public unowned string get_error_string(ErrorCode code);
+    
+    /* Valid range for a given compression/decompression parameter. */
+    [CCode (cname = "ZSTD_cParam_getBounds")]
+    public Bounds cparam_get_bounds(CParameter cParam);
+    
+    [CCode (cname = "ZSTD_dParam_getBounds")]
+    public Bounds dparam_get_bounds(DParameter dParam);
+}