diff --git a/examples/can-sync/src/can_client.rs b/examples/can-sync/src/can_client.rs
index 6f71ac9..9e634a6 100644
--- a/examples/can-sync/src/can_client.rs
+++ b/examples/can-sync/src/can_client.rs
@@ -27,6 +27,7 @@ pub struct CanSyncClient {
 }
 
 impl CanSyncClient {
+    /// Create a new client pointed at the given CAN service URL, authenticated with the sync API key.
     pub fn new(base_url: &str, sync_key: &str) -> Self {
         Self {
             http: reqwest::Client::new(),
@@ -182,7 +183,8 @@ impl CanSyncClient {
         rx
     }
 
-    /// Internal: connect to SSE and forward events until the stream ends or errors.
+    // Connect to the SSE endpoint and forward parsed events to the channel
+    // until the stream ends or an error occurs.
    async fn run_sse_stream(
        http: &reqwest::Client,
        url: &str,
diff --git a/examples/can-sync/src/config.rs b/examples/can-sync/src/config.rs
index 8c00932..65bd283 100644
--- a/examples/can-sync/src/config.rs
+++ b/examples/can-sync/src/config.rs
@@ -1,6 +1,7 @@
 use serde::Deserialize;
 use std::path::Path;
 
+/// All settings needed to run the sync agent, loaded from a YAML file.
 #[derive(Debug, Clone, Deserialize)]
 pub struct SyncConfig {
     /// Base URL of the local CAN service (e.g. "http://127.0.0.1:3210")
@@ -32,6 +33,7 @@ fn default_poll_interval() -> u64 {
 }
 
 impl SyncConfig {
+    /// Read a YAML config file from disk and parse it into a SyncConfig.
     pub fn load(path: &Path) -> anyhow::Result<Self> {
         let contents = std::fs::read_to_string(path)?;
         let config: Self = serde_yaml::from_str(&contents)?;
diff --git a/examples/can-sync/src/discovery.rs b/examples/can-sync/src/discovery.rs
index b17d6de..f19b180 100644
--- a/examples/can-sync/src/discovery.rs
+++ b/examples/can-sync/src/discovery.rs
@@ -27,6 +27,7 @@ pub struct Discovery {
 }
 
 impl Discovery {
+    /// Create a new Discovery that listens on a gossip topic derived from the shared passphrase.
     pub fn new(endpoint: Endpoint, gossip: Gossip, passphrase: &str) -> Self {
         let topic = derive_topic(passphrase);
         info!("Gossip topic: {}", hex::encode(topic.as_bytes()));
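Note: `derive_topic` itself isn't part of this diff. As a sketch of the idea (SHA-256 via the sha2 crate, a 32-byte topic, and a made-up domain-separation prefix are all assumptions, not the repo's actual code):

    use sha2::{Digest, Sha256};

    // Hypothetical sketch: every peer that knows the passphrase hashes it the
    // same way and therefore lands on the same 32-byte gossip topic.
    fn derive_topic(passphrase: &str) -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(b"can-sync/gossip-topic/v1"); // domain separator (assumed)
        hasher.update(passphrase.as_bytes());
        hasher.finalize().into()
    }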
diff --git a/examples/can-sync/src/main.rs b/examples/can-sync/src/main.rs
index c7a4287..1fc84d4 100644
--- a/examples/can-sync/src/main.rs
+++ b/examples/can-sync/src/main.rs
@@ -30,6 +30,8 @@ use crate::rendezvous::Rendezvous;
 /// ALPN protocol identifier for CAN sync peer connections.
 const SYNC_ALPN: &[u8] = b"can-sync/1";
 
+/// Entry point: loads config, connects to the local CAN service, sets up
+/// encrypted P2P networking (iroh), and discovers and syncs with peers.
 #[tokio::main]
 async fn main() -> Result<()> {
     // Initialize logging
diff --git a/examples/can-sync/src/rendezvous.rs b/examples/can-sync/src/rendezvous.rs
index 44687ab..c2a329f 100644
--- a/examples/can-sync/src/rendezvous.rs
+++ b/examples/can-sync/src/rendezvous.rs
@@ -40,6 +40,7 @@ pub struct Rendezvous {
 }
 
 impl Rendezvous {
+    /// Create a new Rendezvous by deriving keypairs for all slots from the passphrase.
     pub fn new(passphrase: &str, our_id: EndpointId) -> Result<Self> {
         let slots: Vec<Keypair> = (0..NUM_SLOTS)
             .map(|i| derive_slot_keypair(passphrase, i))
@@ -91,6 +92,7 @@ impl Rendezvous {
         }
     }
 
+    // Read every slot and report any newly discovered peer IDs.
    async fn scan_all_slots(
        &self,
        known_peers: &mut HashSet<EndpointId>,
@@ -111,6 +113,8 @@ impl Rendezvous {
         }
     }
 
+    // Pick an available slot for this peer: reuse our old slot, take an empty one,
+    // or fall back to a deterministic slot based on our ID.
    async fn claim_slot(&self, our_id_hex: &str) -> usize {
        // Check if we already own a slot (from a previous run)
        for i in 0..NUM_SLOTS {
@@ -143,6 +147,7 @@ impl Rendezvous {
         slot
     }
 
+    // Write our EndpointId into the given slot's DNS TXT record via the pkarr relay.
    async fn publish_slot(&self, slot: usize, our_id_hex: &str) -> Result<()> {
        let keypair = &self.slots[slot];
        let packet = SignedPacket::builder()
@@ -163,6 +168,7 @@ impl Rendezvous {
         Ok(())
     }
 
+    // Look up a slot's DNS TXT record and parse the EndpointId stored there, if any.
    async fn read_slot(&self, slot: usize) -> Option<EndpointId> {
        let public_key = self.slots[slot].public_key();
        let packet = self.client.resolve(&public_key).await?;
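Note: `derive_slot_keypair` is also outside this diff. A minimal sketch of the seed derivation it implies (prefix and exact scheme are assumptions; the seed would then back a pkarr Ed25519 keypair):

    use sha2::{Digest, Sha256};

    // Hypothetical: a deterministic 32-byte seed per (passphrase, slot), so every
    // peer can compute the same slot public keys and resolve them via pkarr.
    fn derive_slot_seed(passphrase: &str, slot: usize) -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(b"can-sync/rendezvous-slot/v1"); // domain separator (assumed)
        hasher.update(passphrase.as_bytes());
        hasher.update((slot as u64).to_le_bytes());
        hasher.finalize().into()
    }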
diff --git a/examples/canfs/src/api.rs b/examples/canfs/src/api.rs
index 87b45d3..d574145 100644
--- a/examples/canfs/src/api.rs
+++ b/examples/canfs/src/api.rs
@@ -49,6 +49,7 @@ pub struct CanClient {
 }
 
 impl CanClient {
+    /// Create a new client pointed at the given CAN service base URL.
     pub fn new(base_url: &str) -> Self {
         Self {
             client: reqwest::blocking::Client::new(),
diff --git a/examples/canfs/src/fs.rs b/examples/canfs/src/fs.rs
index 3853d78..742866c 100644
--- a/examples/canfs/src/fs.rs
+++ b/examples/canfs/src/fs.rs
@@ -26,6 +26,7 @@ const FILE_ATTRIBUTE_DIRECTORY: u32 = 0x10;
 const FILE_ATTRIBUTE_READONLY: u32 = 0x01;
 const FILE_ATTRIBUTE_ARCHIVE: u32 = 0x20;
 
+// Wrap a raw NTSTATUS error code into WinFSP's error type.
 fn ntstatus(code: i32) -> FspError {
     FspError::NTSTATUS(code)
 }
@@ -54,6 +55,7 @@ pub struct CanFileContext {
 impl FileSystemContext for CanFs {
     type FileContext = CanFileContext;
 
+    /// Called by Windows to check if a file/folder exists and get its basic attributes before opening it.
     fn get_security_by_name(
         &self,
         file_name: &U16CStr,
@@ -83,6 +85,7 @@ impl FileSystemContext for CanFs {
         })
     }
 
+    /// Called when a file or directory is opened; returns a context handle and fills in size/timestamps.
     fn open(
         &self,
         file_name: &U16CStr,
@@ -142,8 +145,10 @@ impl FileSystemContext for CanFs {
         })
     }
 
+    /// Called when a handle is closed; nothing to clean up since content is dropped automatically.
     fn close(&self, _context: Self::FileContext) {}
 
+    /// Returns up-to-date size and attribute info for an already-opened file or directory.
     fn get_file_info(
         &self,
         context: &Self::FileContext,
@@ -194,6 +199,7 @@ impl FileSystemContext for CanFs {
         Ok(())
     }
 
+    /// Reads file bytes at the given offset; downloads the asset from the CAN service on first access.
     fn read(
         &self,
         context: &Self::FileContext,
@@ -233,6 +239,7 @@ impl FileSystemContext for CanFs {
         Ok(count as u32)
     }
 
+    /// Lists the contents of a directory, including "." and ".." entries, for Windows Explorer and dir commands.
     fn read_directory(
         &self,
         context: &Self::FileContext,
@@ -308,6 +315,7 @@ impl FileSystemContext for CanFs {
         Ok(context.dir_buffer.read(marker, buffer))
     }
 
+    /// Reports the virtual drive's total and free space (shows as a 1 GB read-only volume).
     fn get_volume_info(&self, out_volume_info: &mut VolumeInfo) -> winfsp::Result<()> {
         out_volume_info.total_size = 1024 * 1024 * 1024; // 1 GB
         out_volume_info.free_size = 0;
diff --git a/examples/canfs/src/main.rs b/examples/canfs/src/main.rs
index b552a2a..7e71cb8 100644
--- a/examples/canfs/src/main.rs
+++ b/examples/canfs/src/main.rs
@@ -17,6 +17,7 @@ use crate::api::CanClient;
 use crate::fs::{CacheState, CanFs};
 use crate::tree::VirtualTree;
 
+/// Command-line arguments for mounting CAN service assets as a virtual Windows drive using WinFSP.
 #[derive(Parser)]
 #[command(name = "canfs", about = "Mount CAN service assets as a virtual drive")]
 struct Args {
@@ -33,6 +34,7 @@ struct Args {
     refresh_secs: u64,
 }
 
+/// Entry point: connects to the CAN service, builds a virtual file tree, and mounts it as a read-only Windows drive.
 fn main() {
     tracing_subscriber::fmt()
         .with_env_filter(
diff --git a/examples/canfs/src/tree.rs b/examples/canfs/src/tree.rs
index d8698f6..6c2e394 100644
--- a/examples/canfs/src/tree.rs
+++ b/examples/canfs/src/tree.rs
@@ -131,6 +131,7 @@ struct TreeBuilder {
 }
 
 impl TreeBuilder {
+    // Create a new tree builder with an empty root directory node.
     fn new() -> Self {
         let root = VNode {
             name: String::new(),
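Note: the download-on-first-access behavior that `read` documents boils down to a cache-or-fetch pattern. A self-contained sketch (names and types are illustrative, not the repo's):

    use std::sync::Mutex;

    // Illustrative only: cache the whole asset body on the first read, then
    // serve later reads from memory at the requested offset.
    struct LazyContent {
        cached: Mutex<Option<Vec<u8>>>,
    }

    impl LazyContent {
        fn read_at(&self, fetch: impl FnOnce() -> Vec<u8>, offset: usize, out: &mut [u8]) -> usize {
            let mut guard = self.cached.lock().unwrap();
            let bytes = guard.get_or_insert_with(fetch);
            if offset >= bytes.len() {
                return 0; // read past EOF
            }
            let n = out.len().min(bytes.len() - offset);
            out[..n].copy_from_slice(&bytes[offset..offset + n]);
            n
        }
    }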
diff --git a/examples/filemanager/src/main.rs b/examples/filemanager/src/main.rs
index cbdb6b6..82f4f84 100644
--- a/examples/filemanager/src/main.rs
+++ b/examples/filemanager/src/main.rs
@@ -9,11 +9,13 @@ use std::collections::HashMap;
 
 const CAN_API: &str = "http://127.0.0.1:3210/api/v1/can/0";
 
+// Shared state passed to every request handler; holds a reusable HTTP client.
 #[derive(Clone)]
 struct AppState {
     client: reqwest::Client,
 }
 
+/// Web-based file manager UI that proxies requests to the CAN service API.
 #[tokio::main]
 async fn main() {
     tracing_subscriber::fmt()
@@ -47,6 +49,7 @@ async fn main() {
     axum::serve(listener, app).await.unwrap();
 }
 
+// Return the single-page HTML UI for the file manager.
 async fn serve_index() -> Html<&'static str> {
     Html(html::INDEX_HTML)
 }
@@ -86,6 +89,7 @@ fn build_qs(params: &HashMap<String, String>) -> String {
     format!("?{}", qs.join("&"))
 }
 
+// Percent-encode a string for use in URL query parameters.
 fn urlencoding(s: &str) -> String {
     s.chars()
         .map(|c| match c {
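Note: the body of `urlencoding` is cut off by the hunk context. For reference, a minimal percent-encoder of the same kind (this one keeps the RFC 3986 unreserved set; the repo's exact character set may differ):

    fn percent_encode(s: &str) -> String {
        s.bytes()
            .map(|b| match b {
                b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                    (b as char).to_string()
                }
                other => format!("%{:02X}", other), // e.g. ' ' becomes "%20"
            })
            .collect()
    }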
diff --git a/examples/paste/src/main.rs b/examples/paste/src/main.rs
index ace4f44..9b56a33 100644
--- a/examples/paste/src/main.rs
+++ b/examples/paste/src/main.rs
@@ -12,11 +12,13 @@ use std::net::SocketAddr;
 
 const CAN_API: &str = "http://127.0.0.1:3210/api/v1/can/0";
 
+/// Shared HTTP client for talking to the CAN service backend.
 #[derive(Clone)]
 struct AppState {
     client: reqwest::Client,
 }
 
+/// JSON body for the text paste endpoint.
 #[derive(Deserialize)]
 struct PasteTextRequest {
     text: String,
@@ -62,6 +64,7 @@ async fn forward(resp: Result<reqwest::Response, reqwest::Error>) -> Response {
 
 // ── Handlers ─────────────────────────────────────────────────────────────
 
+/// Serve the single-page HTML frontend.
 async fn serve_index() -> Html<&'static str> {
     Html(html::INDEX_HTML)
 }
@@ -227,7 +230,8 @@ async fn proxy_thumb(
     forward(resp).await
 }
 
-/// Proxy SSE events from CAN service so the frontend gets live updates.
+/// Proxy SSE (Server-Sent Events) from the CAN service to the browser so
+/// the frontend auto-refreshes when new pastes arrive.
 async fn paste_events(
     State(state): State<AppState>,
 ) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
@@ -289,6 +293,7 @@ async fn paste_events(
 
 // ── Main ─────────────────────────────────────────────────────────────────
 
+/// Start the Paste web app: a simple pastebin that stores text and images in the CAN service.
 #[tokio::main]
 async fn main() {
     tracing_subscriber::fmt()
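Note: `paste_events` returns axum's `Sse` wrapper over an event stream. A minimal handler in the same shape, following the stock axum pattern (illustrative; the repo's version re-emits events read from the CAN service instead of a timer):

    use std::{convert::Infallible, time::Duration};
    use axum::response::sse::{Event, KeepAlive, Sse};
    use futures_util::stream::{self, Stream};
    use tokio_stream::StreamExt as _;

    async fn demo_events() -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
        // Emit a placeholder event every 5 seconds; a real proxy would forward
        // upstream SSE payloads instead.
        let stream = stream::repeat_with(|| Event::default().data("new-paste"))
            .map(Ok)
            .throttle(Duration::from_secs(5));
        Sse::new(stream).keep_alive(KeepAlive::default())
    }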
diff --git a/src/config.rs b/src/config.rs
index 3a08322..e39dc4e 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,6 +1,7 @@
 use serde::Deserialize;
 use std::path::{Path, PathBuf};
 
+/// Application settings loaded from config.yaml at startup.
 #[derive(Debug, Clone, Deserialize)]
 pub struct Config {
     pub storage_root: PathBuf,
@@ -18,6 +19,7 @@ pub struct Config {
     pub sync_api_key: Option<String>,
 }
 
+// Default values used when a field is missing from config.yaml.
 fn default_admin_token() -> String {
     "changeme".to_string()
 }
@@ -32,24 +34,29 @@ fn default_verify_interval() -> u64 {
 }
 
 impl Config {
+    /// Read and parse the YAML config file from disk.
     pub fn load(path: &Path) -> anyhow::Result<Self> {
         let contents = std::fs::read_to_string(path)?;
         let config: Config = serde_yaml::from_str(&contents)?;
         Ok(config)
     }
 
+    /// Returns the path to the SQLite database file inside storage_root.
     pub fn db_path(&self) -> PathBuf {
         self.storage_root.join(".can.db")
     }
 
+    /// Returns the path to the trash folder for soft-deleted files.
     pub fn trash_dir(&self) -> PathBuf {
         self.storage_root.join(".trash")
     }
 
+    /// Returns the path to the cached thumbnail images folder.
     pub fn thumbs_dir(&self) -> PathBuf {
         self.storage_root.join(".thumbs")
     }
 
+    /// Create the storage, trash, and thumbnail directories if they don't exist yet.
     pub fn ensure_dirs(&self) -> anyhow::Result<()> {
         std::fs::create_dir_all(&self.storage_root)?;
         std::fs::create_dir_all(self.trash_dir())?;
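Note: the config file itself isn't shown anywhere in this diff. A plausible config.yaml under the fields and defaults above (field names inferred from the struct and the default_* functions, so treat the whole thing as an assumption):

    # Hypothetical config.yaml — names inferred from the Config struct above.
    storage_root: /var/lib/can     # all other paths live under this directory
    admin_token: changeme          # optional; defaults to "changeme"
    verify_interval: 3600          # optional; interval between scrubs (assumed unit: seconds)
    sync_api_key: my-sync-secret   # optional; omit to disable the /sync endpoints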
diff --git a/src/db.rs b/src/db.rs
index 0c6542d..e787054 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -4,8 +4,11 @@ use std::sync::{Arc, Mutex};
 
 use crate::models::{Asset, AssetMeta, ListParams, SearchParams};
 
+/// Thread-safe handle to the SQLite database (wrapped in Arc so multiple
+/// threads can share it safely).
 pub type Db = Arc<Mutex<Connection>>;
 
+/// Open (or create) the SQLite database file and set up tables.
 pub fn open(path: &Path) -> anyhow::Result<Db> {
     let conn = Connection::open(path)?;
     conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")?;
@@ -13,6 +16,7 @@ pub fn open(path: &Path) -> anyhow::Result<Db> {
     Ok(Arc::new(Mutex::new(conn)))
 }
 
+/// Open a temporary in-memory database (used for tests).
 pub fn open_in_memory() -> anyhow::Result<Db> {
     let conn = Connection::open_in_memory()?;
     conn.execute_batch("PRAGMA foreign_keys=ON;")?;
@@ -20,6 +24,8 @@ pub fn open_in_memory() -> anyhow::Result<Db> {
     Ok(Arc::new(Mutex::new(conn)))
 }
 
+/// Create the assets, tags, and asset_tags tables if they don't already exist,
+/// and run any pending migrations.
 fn init_schema(conn: &Connection) -> rusqlite::Result<()> {
     conn.execute_batch(
         "
@@ -66,7 +72,7 @@ fn init_schema(conn: &Connection) -> rusqlite::Result<()> {
     Ok(())
 }
 
-/// Insert a new asset. Returns the row id.
+/// Save a new asset record to the database. Returns the auto-generated row id.
 pub fn insert_asset(conn: &Connection, asset: &Asset) -> rusqlite::Result<i64> {
     conn.execute(
         "INSERT INTO assets (timestamp, hash, mime_type, application, user_identity, description, actual_filename, human_filename, human_path, size)
@@ -87,7 +93,7 @@ pub fn insert_asset(conn: &Connection, asset: &Asset) -> rusqlite::Result<i64> {
     Ok(conn.last_insert_rowid())
 }
 
-/// Look up an asset by its hash.
+/// Find an asset by its unique SHA-256 hash. Returns None if not found.
 pub fn get_asset_by_hash(conn: &Connection, hash: &str) -> rusqlite::Result<Option<Asset>> {
     conn.query_row(
         "SELECT id, timestamp, hash, mime_type, application, user_identity, description,
@@ -115,7 +121,7 @@ pub fn get_asset_by_hash(conn: &Connection, hash: &str) -> rusqlite::Result<Opti
 }
 
 pub fn get_asset_tags(conn: &Connection, asset_id: i64) -> rusqlite::Result<Vec<String>> {
     let mut stmt = conn.prepare(
         "SELECT t.name FROM tags t
@@ -127,7 +133,7 @@ pub fn get_asset_tags(conn: &Connection, asset_id: i64) -> rusqlite::Result<Vec
 }
 
 pub fn upsert_tag(conn: &Connection, name: &str) -> rusqlite::Result<i64> {
     conn.execute(
         "INSERT OR IGNORE INTO tags (name) VALUES (?1)",
@@ -138,7 +144,7 @@ pub fn upsert_tag(conn: &Connection, name: &str) -> rusqlite::Result<i64> {
     })
 }
 
-/// Replace all tags for an asset within a transaction.
+/// Remove all existing tags for an asset and assign the new ones.
 pub fn set_asset_tags(conn: &Connection, asset_id: i64, tags: &[String]) -> rusqlite::Result<()> {
     conn.execute(
         "DELETE FROM asset_tags WHERE asset_id = ?1",
@@ -154,7 +160,8 @@ pub fn set_asset_tags(conn: &Connection, asset_id: i64, tags: &[String]) -> rusq
     Ok(())
 }
 
-/// Build an AssetMeta from an Asset row + tags.
+/// Convert an internal Asset database row into the API-friendly AssetMeta format
+/// (includes tags fetched from the join table).
 pub fn asset_to_meta(conn: &Connection, asset: &Asset) -> rusqlite::Result<AssetMeta> {
     let tags = get_asset_tags(conn, asset.id)?;
     Ok(AssetMeta {
@@ -173,7 +180,7 @@ pub fn asset_to_meta(conn: &Connection, asset: &Asset) -> rusqlite::Result<Asset
 }
 
 pub fn flag_corrupted(conn: &Connection, hash: &str, corrupted: bool) -> rusqlite::Result<()> {
     conn.execute(
         "UPDATE assets SET is_corrupted = ?1 WHERE hash = ?2",
@@ -204,7 +211,8 @@ pub fn flag_corrupted(conn: &Connection, hash: &str, corrupted: bool) -> rusqlit
     Ok(())
 }
 
-/// Update file size for an asset (used by verifier to backfill).
+/// Store the file size in bytes for an asset (used by the verifier to fill in
+/// sizes for assets that were created before the size column existed).
 pub fn update_asset_size(conn: &Connection, hash: &str, size: i64) -> rusqlite::Result<()> {
     conn.execute(
         "UPDATE assets SET size = ?1 WHERE hash = ?2",
@@ -213,7 +221,7 @@ pub fn update_asset_size(conn: &Connection, hash: &str, size: i64) -> rusqlite::
     Ok(())
 }
 
-/// Soft-delete: mark as trashed.
+/// Soft-delete an asset by marking it as trashed (the file is moved to .trash/).
 pub fn trash_asset(conn: &Connection, hash: &str) -> rusqlite::Result<()> {
     conn.execute(
         "UPDATE assets SET is_trashed = 1 WHERE hash = ?1",
@@ -222,7 +230,8 @@ pub fn trash_asset(conn: &Connection, hash: &str) -> rusqlite::Result<()> {
     Ok(())
 }
 
-/// List assets with pagination and filtering.
+/// Fetch a page of assets with optional filters (application, trashed, etc.).
+/// Returns the matching assets and the total count for pagination.
 pub fn list_assets(conn: &Connection, params: &ListParams) -> rusqlite::Result<(Vec<Asset>, i64)> {
     let limit = params.limit.unwrap_or(50);
     let offset = params.offset.unwrap_or(0);
@@ -301,7 +310,8 @@ pub fn list_assets(conn: &Connection, params: &ListParams) -> rusqlite::Result<(
     Ok((assets, total))
 }
 
-/// Search assets with various filters.
+/// Search assets with multiple filters (hash prefix, time range, MIME type, tags, etc.).
+/// Returns matching assets and total count for pagination.
 pub fn search_assets(
     conn: &Connection,
     params: &SearchParams,
@@ -428,7 +438,8 @@ pub fn search_assets(
     Ok((assets, total))
 }
 
-/// Get ALL asset records including trashed (for sync reconciliation).
+/// Get every asset record in the database, including trashed ones.
+/// Used by the sync system to compare what two peers have.
 pub fn get_all_assets(conn: &Connection) -> rusqlite::Result<Vec<Asset>> {
     let mut stmt = conn.prepare(
         "SELECT id, timestamp, hash, mime_type, application, user_identity, description,
@@ -457,7 +468,8 @@ pub fn get_all_assets(conn: &Connection) -> rusqlite::Result<Vec<Asset>> {
     Ok(assets)
 }
 
-/// Get assets with `timestamp > since` (for incremental sync queries).
+/// Get only assets added after a given timestamp (for incremental sync:
+/// "what's new since the last check?").
 pub fn get_assets_since(conn: &Connection, since: i64) -> rusqlite::Result<Vec<Asset>> {
     let mut stmt = conn.prepare(
         "SELECT id, timestamp, hash, mime_type, application, user_identity, description,
@@ -487,7 +499,8 @@ pub fn get_assets_since(conn: &Connection, since: i64) -> rusqlite::Result<Vec<
     Ok(assets)
 }
 
 … -> rusqlite::Result<Vec<Asset>> {
     let mut stmt = conn.prepare(
         "SELECT id, timestamp, hash, mime_type, application, user_identity, description,
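Note: a usage sketch for the `Db` alias — take the mutex for the duration of one short query, as the route handlers and verifier do (the wrapper function is illustrative, not from the repo):

    use can_service::db;

    fn count_new_assets(handle: &db::Db, since: i64) -> anyhow::Result<usize> {
        let conn = handle.lock().unwrap(); // short-lived lock around one query
        let assets = db::get_assets_since(&conn, since)?;
        Ok(assets.len())
    }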
diff --git a/src/error.rs b/src/error.rs
index 8728c1d..1a17495 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -2,6 +2,7 @@ use axum::http::StatusCode;
 use axum::response::{IntoResponse, Response};
 use crate::models::ErrorResponse;
 
+/// All the error types the API can return. Each variant maps to an HTTP status code.
 #[derive(Debug, thiserror::Error)]
 pub enum AppError {
     #[error("Not found: {0}")]
@@ -23,6 +24,7 @@ pub enum AppError {
     Internal(String),
 }
 
+/// Converts an AppError into an HTTP response with the right status code and a JSON body.
 impl IntoResponse for AppError {
     fn into_response(self) -> Response {
         let (status, message) = match &self {
diff --git a/src/lib.rs b/src/lib.rs
index 5eb0db7..3f04866 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,12 +1,12 @@
-pub mod config;
-pub mod db;
-pub mod error;
-pub mod hash;
-pub mod models;
-pub mod routes;
-pub mod storage;
-pub mod verifier;
-pub mod xattr;
+pub mod config;   // Configuration loading from YAML
+pub mod db;       // SQLite database access (CRUD for assets and tags)
+pub mod error;    // Centralized error types and HTTP error responses
+pub mod hash;     // SHA-256 content hashing
+pub mod models;   // Data structures shared across the codebase
+pub mod routes;   // HTTP API route handlers
+pub mod storage;  // File I/O: reading, writing, and trashing asset files
+pub mod verifier; // Background integrity checker and file-attribute syncer
+pub mod xattr;    // OS-level file metadata (xattr on Unix, NTFS ADS on Windows)
 
 use std::sync::Arc;
 
@@ -17,6 +17,7 @@ use crate::db::Db;
 /// Each message is `"hash:timestamp"` (e.g. `"abc123def456:1710000000000"`).
 pub type SyncEventSender = tokio::sync::broadcast::Sender<String>;
 
+/// Shared application state passed to every HTTP handler.
 #[derive(Clone)]
 pub struct AppState {
     pub config: Arc<Config>,
diff --git a/src/main.rs b/src/main.rs
index 7d3c68a..cca77de 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -10,6 +10,8 @@ use tower_http::trace::TraceLayer;
 use can_service::config::Config;
 use can_service::{db, routes, verifier, AppState};
 
+/// Entry point: loads config, opens the database, starts background services,
+/// and launches the HTTP server.
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
     // Initialize tracing
diff --git a/src/models.rs b/src/models.rs
index 392153c..e4c32a3 100644
--- a/src/models.rs
+++ b/src/models.rs
@@ -1,6 +1,7 @@
 use serde::{Deserialize, Serialize};
 
-/// Database representation of an asset.
+/// Internal database row for a stored file. Contains all metadata fields
+/// that are persisted in SQLite.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct Asset {
     pub id: i64,
@@ -18,7 +19,8 @@ pub struct Asset {
     pub size: i64,
 }
 
-/// API-facing asset metadata response.
+/// The public-facing version of an asset's metadata, returned by the API.
+/// Includes resolved tags and omits internal fields like `id` and `actual_filename`.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct AssetMeta {
     pub hash: String,
@@ -35,7 +37,7 @@ pub struct AssetMeta {
     pub size: i64,
 }
 
-/// Standard API response wrapper.
+/// Wraps every successful API response in `{ "status": "success", "data": ... }`.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ApiResponse<T> {
     pub status: String,
     pub data: T,
 }
 
 impl<T> ApiResponse<T> {
+    /// Create a success response wrapping the given data.
     pub fn success(data: T) -> Self {
         Self {
             status: "success".to_string(),
     }
 }
 
-/// Error response body.
+/// JSON body for error responses: `{ "status": "error", "error": "..." }`.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ErrorResponse {
     pub status: String,
@@ -67,7 +70,7 @@ pub struct ErrorResponse {
     }
 }
 
-/// Ingest success response data.
+/// Returned after a successful file upload: the timestamp, hash, and on-disk filename.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct IngestResult {
     pub timestamp: i64,
@@ -97,7 +100,9 @@ pub struct MetadataUpdate {
     pub description: Option<String>,
 }
 
-/// OS-level file attribute metadata (for xattr / NTFS ADS).
+/// Metadata stored directly on the file via OS-level attributes
+/// (xattr on macOS/Linux, NTFS Alternate Data Streams on Windows).
+/// This lets external tools read CAN metadata without hitting the database.
 #[derive(Debug, Clone, Default, PartialEq)]
 pub struct FileAttributes {
     pub mime_type: Option<String>,
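Note: a quick check of the wrapper's wire shape. Since the struct fields are exactly `status` and `data`, this is what serializes (assumes serde_json is available as a dev dependency):

    use can_service::models::ApiResponse;
    use serde_json::json;

    #[test]
    fn api_response_wire_shape() {
        let wrapped = ApiResponse::success(json!({ "hash": "abc123" }));
        assert_eq!(
            serde_json::to_value(&wrapped).unwrap(),
            json!({ "status": "success", "data": { "hash": "abc123" } })
        );
    }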
diff --git a/src/routes/asset.rs b/src/routes/asset.rs
index b4d329a..023f628 100644
--- a/src/routes/asset.rs
+++ b/src/routes/asset.rs
@@ -17,7 +17,8 @@ pub fn router() -> Router<AppState> {
         .route("/api/v1/can/0/asset/{hash}", patch(patch_asset))
 }
 
-/// GET /api/v1/can/0/asset/{hash} - Stream the physical file.
+/// Download an asset's file by its hash. Streams the raw bytes back to the
+/// client with the correct MIME type and a suggested filename.
 async fn get_asset(
     State(state): State<AppState>,
     Path(hash): Path<String>,
@@ -59,7 +60,8 @@ async fn get_asset(
         .into_response())
 }
 
-/// PATCH /api/v1/can/0/asset/{hash} - Update metadata (tags, description).
+/// Update an asset's tags and/or description. Saves changes to both the
+/// database and the OS-level file attributes.
 async fn patch_asset(
     State(state): State<AppState>,
     Path(hash): Path<String>,
diff --git a/src/routes/ingest.rs b/src/routes/ingest.rs
index 1b1c683..594541c 100644
--- a/src/routes/ingest.rs
+++ b/src/routes/ingest.rs
@@ -7,6 +7,9 @@ use crate::error::AppError;
 use crate::models::{ApiResponse, Asset, DataIngestRequest, FileAttributes, IngestResult};
 use crate::{db, hash, storage, xattr, AppState};
 
+/// Register the two upload endpoints:
+/// - POST /ingest (multipart file upload)
+/// - POST /ingest/data (JSON body upload, agent-friendly)
 pub fn router() -> Router<AppState> {
     Router::new()
         .route("/api/v1/can/0/ingest", post(ingest_multipart))
@@ -27,7 +30,9 @@ struct IngestInput {
     description: Option<String>,
 }
 
-/// Common pipeline: timestamp → hash → write file → xattr → DB insert.
+/// Core ingest pipeline shared by both upload endpoints.
+/// Steps: generate timestamp -> hash content -> write file to disk ->
+/// save OS-level metadata -> insert into database -> notify SSE subscribers.
 fn do_ingest(state: &AppState, input: IngestInput) -> Result<IngestResult, AppError> {
     let timestamp = SystemTime::now()
         .duration_since(UNIX_EPOCH)
@@ -99,7 +104,7 @@ fn do_ingest(state: &AppState, input: IngestInput) -> Result<IngestResult, AppE
 }
 
 fn parse_tags(raw: Option<&str>) -> Vec<String> {
     raw.unwrap_or("")
         .split(',')
@@ -110,6 +115,8 @@ fn parse_tags(raw: Option<&str>) -> Vec<String> {
 }
 
 // ── POST /api/v1/can/0/ingest (multipart — file uploads) ──────────────
 
+/// Handle multipart file upload. Reads the "file" field plus optional metadata
+/// fields (tags, application, user, etc.) and runs the ingest pipeline.
 async fn ingest_multipart(
     State(state): State<AppState>,
     mut multipart: Multipart,
diff --git a/src/routes/list.rs b/src/routes/list.rs
index 30283ad..fa5808b 100644
--- a/src/routes/list.rs
+++ b/src/routes/list.rs
@@ -10,6 +10,8 @@ pub fn router() -> Router<AppState> {
     Router::new().route("/api/v1/can/0/list", get(list_assets))
 }
 
+/// GET /api/v1/can/0/list - Return a paginated list of assets with their metadata.
+/// Supports query params: limit, offset, order (asc/desc), application filter.
 async fn list_assets(
     State(state): State<AppState>,
     Query(params): Query<ListParams>,
diff --git a/src/routes/meta.rs b/src/routes/meta.rs
index bdfc1b8..889fdc2 100644
--- a/src/routes/meta.rs
+++ b/src/routes/meta.rs
@@ -10,6 +10,9 @@ pub fn router() -> Router<AppState> {
     Router::new().route("/api/v1/can/0/asset/{hash}/meta", get(get_meta))
 }
 
+/// GET /api/v1/can/0/asset/{hash}/meta - Return an asset's metadata as JSON
+/// (hash, MIME type, tags, description, timestamps, etc.) without downloading
+/// the actual file.
 async fn get_meta(
     State(state): State<AppState>,
     Path(hash): Path<String>,
diff --git a/src/routes/mod.rs b/src/routes/mod.rs
index 752e9f2..f01013f 100644
--- a/src/routes/mod.rs
+++ b/src/routes/mod.rs
@@ -1,15 +1,16 @@
-pub mod ingest;
-pub mod asset;
-pub mod meta;
-pub mod list;
-pub mod search;
-pub mod thumb;
-pub mod sync;
-pub mod events;
+pub mod ingest; // POST endpoints for uploading files and JSON data
+pub mod asset;  // GET/PATCH endpoints for downloading files and updating metadata
+pub mod meta;   // GET endpoint for reading asset metadata as JSON
+pub mod list;   // GET endpoint for paginated asset listing
+pub mod search; // GET endpoint for searching/filtering assets
+pub mod thumb;  // GET endpoint for generating resized thumbnail images
+pub mod sync;   // Private P2P sync endpoints (protobuf, requires API key)
+pub mod events; // Public SSE endpoint for real-time "new asset" notifications
 
 use axum::Router;
 use crate::AppState;
 
+/// Combine all route modules into one router. Called once at startup.
 pub fn router() -> Router<AppState> {
     Router::new()
         .merge(ingest::router())
diff --git a/src/routes/search.rs b/src/routes/search.rs
index f671246..93d47c8 100644
--- a/src/routes/search.rs
+++ b/src/routes/search.rs
@@ -10,6 +10,8 @@ pub fn router() -> Router<AppState> {
     Router::new().route("/api/v1/can/0/search", get(search_assets))
 }
 
+/// GET /api/v1/can/0/search - Search assets by hash prefix, time range,
+/// MIME type, user, application, or tags. Returns paginated results.
 async fn search_assets(
     State(state): State<AppState>,
     Query(params): Query<SearchParams>,
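Note: a client-side sketch of the multipart ingest endpoint. The "file" field name comes from the handler comment above; the "tags" field name and reqwest's `multipart` cargo feature are assumptions:

    async fn upload_example(client: &reqwest::Client) -> reqwest::Result<()> {
        let part = reqwest::multipart::Part::bytes(b"hello world".to_vec())
            .file_name("hello.txt")
            .mime_str("text/plain")?;
        let form = reqwest::multipart::Form::new()
            .part("file", part)                // field name per the handler docs
            .text("tags", "demo,example");     // assumed optional metadata field
        let resp = client
            .post("http://127.0.0.1:3210/api/v1/can/0/ingest")
            .multipart(form)
            .send()
            .await?;
        println!("ingest returned {}", resp.status());
        Ok(())
    }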
diff --git a/src/routes/sync.rs b/src/routes/sync.rs
index c7a52f2..ac7d9f1 100644
--- a/src/routes/sync.rs
+++ b/src/routes/sync.rs
@@ -22,7 +22,9 @@ use tokio_stream::StreamExt;
 use crate::models::{Asset, FileAttributes};
 use crate::{db, hash, storage, xattr, AppState};
 
-// ── Protobuf message types (hand-written, no protoc needed) ─────────────
+// ── Protobuf message types ───────────────────────────────────────────────
+// These structs are serialized/deserialized as protobuf using the `prost` crate.
+// They define the wire format for peer-to-peer sync communication.
 
 #[derive(Clone, PartialEq, Message)]
 pub struct HashListRequest {}
@@ -137,6 +139,8 @@ struct HashesQuery {
 
 // ── Auth ────────────────────────────────────────────────────────────────
 
+/// Verify the X-Sync-Key header matches the configured API key.
+/// Returns 404 if sync is not configured, 401 if the key is wrong.
 fn check_sync_key(state: &AppState, headers: &HeaderMap) -> Result<(), (StatusCode, String)> {
     let expected = match &state.config.sync_api_key {
         Some(key) if !key.is_empty() => key,
@@ -157,6 +161,7 @@ fn check_sync_key(state: &AppState, headers: &HeaderMap) -> Result<(), (StatusCo
 
 // ── Helpers ─────────────────────────────────────────────────────────────
 
+/// Serialize a protobuf message into bytes.
 fn encode_proto<M: Message>(msg: &M) -> Result<Vec<u8>, (StatusCode, String)> {
     let mut buf = Vec::with_capacity(msg.encoded_len());
     msg.encode(&mut buf)
@@ -164,12 +169,16 @@ fn encode_proto<M: Message>(msg: &M) -> Result<Vec<u8>, (StatusCode, String)> {
     Ok(buf)
 }
 
+/// Wrap protobuf bytes into an HTTP 200 response with the right content type.
 fn proto_response(buf: Vec<u8>) -> (StatusCode, [(&'static str, &'static str); 1], Vec<u8>) {
     (StatusCode::OK, [("content-type", "application/x-protobuf")], buf)
 }
 
 // ── POST /sync/hashes ───────────────────────────────────────────────────
 
+/// Return a compact list of all known asset hashes + timestamps.
+/// A remote peer calls this first to figure out which assets it's missing.
+/// Supports `?since=` for incremental queries.
 async fn sync_hashes(
     State(state): State<AppState>,
     headers: HeaderMap,
@@ -208,6 +217,8 @@ async fn sync_hashes(
 
 // ── POST /sync/pull ─────────────────────────────────────────────────────
 
+/// Download full asset bundles (metadata + file content) for a list of hashes.
+/// A remote peer calls this to fetch assets it doesn't have yet.
 async fn sync_pull(
     State(state): State<AppState>,
     headers: HeaderMap,
@@ -261,6 +272,9 @@ async fn sync_pull(
 
 // ── POST /sync/push ─────────────────────────────────────────────────────
 
+/// Receive and store a new asset pushed from a remote peer.
+/// Verifies the hash, writes the file, and inserts the DB record.
+/// Returns early if the asset already exists locally.
 async fn sync_push(
     State(state): State<AppState>,
     headers: HeaderMap,
@@ -372,6 +386,7 @@ async fn sync_push(
 
 // ── POST /sync/meta ─────────────────────────────────────────────────────
 
+/// Receive a metadata update from a remote peer (description, tags, trash status).
 async fn sync_meta(
     State(state): State<AppState>,
     headers: HeaderMap,
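Note: how the hand-written prost messages go over the wire, as a round-trip sketch. `HashEntry` is a hypothetical stand-in; the real message types are the ones defined above:

    use prost::Message;

    #[derive(Clone, PartialEq, Message)]
    struct HashEntry {
        #[prost(string, tag = "1")]
        hash: String,
        #[prost(int64, tag = "2")]
        timestamp: i64,
    }

    fn roundtrip() -> Result<(), prost::DecodeError> {
        let entry = HashEntry { hash: "abc123".into(), timestamp: 1_710_000_000_000 };
        let mut buf = Vec::with_capacity(entry.encoded_len());
        entry.encode(&mut buf).expect("encoding into a Vec cannot fail");
        let decoded = HashEntry::decode(buf.as_slice())?;
        assert_eq!(entry, decoded);
        Ok(())
    }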
diff --git a/src/routes/thumb.rs b/src/routes/thumb.rs
index 19236b7..066ef27 100644
--- a/src/routes/thumb.rs
+++ b/src/routes/thumb.rs
@@ -18,12 +18,15 @@ pub fn router() -> Router<AppState> {
     )
 }
 
-/// Static fallback SVG icon for non-image assets.
+/// A simple "?" placeholder icon returned when the asset isn't a resizable image.
 const FALLBACK_SVG: &str = r##" ? "##;
 
+/// GET /api/v1/can/0/asset/{hash}/thumb/{width}/{height}
+/// Generate (or serve from cache) a resized JPEG thumbnail for image assets.
+/// Non-image assets get a placeholder SVG icon instead.
 async fn get_thumb(
     State(state): State<AppState>,
     Path((hash, max_width, max_height)): Path<(String, u32, u32)>,
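Note: the thumbnail step reduces to decode, scale, re-encode. A sketch with the `image` crate (the crate choice and 0.24-style API are assumptions; the repo's actual pipeline isn't shown in this diff):

    use image::ImageFormat;

    fn make_thumb(bytes: &[u8], max_w: u32, max_h: u32) -> image::ImageResult<Vec<u8>> {
        let img = image::load_from_memory(bytes)?;
        // thumbnail() preserves aspect ratio within the max_w x max_h box;
        // to_rgb8() drops any alpha channel, which JPEG can't carry.
        let thumb = img.thumbnail(max_w, max_h).to_rgb8();
        let mut out = std::io::Cursor::new(Vec::new());
        thumb.write_to(&mut out, ImageFormat::Jpeg)?;
        Ok(out.into_inner())
    }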
diff --git a/src/storage.rs b/src/storage.rs
index c018981..ffd16cf 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -1,59 +1,49 @@
 use std::path::{Path, PathBuf};
-use chrono::{DateTime, Utc};
 
-/// Classify a MIME type into a storage subdirectory.
-pub fn mime_to_type_dir(mime: &str) -> &str {
-    if mime.starts_with("image/") {
-        "images"
-    } else if mime == "application/pdf" {
-        "pdf"
-    } else if mime.starts_with("video/") {
-        "video"
-    } else if mime.starts_with("audio/") {
-        "audio"
-    } else if mime.starts_with("text/")
-        || mime == "application/json"
-        || mime == "application/xml"
-        || mime == "application/msword"
-        || mime == "application/rtf"
-        || mime.starts_with("application/vnd.openxmlformats")
-        || mime.starts_with("application/vnd.ms-")
-        || mime == "application/vnd.oasis.opendocument.text"
-        || mime == "application/vnd.oasis.opendocument.spreadsheet"
-    {
-        "documents"
-    } else {
-        "others"
-    }
-}
-
-/// Build the physical filename (including type subdirectory) per the spec:
-/// `{type_dir}/{YYYY-MM-DD_HH-MM}_{hash8}.{extension}`
-///
-/// Example: `images/2026-03-13_14-30_a3b2c4d5.jpg`
+/// Build the on-disk filename for a new asset.
+/// Format: `{timestamp}_{sha256hash}_{tags}.{extension}`
+/// Tags are sanitized (alphanumeric only) and truncated to fit filesystem limits.
 pub fn build_filename(
     timestamp: i64,
     hash: &str,
-    _tags: &[String],
+    tags: &[String],
     mime_type: &str,
 ) -> String {
     let extension = mime_to_extension(mime_type);
-    let type_dir = mime_to_type_dir(mime_type);
 
-    // Convert timestamp_ms to human-readable YYYY-MM-DD_HH-MM
-    let dt = DateTime::<Utc>::from_timestamp_millis(timestamp)
-        .unwrap_or_else(|| DateTime::<Utc>::from_timestamp(0, 0).unwrap());
-    let time_part = dt.format("%Y-%m-%d_%H-%M").to_string();
+    let base = format!("{}_{}", timestamp, hash);
 
-    // Use first 8 chars of hash for short identifier
-    let short_hash = if hash.len() >= 8 { &hash[..8] } else { hash };
+    if tags.is_empty() {
+        return format!("{}.{}", base, extension);
+    }
 
-    format!("{}/{}_{}. {}", type_dir, time_part, short_hash, extension)
+    // Sanitize tags: strip non-alphanumeric, join with underscore
+    let sanitized_tags: Vec<String> = tags
+        .iter()
+        .map(|t| t.chars().filter(|c| c.is_alphanumeric()).collect::<String>())
+        .filter(|t| !t.is_empty())
+        .collect();
+
+    if sanitized_tags.is_empty() {
+        return format!("{}.{}", base, extension);
+    }
+
+    let tag_part = sanitized_tags.join("_");
+
+    // Truncate to keep total filename under ~200 chars (safely under 255)
+    let max_tag_len = 200usize.saturating_sub(base.len() + extension.len() + 2); // 2 for _ and .
+    let truncated = if tag_part.len() > max_tag_len {
+        &tag_part[..max_tag_len]
+    } else {
+        &tag_part
+    };
+
+    format!("{}_{}. {}", base, truncated, extension)
         .replace(". ", ".")
 }
 
-/// Derive file extension from MIME type.
+/// Convert a MIME type string (like "image/png") into a file extension (like "png").
+/// Falls back to "bin" for unknown types.
 pub fn mime_to_extension(mime: &str) -> &str {
     match mime {
         "application/pdf" => "pdf",
@@ -87,92 +77,49 @@ pub fn mime_to_extension(mime: &str) -> &str {
     }
 }
 
-/// Write asset bytes to the storage root. Creates the type subdirectory if needed.
-/// `filename` may include a subdirectory prefix (e.g. "images/2026-01-01_12-00_abcd1234.jpg").
+/// Save a file's raw bytes to the storage directory. Returns the full path on disk.
 pub fn write_asset(root: &Path, filename: &str, data: &[u8]) -> std::io::Result<PathBuf> {
     let path = root.join(filename);
-    // Ensure parent directory exists (handles type subdirectories)
-    if let Some(parent) = path.parent() {
-        std::fs::create_dir_all(parent)?;
-    }
     std::fs::write(&path, data)?;
     Ok(path)
 }
 
-/// Read asset bytes from the storage root.
-/// `filename` may include a subdirectory prefix.
+/// Load the raw bytes of a stored file from the storage directory.
 pub fn read_asset(root: &Path, filename: &str) -> std::io::Result<Vec<u8>> {
     let path = root.join(filename);
     std::fs::read(path)
 }
 
-/// Move an asset file to the .trash directory.
-/// Handles filenames with subdirectory prefixes (e.g. "images/file.jpg").
+/// Move a file from the storage directory into the .trash/ folder (soft delete).
 pub fn trash_asset_file(root: &Path, filename: &str) -> std::io::Result<()> {
     let src = root.join(filename);
     let trash_dir = root.join(".trash");
     std::fs::create_dir_all(&trash_dir)?;
-    // Use just the file basename in trash (flatten subdirectory structure)
-    let basename = Path::new(filename)
-        .file_name()
-        .unwrap_or_else(|| std::ffi::OsStr::new(filename));
-    let dst = trash_dir.join(basename);
+    let dst = trash_dir.join(filename);
     std::fs::rename(src, dst)?;
     Ok(())
 }
 
-/// Parse a physical filename to extract the hash component.
-///
-/// New format: `{type_dir}/{YYYY-MM-DD_HH-MM}_{hash8}.{ext}`
-/// Legacy format: `{timestamp}_{sha256_64}_{tags}.{ext}`
-///
-/// Returns the hash portion (8 chars for new format, 64 chars for legacy).
+/// Extract the SHA-256 hash from a CAN filename.
+/// Expects format: `{timestamp}_{sha256hash}_{tags}.{ext}`
+/// Returns None if the filename doesn't match the expected pattern.
 pub fn parse_hash_from_filename(filename: &str) -> Option<String> {
-    // Strip any directory prefix
-    let basename = filename.rsplit('/').next().unwrap_or(filename);
-    let basename = basename.rsplit('\\').next().unwrap_or(basename);
-    // Remove extension
-    let stem = basename.rsplit_once('.')?.0;
+    let stem = filename.rsplit_once('.')?.0;
+    // Split by underscore: first part is timestamp, second is hash (64 hex chars)
     let parts: Vec<&str> = stem.splitn(3, '_').collect();
-
-    // New format: YYYY-MM-DD_HH-MM_hash8
-    // After splitn(3, '_'): ["YYYY-MM-DD", "HH-MM", "hash8"]
-    if parts.len() >= 3 && parts[0].len() == 10 && parts[0].contains('-') {
-        // New format: third part is the short hash
-        return Some(parts[2].to_string());
-    }
-
-    // Legacy format: {timestamp}_{sha256_64}_{tags}
     if parts.len() >= 2 && parts[1].len() == 64 {
-        return Some(parts[1].to_string());
+        Some(parts[1].to_string())
+    } else {
+        None
     }
-
-    None
 }
 
-/// Parse a physical filename to extract the timestamp component.
-///
-/// New format: `{type_dir}/{YYYY-MM-DD_HH-MM}_{hash8}.{ext}` → parses date to epoch ms
-/// Legacy format: `{timestamp}_{sha256}_{tags}.{ext}` → raw epoch ms
+/// Extract the millisecond timestamp from a CAN filename.
+/// Returns None if the filename doesn't match the expected pattern.
 pub fn parse_timestamp_from_filename(filename: &str) -> Option<i64> {
-    // Strip any directory prefix
-    let basename = filename.rsplit('/').next().unwrap_or(filename);
-    let basename = basename.rsplit('\\').next().unwrap_or(basename);
-
-    let stem = basename.rsplit_once('.')?.0;
-    let parts: Vec<&str> = stem.splitn(3, '_').collect();
-
-    // New format: YYYY-MM-DD_HH-MM_hash8
-    if parts.len() >= 2 && parts[0].len() == 10 && parts[0].contains('-') {
-        let date_str = format!("{}_{}", parts[0], parts[1]);
-        let dt = chrono::NaiveDateTime::parse_from_str(&date_str, "%Y-%m-%d_%H-%M").ok()?;
-        let utc = dt.and_utc();
-        return Some(utc.timestamp_millis());
-    }
-
-    // Legacy format: first part is raw epoch ms
-    let ts_str = parts.first()?;
+    let stem = filename.rsplit_once('.')?.0;
+    let ts_str = stem.split('_').next()?;
     ts_str.parse().ok()
 }
 
@@ -182,66 +129,23 @@ mod tests {
     use tempfile::TempDir;
 
     #[test]
-    fn test_mime_to_type_dir() {
-        assert_eq!(mime_to_type_dir("image/jpeg"), "images");
-        assert_eq!(mime_to_type_dir("image/png"), "images");
-        assert_eq!(mime_to_type_dir("application/pdf"), "pdf");
-        assert_eq!(mime_to_type_dir("text/plain"), "documents");
-        assert_eq!(mime_to_type_dir("application/json"), "documents");
-        assert_eq!(mime_to_type_dir("video/mp4"), "video");
-        assert_eq!(mime_to_type_dir("audio/mpeg"), "audio");
-        assert_eq!(mime_to_type_dir("application/zip"), "others");
-        assert_eq!(mime_to_type_dir("application/octet-stream"), "others");
+    fn test_build_filename_no_tags() {
+        let name = build_filename(1773014400123, "a3b2c4d5e6f7", &[], "application/pdf");
+        assert_eq!(name, "1773014400123_a3b2c4d5e6f7.pdf");
     }
 
     #[test]
-    fn test_build_filename_image() {
-        // 2026-03-13 14:30:00 UTC in ms
-        let ts = 1773412200000i64;
-        let hash = "a3b2c4d5e6f7a8b9".to_string();
-        let name = build_filename(ts, &hash, &[], "image/jpeg");
-        assert_eq!(name, "images/2026-03-13_14-30_a3b2c4d5.jpg");
+    fn test_build_filename_with_tags() {
+        let tags = vec!["photo".to_string(), "vacation".to_string()];
+        let name = build_filename(1773014400123, "a3b2c4d5e6f7", &tags, "image/jpeg");
+        assert_eq!(name, "1773014400123_a3b2c4d5e6f7_photo_vacation.jpg");
     }
 
     #[test]
-    fn test_build_filename_pdf() {
-        let ts = 1773412200000i64;
-        let hash = "deadbeef12345678".to_string();
-        let name = build_filename(ts, &hash, &[], "application/pdf");
-        assert_eq!(name, "pdf/2026-03-13_14-30_deadbeef.pdf");
-    }
-
-    #[test]
-    fn test_build_filename_text() {
-        let ts = 1773412200000i64;
-        let hash = "abcdef0123456789".to_string();
-        let name = build_filename(ts, &hash, &["ignored".to_string()], "text/plain");
-        // Tags are ignored in new format
-        assert_eq!(name, "documents/2026-03-13_14-30_abcdef01.txt");
-    }
-
-    #[test]
-    fn test_build_filename_video() {
-        let ts = 1773412200000i64;
-        let hash = "ff00ff00ff00ff00".to_string();
-        let name = build_filename(ts, &hash, &[], "video/mp4");
-        assert_eq!(name, "video/2026-03-13_14-30_ff00ff00.mp4");
-    }
-
-    #[test]
-    fn test_build_filename_audio() {
-        let ts = 1773412200000i64;
-        let hash = "aa11bb22cc33dd44".to_string();
-        let name = build_filename(ts, &hash, &[], "audio/mpeg");
-        assert_eq!(name, "audio/2026-03-13_14-30_aa11bb22.mp3");
-    }
-
-    #[test]
-    fn test_build_filename_others() {
-        let ts = 1773412200000i64;
-        let hash = "1234567890abcdef".to_string();
-        let name = build_filename(ts, &hash, &[], "application/zip");
-        assert_eq!(name, "others/2026-03-13_14-30_12345678.zip");
+    fn test_build_filename_strips_special_chars_from_tags() {
+        let tags = vec!["hello world!".to_string(), "test@123".to_string()];
+        let name = build_filename(100, "abc", &tags, "text/plain");
+        assert_eq!(name, "100_abc_helloworld_test123.txt");
     }
 
     #[test]
@@ -253,55 +157,38 @@
     }
 
     #[test]
-    fn test_write_and_read_asset_with_subdir() {
+    fn test_write_and_read_asset() {
         let dir = TempDir::new().unwrap();
         let data = b"hello world";
-        let filename = "images/2026-01-01_12-00_abcd1234.jpg";
-        let path = write_asset(dir.path(), filename, data).unwrap();
+        let path = write_asset(dir.path(), "test_file.txt", data).unwrap();
         assert!(path.exists());
-        assert!(dir.path().join("images").is_dir());
 
-        let read_back = read_asset(dir.path(), filename).unwrap();
+        let read_back = read_asset(dir.path(), "test_file.txt").unwrap();
         assert_eq!(read_back, data);
     }
 
     #[test]
-    fn test_trash_asset_file_with_subdir() {
+    fn test_trash_asset_file() {
         let dir = TempDir::new().unwrap();
-        let filename = "images/2026-01-01_12-00_abcd1234.jpg";
-        write_asset(dir.path(), filename, b"bye").unwrap();
+        write_asset(dir.path(), "to_trash.txt", b"bye").unwrap();
 
-        trash_asset_file(dir.path(), filename).unwrap();
-        assert!(!dir.path().join(filename).exists());
-        assert!(dir.path().join(".trash").join("2026-01-01_12-00_abcd1234.jpg").exists());
+        trash_asset_file(dir.path(), "to_trash.txt").unwrap();
+        assert!(!dir.path().join("to_trash.txt").exists());
+        assert!(dir.path().join(".trash").join("to_trash.txt").exists());
     }
 
     #[test]
-    fn test_parse_hash_from_new_filename() {
-        assert_eq!(
-            parse_hash_from_filename("images/2026-03-13_14-30_a3b2c4d5.jpg"),
-            Some("a3b2c4d5".to_string())
-        );
-    }
-
-    #[test]
-    fn test_parse_hash_from_legacy_filename() {
+    fn test_parse_hash_from_filename() {
         let hash_64 = "a".repeat(64);
         let filename = format!("1773014400123_{}.pdf", hash_64);
-        assert_eq!(parse_hash_from_filename(&filename), Some(hash_64));
+        assert_eq!(parse_hash_from_filename(&filename), Some(hash_64.clone()));
+
+        let filename_tags = format!("1773014400123_{}_photo_vacation.jpg", hash_64);
+        assert_eq!(parse_hash_from_filename(&filename_tags), Some(hash_64));
     }
 
     #[test]
-    fn test_parse_timestamp_from_new_filename() {
-        let ts = parse_timestamp_from_filename("images/2026-03-13_14-30_a3b2c4d5.jpg");
-        assert!(ts.is_some());
-        let ts = ts.unwrap();
-        // Should be 2026-03-13 14:30 UTC in millis
-        assert_eq!(ts, 1773412200000);
-    }
-
-    #[test]
-    fn test_parse_timestamp_from_legacy_filename() {
+    fn test_parse_timestamp_from_filename() {
         let hash_64 = "b".repeat(64);
         let filename = format!("1773014400123_{}.pdf", hash_64);
         assert_eq!(parse_timestamp_from_filename(&filename), Some(1773014400123));
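Note: a full round-trip of the helpers above under the restored naming scheme. A real hash is 64 hex characters, which is exactly what parse_hash_from_filename keys on:

    #[test]
    fn filename_roundtrip() {
        let hash = "a".repeat(64);
        let name = build_filename(1_773_014_400_123, &hash, &["demo".to_string()], "image/png");
        assert_eq!(name, format!("1773014400123_{}_demo.png", hash));
        assert_eq!(parse_hash_from_filename(&name), Some(hash.clone()));
        assert_eq!(parse_timestamp_from_filename(&name), Some(1_773_014_400_123));
    }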
"test@123".to_string()]; + let name = build_filename(100, "abc", &tags, "text/plain"); + assert_eq!(name, "100_abc_helloworld_test123.txt"); } #[test] @@ -253,55 +157,38 @@ mod tests { } #[test] - fn test_write_and_read_asset_with_subdir() { + fn test_write_and_read_asset() { let dir = TempDir::new().unwrap(); let data = b"hello world"; - let filename = "images/2026-01-01_12-00_abcd1234.jpg"; - let path = write_asset(dir.path(), filename, data).unwrap(); + let path = write_asset(dir.path(), "test_file.txt", data).unwrap(); assert!(path.exists()); - assert!(dir.path().join("images").is_dir()); - let read_back = read_asset(dir.path(), filename).unwrap(); + let read_back = read_asset(dir.path(), "test_file.txt").unwrap(); assert_eq!(read_back, data); } #[test] - fn test_trash_asset_file_with_subdir() { + fn test_trash_asset_file() { let dir = TempDir::new().unwrap(); - let filename = "images/2026-01-01_12-00_abcd1234.jpg"; - write_asset(dir.path(), filename, b"bye").unwrap(); + write_asset(dir.path(), "to_trash.txt", b"bye").unwrap(); - trash_asset_file(dir.path(), filename).unwrap(); - assert!(!dir.path().join(filename).exists()); - assert!(dir.path().join(".trash").join("2026-01-01_12-00_abcd1234.jpg").exists()); + trash_asset_file(dir.path(), "to_trash.txt").unwrap(); + assert!(!dir.path().join("to_trash.txt").exists()); + assert!(dir.path().join(".trash").join("to_trash.txt").exists()); } #[test] - fn test_parse_hash_from_new_filename() { - assert_eq!( - parse_hash_from_filename("images/2026-03-13_14-30_a3b2c4d5.jpg"), - Some("a3b2c4d5".to_string()) - ); - } - - #[test] - fn test_parse_hash_from_legacy_filename() { + fn test_parse_hash_from_filename() { let hash_64 = "a".repeat(64); let filename = format!("1773014400123_{}.pdf", hash_64); - assert_eq!(parse_hash_from_filename(&filename), Some(hash_64)); + assert_eq!(parse_hash_from_filename(&filename), Some(hash_64.clone())); + + let filename_tags = format!("1773014400123_{}_photo_vacation.jpg", hash_64); + assert_eq!(parse_hash_from_filename(&filename_tags), Some(hash_64)); } #[test] - fn test_parse_timestamp_from_new_filename() { - let ts = parse_timestamp_from_filename("images/2026-03-13_14-30_a3b2c4d5.jpg"); - assert!(ts.is_some()); - let ts = ts.unwrap(); - // Should be 2026-03-13 14:30 UTC in millis - assert_eq!(ts, 1773412200000); - } - - #[test] - fn test_parse_timestamp_from_legacy_filename() { + fn test_parse_timestamp_from_filename() { let hash_64 = "b".repeat(64); let filename = format!("1773014400123_{}.pdf", hash_64); assert_eq!(parse_timestamp_from_filename(&filename), Some(1773014400123)); diff --git a/src/verifier.rs b/src/verifier.rs index 3c91ab2..899bc09 100644 --- a/src/verifier.rs +++ b/src/verifier.rs @@ -11,10 +11,10 @@ use crate::models::FileAttributes; use crate::storage::{parse_hash_from_filename, parse_timestamp_from_filename}; use crate::xattr; -/// Start the background verifier subsystem. -/// - Runs an initial full scrub -/// - Watches for filesystem changes -/// - Runs periodic scrubs +/// Launch the background integrity checker. It does three things: +/// 1. Immediately scans all files to detect corruption or missing data. +/// 2. Watches the storage folder for file changes and re-checks them in real time. +/// 3. Re-runs the full scan on a timer (configurable in config.yaml). 
pub fn start(config: Config, db: Db) {
     let config2 = config.clone();
     let db2 = db.clone();
@@ -58,6 +58,7 @@ fn config3_for_watcher(config: Config) -> Config {
     config
 }
 
+/// Watch the storage directory for file changes and verify each changed file.
 async fn run_watcher(config: Config, db: Db) -> anyhow::Result<()> {
     let (tx, mut rx) = mpsc::channel::<notify::Event>(100);
     let storage_root = config.storage_root.clone();
@@ -114,7 +115,9 @@ async fn run_watcher(config: Config, db: Db) -> anyhow::Result<()> {
     Ok(())
 }
 
-/// Run a full scrub: verify every active asset's hash.
+/// Full integrity scan: re-hashes every active file on disk and compares it
+/// to the expected hash in the database. Also syncs OS-level file attributes
+/// and backfills missing file sizes.
 async fn run_scrub(config: &Config, db: &Db) -> anyhow::Result<()> {
     let assets = {
         let conn = db.lock().unwrap();
@@ -276,7 +279,8 @@ async fn run_scrub(config: &Config, db: &Db) -> anyhow::Result<()> {
     Ok(())
 }
 
-/// Verify a single file by its physical filename.
+/// Re-hash a single file and flag it as corrupted if the hash doesn't match.
+/// Called when the filesystem watcher detects a change.
 async fn verify_single_file(
     config: &Config,
     db: &Db,
diff --git a/src/xattr.rs b/src/xattr.rs
index 62490c0..ec777da 100644
--- a/src/xattr.rs
+++ b/src/xattr.rs
@@ -27,7 +27,8 @@ pub fn read_attributes(path: &Path) -> std::io::Result<FileAttributes> {
     }
 }
 
-// ── Unix implementation using xattr crate ──
+// ── Unix implementation ──
+// Stores each metadata field as an extended attribute (e.g. "user.can.mime_type").
 
 #[cfg(unix)]
 fn write_xattr(path: &Path, attrs: &FileAttributes) -> std::io::Result<()> {
@@ -58,6 +59,7 @@ fn write_xattr(path: &Path, attrs: &FileAttributes) -> std::io::Result<()> {
     Ok(())
 }
 
+/// Read all CAN metadata from Unix extended attributes on a file.
 #[cfg(unix)]
 fn read_xattr(path: &Path) -> std::io::Result<FileAttributes> {
     use xattr::FileExt;
@@ -81,8 +83,10 @@ fn read_xattr(path: &Path) -> std::io::Result<FileAttributes> {
     })
 }
 
-// ── Windows implementation using NTFS Alternate Data Streams ──
+// ── Windows implementation ──
+// Stores each metadata field as an NTFS Alternate Data Stream (e.g. "file.txt:can.mime_type").
 
+/// Write CAN metadata fields as NTFS Alternate Data Streams on a file.
 #[cfg(windows)]
 fn write_ntfs_ads(path: &Path, attrs: &FileAttributes) -> std::io::Result<()> {
     let base = path.to_string_lossy();
@@ -111,6 +115,7 @@ fn write_ntfs_ads(path: &Path, attrs: &FileAttributes) -> std::io::Result<()> {
     Ok(())
 }
 
+/// Read all CAN metadata from NTFS Alternate Data Streams on a file.
 #[cfg(windows)]
 fn read_ntfs_ads(path: &Path) -> std::io::Result<FileAttributes> {
     let base = path.to_string_lossy();
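Note: the Unix mechanism in isolation, via the `xattr` crate this module builds on (standalone illustration; the attribute name mirrors the "user.can." convention above):

    fn xattr_roundtrip(path: &std::path::Path) -> std::io::Result<()> {
        xattr::set(path, "user.can.mime_type", b"text/plain")?;
        let value = xattr::get(path, "user.can.mime_type")?; // io::Result<Option<Vec<u8>>>
        assert_eq!(value.as_deref(), Some(&b"text/plain"[..]));
        Ok(())
    }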