Merge branch 'master' into ep/smp-web-spike

This commit is contained in:
Evgeny Poberezkin
2026-04-20 12:15:01 +01:00
31 changed files with 5791 additions and 543 deletions
+22 -12
View File
@@ -173,7 +173,7 @@ jobs:
-v ${{ github.workspace }}:/project \
build/${{ matrix.os }}:latest
- name: Build smp-server (postgresql) and tests
- name: Build smp-server, xftp-server (postgresql) and tests
if: matrix.should_run == true
shell: docker exec -t builder sh -eu {0}
run: |
@@ -182,12 +182,12 @@ jobs:
cabal update
cabal build --jobs=$(nproc) --enable-tests -fserver_postgres
mkdir -p /out
for i in smp-server simplexmq-test; do
for i in smp-server xftp-server simplexmq-test; do
bin=$(find /project/dist-newstyle -name "$i" -type f -executable)
chmod +x "$bin"
mv "$bin" /out/
done
strip /out/smp-server
strip /out/smp-server /out/xftp-server
- name: Copy simplexmq-test from container
if: matrix.should_run == true
@@ -195,19 +195,29 @@ jobs:
run: |
docker cp builder:/out/simplexmq-test .
- name: Copy smp-server (postgresql) from container and prepare it
- name: Copy smp-server, xftp-server (postgresql) from container and prepare it
if: startsWith(github.ref, 'refs/tags/v') && matrix.should_run == true
id: prepare-postgres
shell: bash
run: |
name="smp-server-postgres-ubuntu-${{ matrix.os_underscore }}-${{ matrix.arch }}"
docker cp builder:/out/smp-server $name
printf 'bins<<EOF\n' > bins.output
printf 'hashes<<EOF\n' > hashes.output
path="${{ github.workspace }}/$name"
echo "bin=$path" >> $GITHUB_OUTPUT
for i in smp-server xftp-server; do
name="${i}-postgres-ubuntu-${{ matrix.os_underscore }}-${{ matrix.arch }}"
docker cp builder:/out/$i $name
hash="SHA2-256($name)= $(openssl sha256 $path | cut -d' ' -f 2)"
printf 'hash=%s' "$hash" >> $GITHUB_OUTPUT
path="${{ github.workspace }}/$name"
hash="SHA2-256($name)= $(openssl sha256 $path | cut -d' ' -f 2)"
printf '%s\n' "$path" >> bins.output
printf '%s\n\n' "$hash" >> hashes.output
done
printf 'EOF\n' >> bins.output
printf 'EOF\n' >> hashes.output
cat bins.output >> "$GITHUB_OUTPUT"
cat hashes.output >> "$GITHUB_OUTPUT"
- name: Build everything else (standard)
if: matrix.should_run == true
@@ -257,10 +267,10 @@ jobs:
fail_on_unmatched_files: true
body: |
${{ steps.prepare-regular.outputs.hashes }}
${{ steps.prepare-postgres.outputs.hash }}
${{ steps.prepare-postgres.outputs.hashes }}
files: |
${{ steps.prepare-regular.outputs.bins }}
${{ steps.prepare-postgres.outputs.bin }}
${{ steps.prepare-postgres.outputs.bins }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+1 -1
View File
@@ -33,7 +33,7 @@ To initialize the server use `smp-server init -n <fqdn>` (or `smp-server init --
SMP server uses in-memory persistence with an optional append-only log of created queues that allows to re-start the server without losing the connections. This log is compacted on every server restart, permanently removing suspended and removed queues.
To enable store log, initialize server using `smp-server -l` command, or modify `smp-server.ini` created during initialization (uncomment `enable: on` option in the store log section). Use `smp-server --help` for other usage tips.
To enable store log, initialize server using `smp-server -l` command, or modify `smp-server.ini` created during initialization (uncomment `enable = on` option in the store log section). Use `smp-server --help` for other usage tips.
Starting from version 2.3.0, when store log is enabled, the server would also enable saving undelivered messages on exit and restoring them on start. This can be disabled via a separate setting `restore_messages` in `smp-server.ini` file. Saving messages would only work if the server is stopped with SIGINT signal (keyboard interrupt), if it is stopped with SIGTERM signal the messages would not be saved.
+3 -3
View File
@@ -34,7 +34,7 @@ xftpMediaContent = $(embedDir "apps/xftp-server/static/media/")
-- xftpFilePageHtml :: ByteString
-- xftpFilePageHtml = $(embedFile "apps/xftp-server/static/file.html")
xftpGenerateSite :: XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO ()
xftpGenerateSite :: XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO ()
xftpGenerateSite cfg info onionHost path = do
let substs = xftpSubsts cfg info onionHost
Web.generateSite embeddedContent (render (Web.indexHtml embeddedContent) substs) [] path
@@ -50,10 +50,10 @@ xftpGenerateSite cfg info onionHost path = do
createDirectoryIfMissing True dir
forM_ content_ $ \(fp, content) -> B.writeFile (dir </> fp) content
xftpServerInformation :: XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> ByteString
xftpServerInformation :: XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> ByteString
xftpServerInformation cfg info onionHost = render (Web.indexHtml embeddedContent) (xftpSubsts cfg info onionHost)
xftpSubsts :: XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> [(ByteString, Maybe ByteString)]
xftpSubsts :: XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> [(ByteString, Maybe ByteString)]
xftpSubsts XFTPServerConfig {fileExpiration, logStatsInterval, allowNewFiles, newFileBasicAuth} information onionHost =
[("smpConfig", Nothing), ("xftpConfig", Just "y")] <> substConfig <> serverInfoSubsts simplexmqSource information <> [("onionHost", strEncode <$> onionHost), ("iniFileName", Just "file-server.ini")]
where
@@ -0,0 +1,472 @@
# XFTP Server PostgreSQL Backend
## Overview
Add PostgreSQL backend support to xftp-server, following the SMP server pattern. Supports bidirectional migration between STM (in-memory with StoreLog) and PostgreSQL backends.
## Goals
- PostgreSQL-backed file metadata storage as an alternative to STM + StoreLog
- Polymorphic server code via `FileStoreClass` typeclass with IO-based methods (following `QueueStoreClass` pattern)
- Bidirectional migration: StoreLog <-> PostgreSQL via CLI commands
- Shared `server_postgres` cabal flag (same flag enables both SMP and XFTP Postgres support)
- INI-based backend selection at runtime
## Architecture
### FileStoreClass Typeclass
IO-based typeclass following the `QueueStoreClass` pattern — each method is a self-contained IO action, with the implementation responsible for its own atomicity (STM backend wraps in `atomically`, Postgres backend uses database transactions):
```haskell
class FileStoreClass s where
type FileStoreConfig s
-- Lifecycle
newFileStore :: FileStoreConfig s -> IO s
closeFileStore :: s -> IO ()
-- File operations
addFile :: s -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> IO (Either XFTPErrorType ())
setFilePath :: s -> SenderId -> FilePath -> IO (Either XFTPErrorType ())
addRecipient :: s -> SenderId -> FileRecipient -> IO (Either XFTPErrorType ())
getFile :: s -> SFileParty p -> XFTPFileId -> IO (Either XFTPErrorType (FileRec, C.APublicAuthKey))
deleteFile :: s -> SenderId -> IO (Either XFTPErrorType ())
blockFile :: s -> SenderId -> BlockingInfo -> Bool -> IO (Either XFTPErrorType ())
deleteRecipient :: s -> RecipientId -> FileRec -> IO ()
ackFile :: s -> RecipientId -> IO (Either XFTPErrorType ())
-- Expiration (with LIMIT for Postgres; called in a loop until empty)
expiredFiles :: s -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)]
-- Storage and stats (for init-time computation)
getUsedStorage :: s -> IO Int64
getFileCount :: s -> IO Int
```
- STM backend: each method wraps its STM transaction in `atomically` internally.
- Postgres backend: each method runs its query via `withDB` / database connection internally.
No polymorphic monad or `runStore` dispatcher needed — unlike `MsgStoreClass`, XFTP file operations are individually atomic and don't require grouping multiple operations into backend-dependent transactions.
### PostgresFileStore Data Type
```haskell
data PostgresFileStore = PostgresFileStore
{ dbStore :: DBStore,
dbStoreLog :: Maybe (StoreLog 'WriteMode)
}
```
- `dbStore` — connection pool created via `createDBStore`, runs schema migrations on init.
- `dbStoreLog` — optional parallel log file (enabled by `db_store_log` INI setting). When present, every mutation (`addFile`, `setFilePath`, `deleteFile`, `blockFile`, `addRecipient`, `ackFile`) also writes to this log via a `withLog` wrapper. `withLog` is called AFTER the DB operation succeeds (so the log reflects committed state only). Log write failures are non-fatal (logged as warnings, do not fail the DB operation). This provides an audit trail and enables recovery via export.
`closeFileStore` for Postgres calls `closeDBStore` (closes connection pool) then `mapM_ closeStoreLog dbStoreLog` (flushes and closes the parallel log). For STM, it closes the storeLog. Called from a `finally` block during server shutdown, matching SMP's `stopServer` → `closeMsgStore` → `closeQueueStore` pattern.
### STMFileStore Type
After extracting from current `Store.hs`, `STMFileStore` retains the file and recipient maps but no longer owns `usedStorage` (moved to `XFTPEnv`):
```haskell
data STMFileStore = STMFileStore
{ files :: TMap SenderId FileRec,
recipients :: TMap RecipientId (SenderId, RcvPublicAuthKey)
}
```
`closeFileStore` for STM is a no-op (TMaps are garbage-collected; the env-level `storeLog` is closed separately by the server).
### Error Handling
Postgres operations follow SMP's `withDB` / `handleDuplicate` pattern:
```haskell
withDB :: Text -> PostgresFileStore -> (DB.Connection -> IO (Either XFTPErrorType a)) -> ExceptT XFTPErrorType IO a
withDB op st action =
ExceptT $ E.try (withTransaction (dbStore st) action) >>= either logErr pure
where
logErr :: E.SomeException -> IO (Either XFTPErrorType a)
logErr e = logError ("STORE: " <> err) $> Left INTERNAL
where
err = op <> ", withDB, " <> tshow e
handleDuplicate :: SqlError -> IO (Either XFTPErrorType a)
handleDuplicate e = case constraintViolation e of
Just (UniqueViolation _) -> pure $ Left DUPLICATE_
_ -> E.throwIO e
```
- All DB operations wrapped in `withDB` — catches exceptions, logs, returns `INTERNAL`.
- Unique constraint violations caught by `handleDuplicate` and mapped to `DUPLICATE_`.
- UPDATE operations verified with `assertUpdated` — returns `AUTH` if 0 rows affected (matching SMP pattern, prevents silent failures when WHERE clause doesn't match).
- Critical sections (DB write + TVar update) wrapped in `uninterruptibleMask_` to prevent async exceptions from leaving inconsistent state between DB and TVars.
### FileRec and TVar Fields
`FileRec` retains its `TVar` fields (matching SMP's `PostgresQueue` pattern):
```haskell
data FileRec = FileRec
{ senderId :: SenderId,
fileInfo :: FileInfo,
filePath :: TVar (Maybe FilePath),
recipientIds :: TVar (Set RecipientId),
createdAt :: RoundedFileTime,
fileStatus :: TVar ServerEntityStatus
}
```
- **STM backend**: TVars are the source of truth, as currently.
- **Postgres backend**: `getFile` reads from DB and creates a `FileRec` with fresh TVars populated from the DB row (matching SMP's `mkQ` pattern — `newTVarIO` per load). Mutation methods (`setFilePath`, `blockFile`, etc.) update both the DB (persistence) and the TVars (in-session consistency). The `recipientIds` TVar is initialized to `S.empty` — no subquery needed because no server code reads `recipientIds` directly; all recipient operations go through the typeclass methods (`addRecipient`, `deleteRecipient`, `ackFile`), which query the `recipients` table for Postgres.
### usedStorage Ownership
`usedStorage :: TVar Int64` moves from the store to `XFTPEnv`. The store typeclass does **not** manage `usedStorage` — it only provides `getUsedStorage` for init-time computation.
- **STM init**: StoreLog replay calls `setFilePath` (which only sets the filePath TVar — the STM `setFilePath` implementation is changed to **not** update `usedStorage`). Similarly, STM `deleteFile` (Store.hs line 117) and `blockFile` (line 125) are changed to **not** update `usedStorage` — the server handles all `usedStorage` adjustments externally. After replay, `getUsedStorage` computes the sum over all file sizes (matching current `countUsedStorage` behavior).
- **Postgres init**: `getUsedStorage` executes `SELECT COALESCE(SUM(file_size), 0) FROM files`.
- **Runtime**: Server manages `usedStorage` TVar directly for reserve/commit/rollback during uploads, and adjusts after `deleteFile`/`blockFile` calls.
**Note on `getUsedStorage` semantics**: The current STM `countUsedStorage` sums all file sizes unconditionally (including files without `filePath` set, i.e., created but not yet uploaded). The Postgres `getUsedStorage` matches this: `SELECT SUM(file_size) FROM files` (no `WHERE file_path IS NOT NULL`). In practice, orphaned files (created but never uploaded) are rare and short-lived (expired within 48h), so the difference is negligible. A future improvement could filter by `file_path IS NOT NULL` in both backends to reflect actual disk usage more accurately.
### Server.hs Refactoring
`Server.hs` becomes polymorphic over `FileStoreClass s`. Since all typeclass methods are IO, call sites replace `atomically` with direct IO calls to the store.
**Call sites requiring changes** (exhaustive list):
1. **`receiveServerFile`** (line 563): `atomically $ writeTVar filePath (Just fPath)` → `setFilePath store senderId fPath`. The `reserve` logic (line 551-555) stays as direct TVar manipulation on `usedStorage` from `XFTPEnv`.
2. **`verifyXFTPTransmission`** (line 453): `atomically $ verify =<< getFile st party fId` — the `getFile` call and subsequent `readTVar fileStatus` are in a single `atomically` block. Refactored to: `getFile st party fId` (IO), then `readTVarIO (fileStatus fr)` from the returned `FileRec` (safe for both backends — STM TVar is the source of truth, Postgres TVar is a fresh snapshot from DB).
3. **`retryAdd`** (line 516): Signature `XFTPFileId -> STM (Either XFTPErrorType a)` → `XFTPFileId -> IO (Either XFTPErrorType a)`. The `atomically` call (line 520) replaced with `liftIO`.
4. **`deleteOrBlockServerFile_`** (line 620): Parameter `FileStore -> STM (Either XFTPErrorType ())` → `FileStoreClass s => s -> IO (Either XFTPErrorType ())`. The `atomically` call (line 626) removed — the store method is already IO. After the store action, server adjusts `usedStorage` TVar in `XFTPEnv` based on `fileInfo.size`.
5. **`ackFileReception`** (line 605): `atomically $ deleteRecipient st rId fr` → `deleteRecipient st rId fr`.
6. **Control port `CPDelete`/`CPBlock`** (lines 371, 377): `atomically $ getFile fs SFRecipient fileId` → `getFile fs SFRecipient fileId`.
7. **`expireServerFiles`** (line 636): Replace per-file `expiredFilePath` iteration with batched `expiredFiles st old batchSize`, which returns `[(SenderId, Maybe FilePath, Word32)]` — the `Word32` file size is needed so the server can adjust the `usedStorage` TVar after each deletion. Called in a loop until the returned list is empty. The `itemDelay` between files applies to the deletion loop over each batch, not the query itself. STM backend ignores the batch size limit (returns all expired files from TMap scan); Postgres uses `LIMIT`.
8. **`restoreServerStats`** (line 694): `FileStore {files, usedStorage} <- asks store` accesses store fields directly. Refactored to: `usedStorage` from `XFTPEnv` via `asks usedStorage`, file count via `getFileCount store`. STM: `M.size <$> readTVarIO files`. Postgres: `SELECT COUNT(*) FROM files`.
### Store Config Selection
GADT in `Env.hs`:
```haskell
data XFTPStoreConfig s where
XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore
#if defined(dbServerPostgres)
XSCDatabase :: PostgresFileStoreCfg -> XFTPStoreConfig PostgresFileStore
#endif
```
`XFTPEnv` becomes polymorphic:
```haskell
data XFTPEnv s = XFTPEnv
{ config :: XFTPServerConfig,
store :: s,
usedStorage :: TVar Int64,
storeLog :: Maybe (StoreLog 'WriteMode),
...
}
```
The `M` monad (`ReaderT (XFTPEnv s) IO`) and all functions in `Server.hs` gain `FileStoreClass s =>` constraints.
**StoreLog lifecycle per backend:**
- **STM mode**: `storeLog = Just sl` (current behavior — append-only log for persistence and recovery).
- **Postgres mode**: `storeLog = Nothing` (main storeLog disabled — Postgres is the source of truth). The optional parallel `dbStoreLog` inside `PostgresFileStore` provides audit/recovery if enabled via `db_store_log` INI setting.
The existing `withFileLog` pattern in Server.hs continues to work unchanged — it maps over `Maybe (StoreLog 'WriteMode)`, which is `Nothing` in Postgres mode so the calls become no-ops.
### Main.hs Store Type Dispatch
The `Start` CLI command gains a `--confirm-migrations` flag (default `MCConsole` — manual prompt, matching SMP's `StartOptions`). For automated deployments, `--confirm-migrations up` auto-applies forward migrations. The import command uses `MCYesUp` (always auto-apply).
Following SMP's existential dispatch pattern (`AStoreType` + `run`), `Main.hs` selects the store type from INI config and dispatches to the polymorphic server:
```haskell
runServer ini = do
let storeType = fromRight "memory" $ lookupValue "STORE_LOG" "store_files" ini
case storeType of
"memory" -> run $ XSCMemory (enableStoreLog $> storeLogFilePath)
"database" ->
#if defined(dbServerPostgres)
run $ XSCDatabase PostgresFileStoreCfg {..}
#else
exitError "server not compiled with Postgres support"
#endif
_ -> exitError $ "Invalid store_files value: " <> storeType
where
run :: FileStoreClass s => XFTPStoreConfig s -> IO ()
run storeCfg = do
env <- newXFTPServerEnv storeCfg config
runReaderT (xftpServer config) env
```
**`newXFTPServerEnv` refactored signature:**
```haskell
newXFTPServerEnv :: FileStoreClass s => XFTPStoreConfig s -> XFTPServerConfig -> IO (XFTPEnv s)
newXFTPServerEnv storeCfg config = do
(store, storeLog) <- case storeCfg of
XSCMemory storeLogPath -> do
st <- newFileStore ()
sl <- mapM (`readWriteFileStore` st) storeLogPath
pure (st, sl)
XSCDatabase dbCfg -> do
st <- newFileStore dbCfg
pure (st, Nothing) -- main storeLog disabled for Postgres
usedStorage <- newTVarIO =<< getUsedStorage store
...
pure XFTPEnv {config, store, usedStorage, storeLog, ...}
```
### Startup Config Validation
Following SMP's `checkMsgStoreMode` pattern, `Main.hs` validates config before starting:
- **`store_files=database` + StoreLog file exists** (without `db_store_log=on`): Error — "StoreLog file present but store_files is `database`. Use `xftp-server database import` to migrate, or set `db_store_log: on`."
- **`store_files=database` + schema doesn't exist**: Error — "Create schema in PostgreSQL or use `xftp-server database import`."
- **`store_files=memory` + Postgres schema exists**: Warning — "Postgres schema exists but store_files is `memory`. Data in Postgres will not be used."
- **Binary compiled without `server_postgres` + `store_files=database`**: Error — "Server not compiled with Postgres support."
## Module Structure
```
src/Simplex/FileTransfer/Server/
Store.hs -- FileStoreClass typeclass + shared types (FileRec, FileRecipient, etc.)
Store/
STM.hs -- STMFileStore (extracted from current Store.hs)
Postgres.hs -- PostgresFileStore [CPP-guarded]
Postgres/
Migrations.hs -- Schema migrations [CPP-guarded]
Config.hs -- PostgresFileStoreCfg [CPP-guarded]
StoreLog.hs -- Unchanged (interchange format for both backends + migration)
Env.hs -- XFTPStoreConfig GADT, polymorphic XFTPEnv
Main.hs -- Store selection, migration CLI commands
Server.hs -- Polymorphic over FileStoreClass
```
## PostgreSQL Schema
Initial migration (`20260325_initial`):
```sql
CREATE TABLE files (
sender_id BYTEA NOT NULL PRIMARY KEY,
file_size INT4 NOT NULL,
file_digest BYTEA NOT NULL,
sender_key BYTEA NOT NULL,
file_path TEXT,
created_at INT8 NOT NULL,
status TEXT NOT NULL DEFAULT 'active'
);
CREATE TABLE recipients (
recipient_id BYTEA NOT NULL PRIMARY KEY,
sender_id BYTEA NOT NULL REFERENCES files ON DELETE CASCADE,
recipient_key BYTEA NOT NULL
);
CREATE INDEX idx_recipients_sender_id ON recipients (sender_id);
CREATE INDEX idx_files_created_at ON files (created_at);
```
- `file_size` is `INT4` matching `Word32` in `FileInfo.size`
- `sender_key` and `recipient_key` stored as `BYTEA` using binary encoding via `C.encodePubKey` / `C.decodePubKey` (matching SMP's `ToField`/`FromField` instances for `APublicAuthKey` — includes algorithm type tag in the binary format)
- `file_path` nullable (set after upload completes via `setFilePath`)
- `ON DELETE CASCADE` for recipients when file is hard-deleted
- `created_at` stores rounded epoch seconds (1-hour precision, `RoundedFileTime`)
- `status` as TEXT via `StrEncoding` (`ServerEntityStatus`: `EntityActive`, `EntityBlocked info`, `EntityOff`)
- Hard deletes (no `deleted_at` column)
- No PL/pgSQL functions needed; `setFilePath` uses `WHERE file_path IS NULL` to prevent duplicate uploads (the `UPDATE` itself acquires a row-level lock)
- `used_storage` computed on startup: `SELECT COALESCE(SUM(file_size), 0) FROM files` (matches STM `countUsedStorage` — all files, see usedStorage Ownership section)
### Migrations Module
Following SMP's `QueueStore/Postgres/Migrations.hs` pattern:
```haskell
module Simplex.FileTransfer.Server.Store.Postgres.Migrations
( xftpServerMigrations,
)
where
import Data.List (sortOn)
import Data.Text (Text)
import Simplex.Messaging.Agent.Store.Shared
import Text.RawString.QQ (r)
xftpSchemaMigrations :: [(String, Text, Maybe Text)]
xftpSchemaMigrations =
[ ("20260325_initial", m20260325_initial, Nothing)
]
xftpServerMigrations :: [Migration]
xftpServerMigrations = sortOn name $ map migration xftpSchemaMigrations
where
migration (name, up, down) = Migration {name, up, down = down}
m20260325_initial :: Text
m20260325_initial =
[r|
CREATE TABLE files (
sender_id BYTEA NOT NULL PRIMARY KEY,
...
);
|]
```
The `Migration` type (from `Simplex.Messaging.Agent.Store.Shared`) has fields `{name :: String, up :: Text, down :: Maybe Text}`. Initial migration has `Nothing` for `down`. Future migrations should include `Just down_migration` for rollback support. Called via `createDBStore dbOpts xftpServerMigrations (MigrationConfig confirmMigrations Nothing)`.
### Postgres Operations
Key query patterns:
- **`addFile`**: `INSERT INTO files (...) VALUES (...)`, return `DUPLICATE_` on unique violation.
- **`setFilePath`**: `UPDATE files SET file_path = ? WHERE sender_id = ? AND file_path IS NULL`, verified with `assertUpdated` (returns `AUTH` if 0 rows affected — file not found or already uploaded). The `WHERE file_path IS NULL` prevents duplicate uploads; the `UPDATE` acquires a row lock implicitly. Only persists the path; `usedStorage` managed by server.
- **`addRecipient`**: `INSERT INTO recipients (...)`, plus check for duplicates. No need for `recipientIds` TVar update — Postgres derives it from the table.
- **`getFile`** (sender): `SELECT ... FROM files WHERE sender_id = ?`, returns auth key from `sender_key` column.
- **`getFile`** (recipient): `SELECT f.*, r.recipient_key FROM recipients r JOIN files f ON ... WHERE r.recipient_id = ?`.
- **`deleteFile`**: `DELETE FROM files WHERE sender_id = ?` (recipients cascade).
- **`blockFile`**: `UPDATE files SET status = ? WHERE sender_id = ?`. When `deleted = True`, the server adjusts `usedStorage` externally (matching current STM behavior where `blockFile` only updates status and storage, not `filePath`).
- **`expiredFiles`**: `SELECT sender_id, file_path, file_size FROM files WHERE created_at + ? < ? LIMIT ?` — batched query replaces per-file iteration, includes `file_size` for `usedStorage` adjustment. Called in a loop until no rows returned.
## INI Configuration
New keys in `[STORE_LOG]` section:
```ini
[STORE_LOG]
enable: on
store_files: memory # memory | database
db_connection: postgresql://xftp@/xftp_server_store
db_schema: xftp_server
db_pool_size: 10
db_store_log: off
expire_files_hours: 48
```
`store_files` selects the backend (`store_files` rather than `store_queues` because XFTP stores files, not queues):
- `memory` -> `XSCMemory` (current behavior)
- `database` -> `XSCDatabase` (requires `server_postgres` build flag)
### INI Template Generation (`xftp-server init`)
The `iniFileContent` function in `Main.hs` must be updated to generate the new keys in the `[STORE_LOG]` section. Following SMP's `iniDbOpts` pattern with `optDisabled'` (prefixes `"# "` when value equals default), Postgres keys are generated commented out by default:
```ini
[STORE_LOG]
enable: on
# File storage mode: `memory` or `database` (PostgreSQL).
store_files: memory
# Database connection settings for PostgreSQL database (`store_files: database`).
# db_connection: postgresql://xftp@/xftp_server_store
# db_schema: xftp_server
# db_pool_size: 10
# Write database changes to store log file
# db_store_log: off
expire_files_hours: 48
```
Reuses `iniDBOptions` from `Simplex.Messaging.Server.CLI` for runtime parsing (falls back to defaults when keys are commented out or missing). `enableDbStoreLog'` pattern (`settingIsOn "STORE_LOG" "db_store_log"`) controls `dbStoreLogPath`.
### PostgresFileStoreCfg
```haskell
data PostgresFileStoreCfg = PostgresFileStoreCfg
{ dbOpts :: DBOpts,
dbStoreLogPath :: Maybe FilePath,
confirmMigrations :: MigrationConfirmation
}
```
No `deletedTTL` (hard deletes).
### Default DB Options
```haskell
defaultXFTPDBOpts :: DBOpts
defaultXFTPDBOpts =
DBOpts
{ connstr = "postgresql://xftp@/xftp_server_store",
schema = "xftp_server",
poolSize = 10,
createSchema = False
}
```
## Migration CLI
Bidirectional migration via StoreLog as interchange format:
```
xftp-server database import [--database DB_CONN] [--schema DB_SCHEMA] [--pool-size N]
xftp-server database export [--database DB_CONN] [--schema DB_SCHEMA] [--pool-size N]
```
No `--table` flag needed (unlike SMP which has queues/messages/all) — XFTP has a single entity type (files + recipients, always migrated together).
CLI options reuse `dbOptsP` parser from `Simplex.Messaging.Server.CLI`.
### Import (StoreLog -> PostgreSQL)
1. Confirm: prompt user with database connection details and StoreLog path
2. Read and replay StoreLog into temporary `STMFileStore`
3. Connect to PostgreSQL, run schema migrations (`createSchema = True`, `confirmMigrations = MCYesUp`)
4. Batch-insert file records into `files` table using PostgreSQL COPY protocol (matching SMP's `batchInsertQueues` pattern for performance). Progress reported every 10k files.
5. Batch-insert recipient records into `recipients` table using COPY protocol
6. Verify counts: `SELECT COUNT(*) FROM files` / `recipients` — warn if mismatch
7. Rename StoreLog to `.bak` (prevents accidental re-import, preserves original for rollback)
8. Report counts
### Export (PostgreSQL -> StoreLog)
1. Confirm: prompt user with database connection details and output path. Fail if output file already exists.
2. Connect to PostgreSQL
3. Open new StoreLog file for writing
4. Fold over all file records, writing per file (in this order, matching existing `writeFileStore`): `AddFile` (with `ServerEntityStatus` — this preserves `EntityBlocked` state), `AddRecipients`, then `PutFile` (if `file_path` is set)
5. Report counts
Note: `AddFile` carries `ServerEntityStatus` which includes `EntityBlocked info`, so blocking state is preserved through export/import without needing separate `BlockFile` log entries.
File data on disk is untouched by migration — only metadata moves between backends.
## Cabal Integration
Shared `server_postgres` flag. New Postgres modules added to existing conditional block:
```cabal
if flag(server_postgres)
cpp-options: -DdbServerPostgres
exposed-modules:
...existing SMP modules...
Simplex.FileTransfer.Server.Store.Postgres
Simplex.FileTransfer.Server.Store.Postgres.Migrations
Simplex.FileTransfer.Server.Store.Postgres.Config
```
CPP guards (`#if defined(dbServerPostgres)`) in:
- `Store.hs` — Postgres `FromField`/`ToField` instances for XFTP-specific types if needed
- `Env.hs` — `XSCDatabase` constructor
- `Main.hs` — database CLI commands, store selection for `database` mode, Postgres imports
- `Server.hs` — Postgres-specific imports if needed
## Testing
- **Parameterized server tests**: Existing `xftpServerTests` refactored to accept a store type parameter (following SMP's `SpecWith (ASrvTransport, AStoreType)` pattern). The same server tests run against both STM and Postgres backends — STM tests run unconditionally, Postgres tests added under `#if defined(dbServerPostgres)` with `postgressBracket` for database lifecycle (drop → create → test → drop).
- **Unit tests**: `PostgresFileStore` operations — add/get/delete/block/expire, duplicate detection, auth errors
- **Migration round-trip**: STM store → export to StoreLog → import to Postgres → export back → verify StoreLog equality (including blocked file status)
- **Tests location**: in `tests/` alongside existing XFTP tests, guarded by `server_postgres` CPP flag
- **Test database**: PostgreSQL on `localhost:5432`, using a dedicated `xftp_server_test` schema (dropped and recreated per test run via `postgressBracket`, following SMP's test database lifecycle pattern)
- **Test fixtures**: `testXFTPStoreDBOpts :: DBOpts` with `createSchema = True`, `confirmMigrations = MCYesUp`, in `tests/XFTPClient.hs`
@@ -0,0 +1,648 @@
# XFTP PostgreSQL Backend — Implementation Plan
> **For agentic workers:** REQUIRED: Use superpowers-extended-cc:subagent-driven-development (if subagents available) or superpowers-extended-cc:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking.
**Goal:** Add PostgreSQL backend support to xftp-server as an alternative to STM + StoreLog, with bidirectional migration.
**Architecture:** Introduce `FileStoreClass` typeclass (IO-based, following `QueueStoreClass` pattern). Extract current STM store into `Store/STM.hs`, make `Server.hs` polymorphic, then add `Store/Postgres.hs` behind `server_postgres` CPP flag. `usedStorage` moves from store to `XFTPEnv` so the server manages quota tracking externally.
**Tech Stack:** Haskell, postgresql-simple, STM, fourmolu, cabal with CPP flags
**Design spec:** `plans/2026-03-25-xftp-postgres-backend-design.md`
---
## File Structure
**Existing files modified:**
- `src/Simplex/FileTransfer/Server/Store.hs` — rewritten: becomes typeclass + shared types
- `src/Simplex/FileTransfer/Server/Env.hs` — polymorphic `XFTPEnv s`, `XFTPStoreConfig` GADT
- `src/Simplex/FileTransfer/Server.hs` — polymorphic over `FileStoreClass s`
- `src/Simplex/FileTransfer/Server/StoreLog.hs` — update for IO store functions
- `src/Simplex/FileTransfer/Server/Main.hs` — INI config, dispatch, CLI commands
- `simplexmq.cabal` — new modules
- `tests/XFTPClient.hs` — Postgres test fixtures
- `tests/Test.hs` — Postgres test group
**New files created:**
- `src/Simplex/FileTransfer/Server/Store/STM.hs` — `STMFileStore` (extracted from current `Store.hs`)
- `src/Simplex/FileTransfer/Server/Store/Postgres.hs` — `PostgresFileStore` [CPP-guarded]
- `src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs` — `PostgresFileStoreCfg` [CPP-guarded]
- `src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs` — schema SQL [CPP-guarded]
- `tests/CoreTests/XFTPStoreTests.hs` — Postgres store unit tests [CPP-guarded]
---
## Task 1: Move `usedStorage` from `FileStore` to `XFTPEnv`
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Store.hs`
- Modify: `src/Simplex/FileTransfer/Server/Env.hs`
- Modify: `src/Simplex/FileTransfer/Server.hs`
- [ ] **Step 1: Remove `usedStorage` from `FileStore` in `Store.hs`**
1. Remove `usedStorage :: TVar Int64` field from `FileStore` record (line 47).
2. Remove `usedStorage <- newTVarIO 0` from `newFileStore` (line 75) and drop the field from the record construction (line 76).
3. In `setFilePath` (line 92-97): remove `modifyTVar' (usedStorage st) (+ fromIntegral (size fileInfo))` — keep only `writeTVar filePath (Just fPath)`. Change pattern from `\FileRec {fileInfo, filePath}` to `\FileRec {filePath}` (fileInfo is now unused — `-Wunused-matches` error).
4. In `deleteFile` (line 112-119): remove `modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo)`. Change outer pattern match from `FileStore {files, recipients, usedStorage}` to `FileStore {files, recipients}`. Change inner pattern from `Just FileRec {fileInfo, recipientIds}` to `Just FileRec {recipientIds}` (`fileInfo` is now unused — `-Wunused-matches` error).
5. In `blockFile` (line 122-127): remove `when deleted $ modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo)`. Change pattern match from `st@FileStore {usedStorage}` to `st`. The `deleted` parameter and `fileInfo` in the inner pattern become unused — prefix with `_` or remove from pattern to avoid `-Wunused-matches`.
- [ ] **Step 2: Add `usedStorage` to `XFTPEnv` in `Env.hs`**
1. Add `usedStorage :: TVar Int64` field to `XFTPEnv` record (between `store` and `storeLog`, line 93).
2. In `newXFTPServerEnv` (line 112-126): replace lines 117-118:
```
used <- countUsedStorage <$> readTVarIO (files store)
atomically $ writeTVar (usedStorage store) used
```
with:
```
usedStorage <- newTVarIO =<< countUsedStorage <$> readTVarIO (files store)
```
3. Add `usedStorage` to the `pure XFTPEnv {..}` construction.
- [ ] **Step 3: Update all `usedStorage` access sites in `Server.hs`**
1. Line 552: `us <- asks $ usedStorage . store` → `us <- asks usedStorage`.
2. Line 569: `us <- asks $ usedStorage . store` → `us <- asks usedStorage`.
3. Line 639: `usedStart <- readTVarIO $ usedStorage st` → `usedStart <- readTVarIO =<< asks usedStorage`.
4. Line 647: `usedEnd <- readTVarIO $ usedStorage st` → `usedEnd <- readTVarIO =<< asks usedStorage`.
5. Line 694: `FileStore {files, usedStorage} <- asks store` → split into `FileStore {files} <- asks store` and `usedStorage <- asks usedStorage`.
6. In `deleteOrBlockServerFile_` (line 620): after `void $ atomically $ storeAction st`, add usedStorage adjustment — `us <- asks usedStorage` then `atomically $ modifyTVar' us $ subtract (fromIntegral $ size fileInfo)` when file had a path (check `path` from `readTVarIO filePath` earlier in the function).
- [ ] **Step 4: Build and verify**
Run: `cabal build`
- [ ] **Step 5: Run existing tests**
Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"`
- [ ] **Step 6: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs
git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs
git commit -m "refactor(xftp): move usedStorage from FileStore to XFTPEnv"
```
---
## Task 2: Add `getUsedStorage`, `getFileCount`, `expiredFiles` functions
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Store.hs`
- Modify: `src/Simplex/FileTransfer/Server/Env.hs`
- Modify: `src/Simplex/FileTransfer/Server.hs`
- [ ] **Step 1: Add three new functions to `Store.hs`**
1. Add to exports: `getUsedStorage`, `getFileCount`, `expiredFiles`.
2. Remove `expiredFilePath` from exports AND delete the function definition (dead code → `-Wunused-binds` error). Also remove `($>>=)` from import `Simplex.Messaging.Util (ifM, ($>>=))` → `Simplex.Messaging.Util (ifM)` — `$>>=` was only used by `expiredFilePath`.
3. Add import: `qualified Data.Map.Strict as M` (needed for `M.foldl'` in `getUsedStorage` and `M.toList` in `expiredFiles`).
4. Implement:
```haskell
getUsedStorage :: FileStore -> IO Int64
getUsedStorage FileStore {files} =
M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 <$> readTVarIO files
getFileCount :: FileStore -> IO Int
getFileCount FileStore {files} = M.size <$> readTVarIO files
expiredFiles :: FileStore -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)]
expiredFiles FileStore {files} old _limit = do
fs <- readTVarIO files
fmap catMaybes . forM (M.toList fs) $ \(sId, FileRec {fileInfo = FileInfo {size}, filePath, createdAt = RoundedSystemTime createdAt}) ->
if createdAt + fileTimePrecision < old
then do
path <- readTVarIO filePath
pure $ Just (sId, path, size)
else pure Nothing
```
5. Add imports: `Data.Maybe (catMaybes)`, `Data.Word (Word32)` (note: `qualified Data.Map.Strict as M` already added in item 3).
- [ ] **Step 2: Replace `countUsedStorage` in `Env.hs`**
1. Replace `countUsedStorage <$> readTVarIO (files store)` with `getUsedStorage store` in `newXFTPServerEnv`.
2. Remove `countUsedStorage` function definition and its export.
3. Remove `qualified Data.Map.Strict as M` import if no longer used.
- [ ] **Step 3: Update `restoreServerStats` in `Server.hs` to use `getFileCount`**
In `restoreServerStats` (line 694-696): replace `FileStore {files} <- asks store` and `_filesCount <- M.size <$> readTVarIO files` with `st <- asks store` and `_filesCount <- liftIO $ getFileCount st` (eliminates the `FileStore` pattern match — `files` binding no longer needed).
- [ ] **Step 4: Replace `expireServerFiles` iteration in `Server.hs`**
1. Replace the body of `expireServerFiles` (lines 636-660). Remove `files' <- readTVarIO (files st)` and the `forM_ (M.keys files')` loop.
2. New body: call `expiredFiles st old 10000` in a loop. For each `(sId, filePath_, fileSize)` in returned list: apply `itemDelay`, remove disk file if present, call `atomically $ deleteFile st sId`, adjust `usedStorage` TVar by `fileSize`, increment `filesExpired` stat. Loop until `expiredFiles` returns `[]`.
3. Remove `Data.Map.Strict` import from Server.hs if no longer needed (was used for `M.size` and `M.keys` — now replaced by `getFileCount` and `expiredFiles`).
- [ ] **Step 5: Build and verify**
Run: `cabal build`
- [ ] **Step 6: Run existing tests**
Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"`
- [ ] **Step 7: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs
git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs
git commit -m "refactor(xftp): add getUsedStorage, getFileCount, expiredFiles store functions"
```
---
## Task 3: Change `Store.hs` functions from STM to IO
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Store.hs`
- Modify: `src/Simplex/FileTransfer/Server.hs`
- Modify: `src/Simplex/FileTransfer/Server/StoreLog.hs`
- [ ] **Step 1: Change all Store.hs function signatures from STM to IO**
For each of: `addFile`, `setFilePath`, `addRecipient`, `getFile`, `deleteFile`, `blockFile`, `deleteRecipient`, `ackFile`:
1. Change return type from `STM (Either XFTPErrorType ...)` to `IO (Either XFTPErrorType ...)` (or `STM ()` to `IO ()` for `deleteRecipient`).
2. Wrap the function body in `atomically $ do ...`.
3. Keep `withFile` and `newFileRec` as internal STM helpers (called inside the `atomically` blocks).
- [ ] **Step 2: Update Server.hs call sites — remove `atomically` wrappers**
1. Line 563 (`receiveServerFile`): change `atomically $ writeTVar filePath (Just fPath)` → add `st <- asks store` then `void $ liftIO $ setFilePath st senderId fPath` (design call site #1 — `store` is not in scope in `receiveServerFile`'s `receive` helper, so bind via `asks`; `void` avoids `-Wunused-do-bind` warning on the `Either` result).
2. Line 453 (`verifyXFTPTransmission`): split `atomically $ verify =<< getFile st party fId` into: `liftIO (getFile st party fId)` (IO→M lift), then pattern match on result, use `readTVarIO (fileStatus fr)` instead of `readTVar`.
3. Lines 371, 377 (control port `CPDelete`/`CPBlock`): change `ExceptT $ atomically $ getFile fs SFRecipient fileId` → `ExceptT $ liftIO $ getFile fs SFRecipient fileId` (inside `unliftIO u $ do` block which runs in M monad — `liftIO` required to lift IO into M).
4. Line 508 (`addFile` in `createFile`): the `ExceptT $ addFile st sId file ts EntityActive` — `addFile` is now IO, `ExceptT` wraps IO directly. Remove any `atomically`.
5. Line 514 (`addRecipient`): same — `ExceptT . addRecipient st sId` works directly in IO.
6. Line 516 (`retryAdd`): change parameter type from `(XFTPFileId -> STM (Either XFTPErrorType a))` to `(XFTPFileId -> IO (Either XFTPErrorType a))`. Line 520: change `atomically (add fId)` to `liftIO (add fId)`.
7. Line 605 (`ackFileReception`): change `atomically $ deleteRecipient st rId fr` to `liftIO $ deleteRecipient st rId fr`.
8. Line 620 (`deleteOrBlockServerFile_`): change third parameter type from `(FileStore -> STM (Either XFTPErrorType ()))` to `(FileStore -> IO (Either XFTPErrorType ()))`. Line 626: change `void $ atomically $ storeAction st` to `void $ liftIO $ storeAction st`.
9. `expireServerFiles` `delete` helper: change `atomically $ deleteFile st sId` to `liftIO $ deleteFile st sId` (deleteFile is now IO; `liftIO` required because the helper runs in M monad, not IO).
- [ ] **Step 3: Update `StoreLog.hs` — remove `atomically` from replay**
In `readFileStore` (line 93), function `addToStore`:
1. Change `atomically (addToStore lr)` to `addToStore lr` — store functions are now IO.
2. The `addToStore` body calls `addFile`, `setFilePath`, `deleteFile`, `blockFile`, `ackFile` — all IO now, no `atomically` needed.
3. For `AddRecipients`: `runExceptT $ mapM_ (ExceptT . addRecipient st sId) rcps` — `addRecipient` returns `IO (Either ...)`, so `ExceptT . addRecipient st sId` works directly.
- [ ] **Step 4: Build and verify**
Run: `cabal build`
- [ ] **Step 5: Run existing tests**
Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"`
- [ ] **Step 6: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs
git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs
git commit -m "refactor(xftp): change file store operations from STM to IO"
```
---
## Task 4: Extract `FileStoreClass` typeclass, move STM impl to `Store/STM.hs`
**Files:**
- Rewrite: `src/Simplex/FileTransfer/Server/Store.hs`
- Create: `src/Simplex/FileTransfer/Server/Store/STM.hs`
- Modify: `src/Simplex/FileTransfer/Server/StoreLog.hs`
- Modify: `src/Simplex/FileTransfer/Server/Env.hs`
- Modify: `src/Simplex/FileTransfer/Server.hs`
- Modify: `simplexmq.cabal`
- [ ] **Step 1: Create `Store/STM.hs` — move all implementation code**
1. Create directory `src/Simplex/FileTransfer/Server/Store/`.
2. Create `src/Simplex/FileTransfer/Server/Store/STM.hs`.
3. Move from `Store.hs`: `FileStore` data type (rename to `STMFileStore`), all function implementations, internal helpers (`withFile`, `newFileRec`), all STM-specific imports.
4. Rename all `FileStore` references to `STMFileStore` in the new file.
5. Module declaration: `module Simplex.FileTransfer.Server.Store.STM` exporting only `STMFileStore (..)` — do NOT export standalone functions (`addFile`, `setFilePath`, etc.) to avoid name collisions with the typeclass methods from `Store.hs`.
- [ ] **Step 2: Rewrite `Store.hs` as the typeclass module**
1. Add `{-# LANGUAGE TypeFamilies #-}` pragma to `Store.hs` (required for `type FileStoreConfig s` associated type).
2. Keep in `Store.hs`: `FileRec (..)`, `FileRecipient (..)`, `RoundedFileTime`, `fileTimePrecision` definitions and their `StrEncoding` instance.
3. Add `FileStoreClass` typeclass:
```haskell
class FileStoreClass s where
type FileStoreConfig s
-- Lifecycle
newFileStore :: FileStoreConfig s -> IO s
closeFileStore :: s -> IO ()
-- File operations
addFile :: s -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> IO (Either XFTPErrorType ())
setFilePath :: s -> SenderId -> FilePath -> IO (Either XFTPErrorType ())
addRecipient :: s -> SenderId -> FileRecipient -> IO (Either XFTPErrorType ())
getFile :: s -> SFileParty p -> XFTPFileId -> IO (Either XFTPErrorType (FileRec, C.APublicAuthKey))
deleteFile :: s -> SenderId -> IO (Either XFTPErrorType ())
blockFile :: s -> SenderId -> BlockingInfo -> Bool -> IO (Either XFTPErrorType ())
deleteRecipient :: s -> RecipientId -> FileRec -> IO ()
ackFile :: s -> RecipientId -> IO (Either XFTPErrorType ())
-- Expiration
expiredFiles :: s -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)]
-- Stats
getUsedStorage :: s -> IO Int64
getFileCount :: s -> IO Int
```
4. Do NOT re-export from `Store/STM.hs` — this would create a circular module dependency (Store.hs imports Store/STM.hs, Store/STM.hs imports Store.hs). Consumers must import `Store.STM` directly where they need `STMFileStore`.
5. Remove all STM-specific imports that are no longer needed.
- [ ] **Step 3: Add `FileStoreClass` instance in `Store/STM.hs`**
1. Import `FileStoreClass` from `Simplex.FileTransfer.Server.Store`.
2. Inline all implementations directly in the instance body (do NOT delegate to standalone functions — the standalone names collide with typeclass method names, causing ambiguous occurrences for importers):
```haskell
instance FileStoreClass STMFileStore where
type FileStoreConfig STMFileStore = ()
newFileStore () = do
files <- TM.emptyIO
recipients <- TM.emptyIO
pure STMFileStore {files, recipients}
closeFileStore _ = pure ()
addFile st sId fileInfo createdAt status = atomically $ ...
setFilePath st sId fPath = atomically $ ...
-- ... (each method's body is the existing function body, inlined)
```
3. Remove the standalone top-level function definitions — they are now instance methods. Keep only `withFile` and `newFileRec` as internal helpers used by the instance methods.
- [ ] **Step 4: Update importers**
1. `Env.hs`: add `import Simplex.FileTransfer.Server.Store.STM (STMFileStore (..))`. Change `FileStore` → `STMFileStore` in `XFTPEnv` type and `newXFTPServerEnv`. Change `store <- newFileStore` to `store <- newFileStore ()` (typeclass method now takes `FileStoreConfig STMFileStore` which is `()`). Keep `import Simplex.FileTransfer.Server.Store` for `FileRec`, `FileRecipient`, `FileStoreClass`, etc.
2. `Server.hs`: add `import Simplex.FileTransfer.Server.Store.STM`. Change `FileStore` → `STMFileStore` in any explicit type annotations. Import `FileStoreClass` from `Simplex.FileTransfer.Server.Store`.
3. `StoreLog.hs`: add `import Simplex.FileTransfer.Server.Store.STM` to access concrete `STMFileStore` type and store functions used during log replay. Change `FileStore` → `STMFileStore` in `readWriteFileStore` and `writeFileStore` parameter types.
- [ ] **Step 5: Update cabal file**
Add `Simplex.FileTransfer.Server.Store.STM` to `exposed-modules` in the `!flag(client_library)` section, alongside existing XFTP server modules.
- [ ] **Step 6: Build and verify**
Run: `cabal build`
- [ ] **Step 7: Run existing tests**
Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"`
- [ ] **Step 8: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Store/STM.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs
git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Store/STM.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs simplexmq.cabal
git commit -m "refactor(xftp): extract FileStoreClass typeclass, move STM impl to Store.STM"
```
---
## Task 5: Make `XFTPEnv` and `Server.hs` polymorphic over `FileStoreClass`
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Env.hs`
- Modify: `src/Simplex/FileTransfer/Server.hs`
- Modify: `src/Simplex/FileTransfer/Server/Main.hs`
- Modify: `tests/XFTPClient.hs` (if it calls `runXFTPServerBlocking` directly)
- [ ] **Step 1: Make `XFTPEnv` polymorphic in `Env.hs`**
1. Add `XFTPStoreConfig` GADT: `data XFTPStoreConfig s where XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore`.
2. Change `data XFTPEnv` to `data XFTPEnv s` — field `store :: FileStore` becomes `store :: s`.
3. Change `newXFTPServerEnv :: XFTPServerConfig -> IO XFTPEnv` to `newXFTPServerEnv :: FileStoreClass s => XFTPStoreConfig s -> XFTPServerConfig -> IO (XFTPEnv s)`.
4. Pattern match on `XSCMemory storeLogPath` in `newXFTPServerEnv` body. Create store via `newFileStore ()`, storeLog via ``mapM (`readWriteFileStore` st) storeLogPath``.
- [ ] **Step 2: Make `Server.hs` polymorphic**
1. Change `type M a = ReaderT XFTPEnv IO a` to `type M s a = ReaderT (XFTPEnv s) IO a`.
2. Add `FileStoreClass s =>` constraint to all functions using `M s a`. Use `forall s.` in signatures of functions that have `where`-block bindings with `M s` type annotations — `ScopedTypeVariables` requires explicit `forall` to bring `s` into scope for inner type signatures (matching SMP's `smpServer :: forall s. MsgStoreClass s => ...` pattern). Full list: `xftpServer`, `processRequest`, `verifyXFTPTransmission`, `processXFTPRequest` and all its `where`-bound functions (`createFile`, `addRecipients`, `receiveServerFile`, `sendServerFile`, `deleteServerFile`, `ackFileReception`, `retryAdd`, `addFileRetry`, `addRecipientRetry`), `deleteServerFile_`, `blockServerFile`, `deleteOrBlockServerFile_`, `expireServerFiles`, `randomId`, `getFileId`, `withFileLog`, `incFileStat`, `saveServerStats`, `restoreServerStats`, `randomDelay` (inside `#ifdef slow_servers` CPP block). Also update `encodeXftp` (line 236) and `runCPClient` (line 339) which use explicit `ReaderT XFTPEnv IO` instead of the `M` alias — change to `ReaderT (XFTPEnv s) IO`.
3. Change `runXFTPServerBlocking` and `runXFTPServer` to take `XFTPStoreConfig s` parameter.
4. Add `closeFileStore store` call to the server shutdown path (in the `finally` block or `stopServer` equivalent — after saving stats, before logging "Server stopped"). This ensures Postgres connection pool and `dbStoreLog` are properly closed. For STM this is a no-op.
- [ ] **Step 3: Update `Main.hs` dispatch**
1. In `runServer`: construct `XSCMemory (enableStoreLog $> storeLogFilePath)`.
2. Add dispatch function that calls the updated `runXFTPServer` (which creates `started` internally):
```haskell
run :: FileStoreClass s => XFTPStoreConfig s -> IO ()
run storeCfg = runXFTPServer storeCfg serverConfig
```
3. Call `run` with the `XSCMemory` config.
- [ ] **Step 4: Update test helper if needed**
If `tests/XFTPClient.hs` calls `runXFTPServerBlocking` directly, update the call to pass an `XSCMemory` config. Check the `withXFTPServer` / `serverBracket` helper.
- [ ] **Step 5: Build and verify**
Run: `cabal build && cabal build test:simplexmq-test`
- [ ] **Step 6: Run existing tests**
Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"`
- [ ] **Step 7: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/Main.hs
git add src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/Main.hs tests/XFTPClient.hs simplexmq.cabal
git commit -m "refactor(xftp): make XFTPEnv and server polymorphic over FileStoreClass"
```
---
## Task 6: Add Postgres config, migrations, and store skeleton
**Files:**
- Create: `src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs`
- Create: `src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs`
- Create: `src/Simplex/FileTransfer/Server/Store/Postgres.hs`
- Modify: `src/Simplex/FileTransfer/Server/Env.hs`
- Modify: `simplexmq.cabal`
- [ ] **Step 1: Create `Store/Postgres/Config.hs`**
```haskell
module Simplex.FileTransfer.Server.Store.Postgres.Config
( PostgresFileStoreCfg (..),
defaultXFTPDBOpts,
)
where
import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..))
import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation)
data PostgresFileStoreCfg = PostgresFileStoreCfg
{ dbOpts :: DBOpts,
dbStoreLogPath :: Maybe FilePath,
confirmMigrations :: MigrationConfirmation
}
defaultXFTPDBOpts :: DBOpts
defaultXFTPDBOpts =
DBOpts
{ connstr = "postgresql://xftp@/xftp_server_store",
schema = "xftp_server",
poolSize = 10,
createSchema = False
}
```
- [ ] **Step 2: Create `Store/Postgres/Migrations.hs`**
Full migration module with `xftpServerMigrations :: [Migration]` and `m20260325_initial` containing CREATE TABLE SQL for `files` and `recipients` tables plus indexes. Follow SMP's `QueueStore/Postgres/Migrations.hs` pattern exactly: tuple list → `sortOn name . map migration`.
- [ ] **Step 3: Create `Store/Postgres.hs` with stub instance**
1. Define `PostgresFileStore` with `dbStore :: DBStore` and `dbStoreLog :: Maybe (StoreLog 'WriteMode)`.
2. `instance FileStoreClass PostgresFileStore` with `error "not implemented"` for all methods except `newFileStore` (calls `createDBStore` + opens `dbStoreLog`) and `closeFileStore` (closes both). `type FileStoreConfig PostgresFileStore = PostgresFileStoreCfg`.
3. Add `withDB`, `handleDuplicate`, `assertUpdated`, `withLog` helpers.
- [ ] **Step 4: Add `XSCDatabase` GADT constructor in `Env.hs` (CPP-guarded)**
```haskell
#if defined(dbServerPostgres)
import Simplex.FileTransfer.Server.Store.Postgres (PostgresFileStore)
import Simplex.FileTransfer.Server.Store.Postgres.Config (PostgresFileStoreCfg)
#endif
data XFTPStoreConfig s where
XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore
#if defined(dbServerPostgres)
XSCDatabase :: PostgresFileStoreCfg -> XFTPStoreConfig PostgresFileStore
#endif
```
- [ ] **Step 5: Update cabal**
Add to existing `if flag(server_postgres)` block:
```
Simplex.FileTransfer.Server.Store.Postgres
Simplex.FileTransfer.Server.Store.Postgres.Config
Simplex.FileTransfer.Server.Store.Postgres.Migrations
```
- [ ] **Step 6: Build both ways**
Run: `cabal build && cabal build -fserver_postgres`
- [ ] **Step 7: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Store/Postgres.hs src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs src/Simplex/FileTransfer/Server/Env.hs
git add src/Simplex/FileTransfer/Server/Store/Postgres.hs src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs src/Simplex/FileTransfer/Server/Env.hs simplexmq.cabal
git commit -m "feat(xftp): add PostgreSQL store skeleton with schema migration"
```
---
## Task 7: Implement `PostgresFileStore` operations
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Store/Postgres.hs`
- [ ] **Step 1: Implement `addFile`**
`INSERT INTO files (sender_id, file_size, file_digest, sender_key, file_path, created_at, status) VALUES (?,?,?,?,NULL,?,?)`. Catch unique violation with `handleDuplicate` → `DUPLICATE_`. Call `withLog "addFile"` after.
- [ ] **Step 2: Implement `getFile`**
For `SFSender`: `SELECT ... FROM files WHERE sender_id = ?`. Construct `FileRec` with `newTVarIO` per TVar field. `recipientIds = S.empty`.
For `SFRecipient`: `SELECT f.*, r.recipient_key FROM recipients r JOIN files f ON r.sender_id = f.sender_id WHERE r.recipient_id = ?`.
- [ ] **Step 3: Implement `setFilePath`**
`UPDATE files SET file_path = ? WHERE sender_id = ? AND file_path IS NULL`. Use `assertUpdated`. Call `withLog "setFilePath"`.
- [ ] **Step 4: Implement `addRecipient`**
`INSERT INTO recipients (recipient_id, sender_id, recipient_key) VALUES (?,?,?)`. `handleDuplicate` → `DUPLICATE_`. Call `withLog "addRecipient"`.
- [ ] **Step 5: Implement `deleteFile`, `blockFile`**
`deleteFile`: `DELETE FROM files WHERE sender_id = ?` (CASCADE). `withLog "deleteFile"`.
`blockFile`: `UPDATE files SET status = ? WHERE sender_id = ?`. `assertUpdated`. `withLog "blockFile"`.
- [ ] **Step 6: Implement `deleteRecipient`, `ackFile`**
`deleteRecipient`: `DELETE FROM recipients WHERE recipient_id = ?`. `withLog "deleteRecipient"`.
`ackFile`: same + return `Left AUTH` if 0 rows.
- [ ] **Step 7: Implement `expiredFiles`, `getUsedStorage`, `getFileCount`**
`expiredFiles`: `SELECT sender_id, file_path, file_size FROM files WHERE created_at + ? < ? LIMIT ?`.
`getUsedStorage`: `SELECT COALESCE(SUM(file_size), 0) FROM files`.
`getFileCount`: `SELECT COUNT(*) FROM files`.
- [ ] **Step 8: Add `ToField`/`FromField` instances**
For `RoundedFileTime` (Int64 wrapper), `ServerEntityStatus` (Text via StrEncoding), `C.APublicAuthKey` (Binary via `encodePubKey`/`decodePubKey`). Check SMP's `QueueStore/Postgres.hs` for existing instances to import.
- [ ] **Step 9: Wrap mutation operations in `uninterruptibleMask_`**
Operations that combine a DB write with a TVar update (e.g., `getFile` constructs `FileRec` with `newTVarIO`) must be wrapped in `E.uninterruptibleMask_` to prevent async exceptions from leaving inconsistent state. Follow SMP's `addQueue_`, `deleteStoreQueue` pattern.
- [ ] **Step 10: Build**
Run: `cabal build -fserver_postgres`
- [ ] **Step 11: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Store/Postgres.hs
git add src/Simplex/FileTransfer/Server/Store/Postgres.hs
git commit -m "feat(xftp): implement PostgresFileStore operations"
```
---
## Task 8: Add INI config, Main.hs dispatch, startup validation
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Main.hs`
- Modify: `src/Simplex/FileTransfer/Server/Env.hs`
- [ ] **Step 1: Update `iniFileContent` in `Main.hs`**
Add to `[STORE_LOG]` section: `store_files: memory`, commented-out `db_connection`, `db_schema`, `db_pool_size`, `db_store_log` keys. Follow SMP's `optDisabled'` pattern for commented defaults.
- [ ] **Step 2: Add `StartOptions` and `--confirm-migrations` flag**
```haskell
data StartOptions = StartOptions
{ confirmMigrations :: MigrationConfirmation
}
```
Add to `Start` command parser with default `MCConsole`. Thread through to `runServer`.
- [ ] **Step 3: Add store_files INI parsing and CPP-guarded Postgres dispatch**
In `runServer`: read `store_files` from INI (`fromRight "memory" $ lookupValue "STORE_LOG" "store_files" ini`). Add `"database"` branch (CPP-guarded) that constructs `PostgresFileStoreCfg` using `iniDBOptions ini defaultXFTPDBOpts` and `enableDbStoreLog'` pattern. Non-postgres build: `exitError`.
- [ ] **Step 4: Add `XSCDatabase` branch in `newXFTPServerEnv` (`Env.hs`)**
CPP-guarded pattern match on `XSCDatabase dbCfg`: `newFileStore dbCfg`, `storeLog = Nothing`.
- [ ] **Step 5: Add startup config validation**
Add `checkFileStoreMode` (CPP-guarded) before `run`: validate conflicting storeLog file + database mode, missing schema, etc. per design doc.
- [ ] **Step 6: Build both ways**
Run: `cabal build && cabal build -fserver_postgres`
- [ ] **Step 7: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Main.hs src/Simplex/FileTransfer/Server/Env.hs
git add src/Simplex/FileTransfer/Server/Main.hs src/Simplex/FileTransfer/Server/Env.hs
git commit -m "feat(xftp): add PostgreSQL INI config, store dispatch, startup validation"
```
---
## Task 9: Add database import/export CLI commands
**Files:**
- Modify: `src/Simplex/FileTransfer/Server/Main.hs`
- [ ] **Step 1: Add `Database` CLI command (CPP-guarded)**
Add `Database StoreCmd DBOpts` constructor to `CliCommand`. Add `database` subcommand parser with `import`/`export` subcommands + `dbOptsP defaultXFTPDBOpts`.
- [ ] **Step 2: Implement `importFileStoreToDatabase`**
1. `confirmOrExit` with database details.
2. Create temporary `STMFileStore`, replay StoreLog via `readWriteFileStore`.
3. Create `PostgresFileStore` with `createSchema = True`, `confirmMigrations = MCYesUp`.
4. Batch-insert files using PostgreSQL COPY protocol. Progress every 10k.
5. Batch-insert recipients using COPY protocol.
6. Verify counts: `SELECT COUNT(*)` — warn on mismatch.
7. Rename StoreLog to `.bak`.
8. Report counts.
- [ ] **Step 3: Implement `exportDatabaseToStoreLog`**
1. `confirmOrExit`. Fail if output file exists.
2. Create `PostgresFileStore` from config.
3. Open StoreLog for writing.
4. Fold over file records: write `AddFile` (with status), `AddRecipients`, `PutFile` per file.
5. Close StoreLog, report counts.
- [ ] **Step 4: Build**
Run: `cabal build -fserver_postgres`
- [ ] **Step 5: Format and commit**
```bash
fourmolu -i src/Simplex/FileTransfer/Server/Main.hs
git add src/Simplex/FileTransfer/Server/Main.hs
git commit -m "feat(xftp): add database import/export CLI commands"
```
---
## Task 10: Add Postgres tests
**Files:**
- Modify: `tests/XFTPClient.hs`
- Modify: `tests/Test.hs`
- Create: `tests/CoreTests/XFTPStoreTests.hs`
- [ ] **Step 1: Add test fixtures in `tests/XFTPClient.hs`**
```haskell
testXFTPStoreDBOpts :: DBOpts
testXFTPStoreDBOpts =
DBOpts
{ connstr = "postgresql://test_xftp_server_user@/test_xftp_server_db",
schema = "xftp_server_test",
poolSize = 10,
createSchema = True
}
```
Add `testXFTPDBConnectInfo :: ConnectInfo` matching the connection string.
- [ ] **Step 2: Add Postgres server test group in `tests/Test.hs`**
CPP-guarded block that runs existing `xftpServerTests` with Postgres store config, wrapped in `postgressBracket testXFTPDBConnectInfo`. Parameterize `withXFTPServer` to accept store config if needed.
- [ ] **Step 3: Create `tests/CoreTests/XFTPStoreTests.hs` — unit tests**
Test `PostgresFileStore` operations directly:
- `addFile` + `getFile SFSender` round-trip.
- `addFile` duplicate → `DUPLICATE_`.
- `getFile` nonexistent → `AUTH`.
- `setFilePath` + verify `WHERE file_path IS NULL` guard.
- `addRecipient` + `getFile SFRecipient` round-trip.
- `deleteFile` cascades recipients.
- `blockFile` + verify status.
- `expiredFiles` batch semantics.
- `getUsedStorage`, `getFileCount` correctness.
- [ ] **Step 4: Add migration round-trip test**
Create `STMFileStore` with test data (files + recipients + blocked status) → export to StoreLog → import to Postgres → export back → compare StoreLog files byte-for-byte.
- [ ] **Step 5: Build and run tests**
```bash
cabal build -fserver_postgres test:simplexmq-test
cabal test --test-show-details=streaming --test-option=--match="/XFTP/" -fserver_postgres
```
- [ ] **Step 6: Format and commit**
```bash
fourmolu -i tests/CoreTests/XFTPStoreTests.hs tests/XFTPClient.hs
git add tests/CoreTests/XFTPStoreTests.hs tests/XFTPClient.hs tests/Test.hs
git commit -m "test(xftp): add PostgreSQL backend tests"
```
+6 -6
View File
@@ -67,14 +67,14 @@ if [ ! -f "${confd}/smp-server.ini" ]; then
# Fix path to certificates
if [ -n "${WEB_MANUAL}" ]; then
sed -i -e 's|^[^#]*https: |#&|' \
-e 's|^[^#]*cert: |#&|' \
-e 's|^[^#]*key: |#&|' \
-e 's|^port:.*|port: 5223|' \
sed -i -e 's|^[^#]*https = |#&|' \
-e 's|^[^#]*cert = |#&|' \
-e 's|^[^#]*key = |#&|' \
-e 's|^port = .*|port = 5223|' \
"${confd}/smp-server.ini"
else
sed -i -e "s|cert: /etc/opt/simplex/web.crt|cert: $cert_path/$ADDR.crt|" \
-e "s|key: /etc/opt/simplex/web.key|key: $cert_path/$ADDR.key|" \
sed -i -e "s|cert = /etc/opt/simplex/web.crt|cert = $cert_path/$ADDR.crt|" \
-e "s|key = /etc/opt/simplex/web.key|key = $cert_path/$ADDR.key|" \
"${confd}/smp-server.ini"
fi
fi
+1 -1
View File
@@ -76,7 +76,7 @@ if [ ! -f "${confd}/file-server.ini" ]; then
# Optionally, set password
if [ -n "${PASS}" ]; then
sed -i -e "/^# create_password:/a create_password: $PASS" \
sed -i -e "/^# create_password =/a create_password = $PASS" \
"${confd}/file-server.ini"
fi
fi
+4
View File
@@ -283,6 +283,9 @@ library
Simplex.Messaging.Notifications.Server.Store.Postgres
Simplex.Messaging.Notifications.Server.Store.Types
Simplex.Messaging.Notifications.Server.StoreLog
Simplex.FileTransfer.Server.Store.Postgres
Simplex.FileTransfer.Server.Store.Postgres.Config
Simplex.FileTransfer.Server.Store.Postgres.Migrations
Simplex.Messaging.Server.MsgStore.Postgres
Simplex.Messaging.Server.QueueStore.Postgres
Simplex.Messaging.Server.QueueStore.Postgres.Migrations
@@ -527,6 +530,7 @@ test-suite simplexmq-test
if flag(server_postgres)
other-modules:
AgentTests.NotificationTests
CoreTests.XFTPStoreTests
NtfClient
NtfServerTests
PostgresSchemaDump
+101 -88
View File
@@ -31,7 +31,6 @@ import qualified Data.ByteString.Char8 as B
import Data.Int (Int64)
import Data.List.NonEmpty (NonEmpty)
import qualified Data.List.NonEmpty as L
import qualified Data.Map.Strict as M
import Data.Maybe (fromMaybe, isJust)
import qualified Data.Text as T
import qualified Data.Text.IO as T
@@ -88,7 +87,7 @@ import UnliftIO.Concurrent (threadDelay)
import UnliftIO.Directory (canonicalizePath, doesFileExist, removeFile, renameFile)
import qualified UnliftIO.Exception as E
type M a = ReaderT XFTPEnv IO a
type M s a = ReaderT (XFTPEnv s) IO a
data XFTPTransportRequest = XFTPTransportRequest
{ thParams :: THandleParamsXFTP 'TServer,
@@ -112,19 +111,19 @@ corsPreflightHeaders =
("Access-Control-Max-Age", "86400")
]
runXFTPServer :: XFTPServerConfig -> IO ()
runXFTPServer :: FileStoreClass s => XFTPServerConfig s -> IO ()
runXFTPServer cfg = do
started <- newEmptyTMVarIO
runXFTPServerBlocking started cfg
runXFTPServerBlocking :: TMVar Bool -> XFTPServerConfig -> IO ()
runXFTPServerBlocking :: FileStoreClass s => TMVar Bool -> XFTPServerConfig s -> IO ()
runXFTPServerBlocking started cfg = newXFTPServerEnv cfg >>= runReaderT (xftpServer cfg started)
data Handshake
= HandshakeSent C.PrivateKeyX25519
| HandshakeAccepted (THandleParams XFTPVersion 'TServer)
xftpServer :: XFTPServerConfig -> TMVar Bool -> M ()
xftpServer :: forall s. FileStoreClass s => XFTPServerConfig s -> TMVar Bool -> M s ()
xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpiration, fileExpiration, xftpServerVRange} started = do
mapM_ (expireServerFiles Nothing) fileExpiration
restoreServerStats
@@ -137,7 +136,7 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
)
`finally` stopServer
where
runServer :: M ()
runServer :: M s ()
runServer = do
srvCreds@(chain, pk) <- asks tlsServerCreds
httpCreds_ <- asks httpServerCreds
@@ -168,7 +167,7 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
Nothing -> pure ()
Just thParams -> processRequest req0 {thParams}
| otherwise -> liftIO . sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS')
xftpServerHandshakeV1 :: X.CertificateChain -> C.APrivateSignKey -> TMap SessionId Handshake -> XFTPTransportRequest -> M (Maybe (THandleParams XFTPVersion 'TServer))
xftpServerHandshakeV1 :: X.CertificateChain -> C.APrivateSignKey -> TMap SessionId Handshake -> XFTPTransportRequest -> M s (Maybe (THandleParams XFTPVersion 'TServer))
xftpServerHandshakeV1 chain serverSignKey sessions XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, request, reqBody = HTTP2Body {bodyHead}, sendResponse, sniUsed, addCORS} = do
s <- atomically $ TM.lookup sessionId sessions
r <- runExceptT $ case s of
@@ -227,39 +226,40 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
liftIO . sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS)
pure Nothing
Nothing -> throwE HANDSHAKE
sendError :: XFTPErrorType -> M (Maybe (THandleParams XFTPVersion 'TServer))
sendError :: XFTPErrorType -> M s (Maybe (THandleParams XFTPVersion 'TServer))
sendError err = do
runExceptT (encodeXftp err) >>= \case
Right bs -> liftIO . sendResponse $ H.responseBuilder N.ok200 (corsHeaders addCORS) bs
Left _ -> logError $ "Error encoding handshake error: " <> tshow err
pure Nothing
encodeXftp :: Encoding a => a -> ExceptT XFTPErrorType (ReaderT XFTPEnv IO) Builder
encodeXftp :: Encoding a => a -> ExceptT XFTPErrorType (ReaderT (XFTPEnv s) IO) Builder
encodeXftp a = byteString <$> liftHS (C.pad (smpEncode a) xftpBlockSize)
liftHS = liftEitherWith (const HANDSHAKE)
stopServer :: M ()
stopServer :: M s ()
stopServer = do
withFileLog closeStoreLog
st <- asks fileStore
liftIO $ closeFileStore st
saveServerStats
logNote "Server stopped"
expireFilesThread_ :: XFTPServerConfig -> [M ()]
expireFilesThread_ :: XFTPServerConfig s -> [M s ()]
expireFilesThread_ XFTPServerConfig {fileExpiration = Just fileExp} = [expireFiles fileExp]
expireFilesThread_ _ = []
expireFiles :: ExpirationConfig -> M ()
expireFiles :: ExpirationConfig -> M s ()
expireFiles expCfg = do
let interval = checkInterval expCfg * 1000000
forever $ do
liftIO $ threadDelay' interval
expireServerFiles (Just 100000) expCfg
serverStatsThread_ :: XFTPServerConfig -> [M ()]
serverStatsThread_ :: XFTPServerConfig s -> [M s ()]
serverStatsThread_ XFTPServerConfig {logStatsInterval = Just interval, logStatsStartTime, serverStatsLogFile} =
[logServerStats logStatsStartTime interval serverStatsLogFile]
serverStatsThread_ _ = []
logServerStats :: Int64 -> Int64 -> FilePath -> M ()
logServerStats :: Int64 -> Int64 -> FilePath -> M s ()
logServerStats startAt logInterval statsFilePath = do
initialDelay <- (startAt -) . fromIntegral . (`div` 1000000_000000) . diffTimeToPicoseconds . utctDayTime <$> liftIO getCurrentTime
liftIO $ putStrLn $ "server stats log enabled: " <> statsFilePath
@@ -300,12 +300,12 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
]
liftIO $ threadDelay' interval
prometheusMetricsThread_ :: XFTPServerConfig -> [M ()]
prometheusMetricsThread_ :: XFTPServerConfig s -> [M s ()]
prometheusMetricsThread_ XFTPServerConfig {prometheusInterval = Just interval, prometheusMetricsFile} =
[savePrometheusMetrics interval prometheusMetricsFile]
prometheusMetricsThread_ _ = []
savePrometheusMetrics :: Int -> FilePath -> M ()
savePrometheusMetrics :: Int -> FilePath -> M s ()
savePrometheusMetrics saveInterval metricsFile = do
labelMyThread "savePrometheusMetrics"
liftIO $ putStrLn $ "Prometheus metrics saved every " <> show saveInterval <> " seconds to " <> metricsFile
@@ -324,11 +324,11 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
let fd = periodStatDataCounts $ _filesDownloaded d
pure FileServerMetrics {statsData = d, filesDownloadedPeriods = fd, rtsOptions}
controlPortThread_ :: XFTPServerConfig -> [M ()]
controlPortThread_ :: XFTPServerConfig s -> [M s ()]
controlPortThread_ XFTPServerConfig {controlPort = Just port} = [runCPServer port]
controlPortThread_ _ = []
runCPServer :: ServiceName -> M ()
runCPServer :: ServiceName -> M s ()
runCPServer port = do
cpStarted <- newEmptyTMVarIO
u <- askUnliftIO
@@ -336,7 +336,7 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
labelMyThread "control port server"
runLocalTCPServer cpStarted port $ runCPClient u
where
runCPClient :: UnliftIO (ReaderT XFTPEnv IO) -> Socket -> IO ()
runCPClient :: UnliftIO (ReaderT (XFTPEnv s) IO) -> Socket -> IO ()
runCPClient u sock = do
labelMyThread "control port client"
h <- socketToHandle sock ReadWriteMode
@@ -366,15 +366,15 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira
XFTPServerConfig {controlPortUserAuth = user, controlPortAdminAuth = admin} = cfg
CPStatsRTS -> E.tryAny getRTSStats >>= either (hPrint h) (hPrint h)
CPDelete fileId -> withUserRole $ unliftIO u $ do
fs <- asks store
fs <- asks fileStore
r <- runExceptT $ do
(fr, _) <- ExceptT $ atomically $ getFile fs SFRecipient fileId
(fr, _) <- ExceptT $ liftIO $ getFile fs SFRecipient fileId
ExceptT $ deleteServerFile_ fr
liftIO . hPutStrLn h $ either (\e -> "error: " <> show e) (\() -> "ok") r
CPBlock fileId info -> withUserRole $ unliftIO u $ do
fs <- asks store
fs <- asks fileStore
r <- runExceptT $ do
(fr, _) <- ExceptT $ atomically $ getFile fs SFRecipient fileId
(fr, _) <- ExceptT $ liftIO $ getFile fs SFRecipient fileId
ExceptT $ blockServerFile fr info
liftIO . hPutStrLn h $ either (\e -> "error: " <> show e) (\() -> "ok") r
CPHelp -> hPutStrLn h "commands: stats-rts, delete, help, quit"
@@ -395,7 +395,7 @@ data ServerFile = ServerFile
sbState :: LC.SbState
}
processRequest :: XFTPTransportRequest -> M ()
processRequest :: FileStoreClass s => XFTPTransportRequest -> M s ()
processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHead}, sendResponse, addCORS}
| B.length bodyHead /= xftpBlockSize = sendXFTPResponse ("", NoEntity, FRErr BLOCK) Nothing
| otherwise =
@@ -430,7 +430,7 @@ processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHea
done
#ifdef slow_servers
randomDelay :: M ()
randomDelay :: M s ()
randomDelay = do
d <- asks $ responseDelay . config
when (d > 0) $ do
@@ -440,20 +440,20 @@ randomDelay = do
data VerificationResult = VRVerified XFTPRequest | VRFailed XFTPErrorType
verifyXFTPTransmission :: Maybe (THandleAuth 'TServer) -> SignedTransmission FileCmd -> M VerificationResult
verifyXFTPTransmission :: forall s. FileStoreClass s => Maybe (THandleAuth 'TServer) -> SignedTransmission FileCmd -> M s VerificationResult
verifyXFTPTransmission thAuth (tAuth, authorized, (corrId, fId, cmd)) =
case cmd of
FileCmd SFSender (FNEW file rcps auth') -> pure $ XFTPReqNew file rcps auth' `verifyWith` sndKey file
FileCmd SFRecipient PING -> pure $ VRVerified XFTPReqPing
FileCmd party _ -> verifyCmd party
where
verifyCmd :: SFileParty p -> M VerificationResult
verifyCmd :: SFileParty p -> M s VerificationResult
verifyCmd party = do
st <- asks store
atomically $ verify =<< getFile st party fId
st <- asks fileStore
liftIO $ verify =<< getFile st party fId
where
verify = \case
Right (fr, k) -> result <$> readTVar (fileStatus fr)
Right (fr, k) -> result <$> readTVarIO (fileStatus fr)
where
result = \case
EntityActive -> XFTPReqCmd fId fr cmd `verifyWith` k
@@ -464,7 +464,7 @@ verifyXFTPTransmission thAuth (tAuth, authorized, (corrId, fId, cmd)) =
-- TODO verify with DH authorization
req `verifyWith` k = if verifyCmdAuthorization thAuth tAuth authorized corrId k then VRVerified req else VRFailed AUTH
processXFTPRequest :: HTTP2Body -> XFTPRequest -> M (FileResponse, Maybe ServerFile)
processXFTPRequest :: forall s. FileStoreClass s => HTTP2Body -> XFTPRequest -> M s (FileResponse, Maybe ServerFile)
processXFTPRequest HTTP2Body {bodyPart} = \case
XFTPReqNew file rks auth -> noFile =<< ifM allowNew (createFile file rks) (pure $ FRErr AUTH)
where
@@ -483,9 +483,9 @@ processXFTPRequest HTTP2Body {bodyPart} = \case
XFTPReqPing -> noFile FRPong
where
noFile resp = pure (resp, Nothing)
createFile :: FileInfo -> NonEmpty RcvPublicAuthKey -> M FileResponse
createFile :: FileInfo -> NonEmpty RcvPublicAuthKey -> M s FileResponse
createFile file rks = do
st <- asks store
st <- asks fileStore
r <- runExceptT $ do
sizes <- asks $ allowedChunkSizes . config
unless (size file `elem` sizes) $ throwE SIZE
@@ -502,27 +502,27 @@ processXFTPRequest HTTP2Body {bodyPart} = \case
let rIds = L.map (\(FileRecipient rId _) -> rId) rcps
pure $ FRSndIds sId rIds
pure $ either FRErr id r
addFileRetry :: FileStore -> FileInfo -> Int -> RoundedFileTime -> M (Either XFTPErrorType XFTPFileId)
addFileRetry :: s -> FileInfo -> Int -> RoundedFileTime -> M s (Either XFTPErrorType XFTPFileId)
addFileRetry st file n ts =
retryAdd n $ \sId -> runExceptT $ do
ExceptT $ addFile st sId file ts EntityActive
pure sId
addRecipientRetry :: FileStore -> Int -> XFTPFileId -> RcvPublicAuthKey -> M (Either XFTPErrorType FileRecipient)
addRecipientRetry :: s -> Int -> XFTPFileId -> RcvPublicAuthKey -> M s (Either XFTPErrorType FileRecipient)
addRecipientRetry st n sId rpk =
retryAdd n $ \rId -> runExceptT $ do
let rcp = FileRecipient rId rpk
ExceptT $ addRecipient st sId rcp
pure rcp
retryAdd :: Int -> (XFTPFileId -> STM (Either XFTPErrorType a)) -> M (Either XFTPErrorType a)
retryAdd :: Int -> (XFTPFileId -> IO (Either XFTPErrorType a)) -> M s (Either XFTPErrorType a)
retryAdd 0 _ = pure $ Left INTERNAL
retryAdd n add = do
fId <- getFileId
atomically (add fId) >>= \case
liftIO (add fId) >>= \case
Left DUPLICATE_ -> retryAdd (n - 1) add
r -> pure r
addRecipients :: XFTPFileId -> NonEmpty RcvPublicAuthKey -> M FileResponse
addRecipients :: XFTPFileId -> NonEmpty RcvPublicAuthKey -> M s FileResponse
addRecipients sId rks = do
st <- asks store
st <- asks fileStore
r <- runExceptT $ do
rcps <- mapM (ExceptT . addRecipientRetry st 3 sId) rks
lift $ withFileLog $ \sl -> logAddRecipients sl sId rcps
@@ -531,7 +531,7 @@ processXFTPRequest HTTP2Body {bodyPart} = \case
let rIds = L.map (\(FileRecipient rId _) -> rId) rcps
pure $ FRRcvIds rIds
pure $ either FRErr id r
receiveServerFile :: FileRec -> M FileResponse
receiveServerFile :: FileRec -> M s FileResponse
receiveServerFile FileRec {senderId, fileInfo = FileInfo {size, digest}, filePath} = case bodyPart of
Nothing -> pure $ FRErr SIZE
-- TODO validate body size from request before downloading, once it's populated
@@ -549,7 +549,7 @@ processXFTPRequest HTTP2Body {bodyPart} = \case
| bs == 0 || bs > s -> pure $ FRErr SIZE
| otherwise -> drain (s - bs)
reserve = do
us <- asks $ usedStorage . store
us <- asks usedStorage
quota <- asks $ fromMaybe maxBound . fileSizeQuota . config
atomically . stateTVar us $
\used -> let used' = used + fromIntegral size in if used' <= quota then (True, used') else (False, used)
@@ -559,21 +559,28 @@ processXFTPRequest HTTP2Body {bodyPart} = \case
receiveChunk (XFTPRcvChunkSpec fPath size digest) >>= \case
Right () -> do
stats <- asks serverStats
withFileLog $ \sl -> logPutFile sl senderId fPath
atomically $ writeTVar filePath (Just fPath)
incFileStat filesUploaded
incFileStat filesCount
liftIO $ atomicModifyIORef'_ (filesSize stats) (+ fromIntegral size)
pure FROk
st <- asks fileStore
liftIO (setFilePath st senderId fPath) >>= \case
Right () -> do
withFileLog $ \sl -> logPutFile sl senderId fPath
incFileStat filesUploaded
incFileStat filesCount
liftIO $ atomicModifyIORef'_ (filesSize stats) (+ fromIntegral size)
pure FROk
Left _e -> do
us <- asks usedStorage
atomically $ modifyTVar' us $ subtract (fromIntegral size)
liftIO $ whenM (doesFileExist fPath) (removeFile fPath) `catch` logFileError
pure $ FRErr AUTH
Left e -> do
us <- asks $ usedStorage . store
us <- asks usedStorage
atomically $ modifyTVar' us $ subtract (fromIntegral size)
liftIO $ whenM (doesFileExist fPath) (removeFile fPath) `catch` logFileError
pure $ FRErr e
receiveChunk spec = do
t <- asks $ fileTimeout . config
liftIO $ fromMaybe (Left TIMEOUT) <$> timeout t (runExceptT $ receiveFile getBody spec)
sendServerFile :: FileRec -> RcvPublicDhKey -> M (FileResponse, Maybe ServerFile)
sendServerFile :: FileRec -> RcvPublicDhKey -> M s (FileResponse, Maybe ServerFile)
sendServerFile FileRec {senderId, filePath, fileInfo = FileInfo {size}} rDhKey = do
readTVarIO filePath >>= \case
Just path -> ifM (doesFileExist path) sendFile (pure (FRErr AUTH, Nothing))
@@ -592,38 +599,41 @@ processXFTPRequest HTTP2Body {bodyPart} = \case
_ -> pure (FRErr INTERNAL, Nothing)
_ -> pure (FRErr NO_FILE, Nothing)
deleteServerFile :: FileRec -> M FileResponse
deleteServerFile :: FileRec -> M s FileResponse
deleteServerFile fr = either FRErr (\() -> FROk) <$> deleteServerFile_ fr
logFileError :: SomeException -> IO ()
logFileError e = logError $ "Error deleting file: " <> tshow e
ackFileReception :: RecipientId -> FileRec -> M FileResponse
ackFileReception :: RecipientId -> FileRec -> M s FileResponse
ackFileReception rId fr = do
withFileLog (`logAckFile` rId)
st <- asks store
atomically $ deleteRecipient st rId fr
st <- asks fileStore
liftIO $ deleteRecipient st rId fr
incFileStat fileDownloadAcks
pure FROk
deleteServerFile_ :: FileRec -> M (Either XFTPErrorType ())
deleteServerFile_ :: FileStoreClass s => FileRec -> M s (Either XFTPErrorType ())
deleteServerFile_ fr@FileRec {senderId} = do
withFileLog (`logDeleteFile` senderId)
deleteOrBlockServerFile_ fr filesDeleted (`deleteFile` senderId)
-- this also deletes the file from storage, but doesn't include it in delete statistics
blockServerFile :: FileRec -> BlockingInfo -> M (Either XFTPErrorType ())
blockServerFile :: FileStoreClass s => FileRec -> BlockingInfo -> M s (Either XFTPErrorType ())
blockServerFile fr@FileRec {senderId} info = do
withFileLog $ \sl -> logBlockFile sl senderId info
deleteOrBlockServerFile_ fr filesBlocked $ \st -> blockFile st senderId info True
deleteOrBlockServerFile_ :: FileRec -> (FileServerStats -> IORef Int) -> (FileStore -> STM (Either XFTPErrorType ())) -> M (Either XFTPErrorType ())
deleteOrBlockServerFile_ :: FileStoreClass s => FileRec -> (FileServerStats -> IORef Int) -> (s -> IO (Either XFTPErrorType ())) -> M s (Either XFTPErrorType ())
deleteOrBlockServerFile_ FileRec {filePath, fileInfo} stat storeAction = runExceptT $ do
path <- readTVarIO filePath
stats <- asks serverStats
ExceptT $ first (\(_ :: SomeException) -> FILE_IO) <$> try (forM_ path $ \p -> whenM (doesFileExist p) (removeFile p >> deletedStats stats))
st <- asks store
void $ atomically $ storeAction st
st <- asks fileStore
ExceptT $ liftIO $ storeAction st
forM_ path $ \_ -> do
us <- asks usedStorage
atomically $ modifyTVar' us $ subtract (fromIntegral $ size fileInfo)
lift $ incFileStat stat
where
deletedStats stats = do
@@ -633,47 +643,50 @@ deleteOrBlockServerFile_ FileRec {filePath, fileInfo} stat storeAction = runExce
getFileTime :: IO RoundedFileTime
getFileTime = getRoundedSystemTime
expireServerFiles :: Maybe Int -> ExpirationConfig -> M ()
expireServerFiles :: FileStoreClass s => Maybe Int -> ExpirationConfig -> M s ()
expireServerFiles itemDelay expCfg = do
st <- asks store
usedStart <- readTVarIO $ usedStorage st
st <- asks fileStore
us <- asks usedStorage
usedStart <- readTVarIO us
old <- liftIO $ expireBeforeEpoch expCfg
files' <- readTVarIO (files st)
logNote $ "Expiration check: " <> tshow (M.size files') <> " files"
forM_ (M.keys files') $ \sId -> do
mapM_ threadDelay itemDelay
atomically (expiredFilePath st sId old)
>>= mapM_ (maybeRemove $ delete st sId)
usedEnd <- readTVarIO $ usedStorage st
filesCount <- liftIO $ getFileCount st
logNote $ "Expiration check: " <> tshow filesCount <> " files"
expireLoop st us old
usedEnd <- readTVarIO us
logNote $ "Used " <> mbs usedStart <> " -> " <> mbs usedEnd <> ", " <> mbs (usedStart - usedEnd) <> " reclaimed."
where
mbs bs = tshow (bs `div` 1048576) <> "mb"
maybeRemove del = maybe del (remove del)
remove del filePath =
ifM
(doesFileExist filePath)
((removeFile filePath >> del) `catch` \(e :: SomeException) -> logError $ "failed to remove expired file " <> tshow filePath <> ": " <> tshow e)
del
delete st sId = do
withFileLog (`logDeleteFile` sId)
void . atomically $ deleteFile st sId -- will not update usedStorage if sId isn't in store
incFileStat filesExpired
expireLoop st us old = do
expired <- liftIO $ expiredFiles st old 10000
forM_ expired $ \(sId, filePath_, fileSize) -> do
mapM_ threadDelay itemDelay
forM_ filePath_ $ \fp ->
whenM (doesFileExist fp) $
removeFile fp `catch` \(e :: SomeException) -> logError $ "failed to remove expired file " <> tshow fp <> ": " <> tshow e
forM_ filePath_ $ \_ ->
atomically $ modifyTVar' us $ subtract (fromIntegral fileSize)
incFileStat filesExpired
let sIds = map (\(sId, _, _) -> sId) expired
unless (null sIds) $ do
withFileLog $ \sl -> mapM_ (logDeleteFile sl) sIds
liftIO $ deleteFiles st sIds
expireLoop st us old
randomId :: Int -> M ByteString
randomId :: Int -> M s ByteString
randomId n = atomically . C.randomBytes n =<< asks random
getFileId :: M XFTPFileId
getFileId :: M s XFTPFileId
getFileId = fmap EntityId . randomId =<< asks (fileIdSize . config)
withFileLog :: (StoreLog 'WriteMode -> IO a) -> M ()
withFileLog :: (StoreLog 'WriteMode -> IO a) -> M s ()
withFileLog action = liftIO . mapM_ action =<< asks storeLog
incFileStat :: (FileServerStats -> IORef Int) -> M ()
incFileStat :: (FileServerStats -> IORef Int) -> M s ()
incFileStat statSel = do
stats <- asks serverStats
liftIO $ atomicModifyIORef'_ (statSel stats) (+ 1)
saveServerStats :: M ()
saveServerStats :: M s ()
saveServerStats =
asks (serverStatsBackupFile . config)
>>= mapM_ (\f -> asks serverStats >>= liftIO . getFileServerStatsData >>= liftIO . saveStats f)
@@ -683,7 +696,7 @@ saveServerStats =
B.writeFile f $ strEncode stats
logNote "server stats saved"
restoreServerStats :: M ()
restoreServerStats :: FileStoreClass s => M s ()
restoreServerStats = asks (serverStatsBackupFile . config) >>= mapM_ restoreStats
where
restoreStats f = whenM (doesFileExist f) $ do
@@ -691,9 +704,9 @@ restoreServerStats = asks (serverStatsBackupFile . config) >>= mapM_ restoreStat
liftIO (strDecode <$> B.readFile f) >>= \case
Right d@FileServerStatsData {_filesCount = statsFilesCount, _filesSize = statsFilesSize} -> do
s <- asks serverStats
FileStore {files, usedStorage} <- asks store
_filesCount <- M.size <$> readTVarIO files
_filesSize <- readTVarIO usedStorage
st <- asks fileStore
_filesCount <- liftIO $ getFileCount st
_filesSize <- readTVarIO =<< asks usedStorage
liftIO $ setFileServerStats s d {_filesCount, _filesSize}
renameFile f $ f <> ".bak"
logNote "server stats restored"
+150 -16
View File
@@ -1,21 +1,35 @@
{-# LANGUAGE CPP #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TypeFamilies #-}
module Simplex.FileTransfer.Server.Env
( XFTPServerConfig (..),
XFTPStoreConfig (..),
XFTPEnv (..),
XFTPRequest (..),
XFTPStoreType,
FileStore (..),
AFStoreType (..),
fileStore,
fromFileStore,
defaultInactiveClientExpiration,
defFileExpirationHours,
defaultFileExpiration,
newXFTPServerEnv,
countUsedStorage,
readFileStoreType,
runWithStoreConfig,
checkFileStoreMode,
importToDatabase,
exportFromDatabase,
) where
import Control.Logger.Simple
@@ -23,7 +37,6 @@ import Control.Monad
import Crypto.Random
import Data.Int (Int64)
import Data.List.NonEmpty (NonEmpty)
import qualified Data.Map.Strict as M
import Data.Time.Clock (getCurrentTime)
import Data.Word (Word32)
import Data.X509.Validation (Fingerprint (..))
@@ -31,7 +44,21 @@ import Network.Socket
import qualified Network.TLS as T
import Simplex.FileTransfer.Protocol (FileCmd, FileInfo (..), XFTPFileId)
import Simplex.FileTransfer.Server.Stats
import Data.Either (fromRight)
import Data.Ini (Ini, lookupValue)
import qualified Data.Text as T
import Simplex.FileTransfer.Server.Store
import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation)
import Data.Functor (($>))
import Simplex.Messaging.Server.CLI (settingIsOn)
import System.Exit (exitFailure)
#if defined(dbServerPostgres)
import Data.Maybe (isNothing)
import Simplex.FileTransfer.Server.Store.Postgres (PostgresFileStore, importFileStore, exportFileStore)
import Simplex.FileTransfer.Server.Store.Postgres.Config (PostgresFileStoreCfg (..), defaultXFTPDBOpts)
import Simplex.Messaging.Server.CLI (iniDBOptions)
import System.Directory (doesFileExist)
#endif
import Simplex.FileTransfer.Server.StoreLog
import Simplex.FileTransfer.Transport (VersionRangeXFTP)
import qualified Simplex.Messaging.Crypto as C
@@ -42,10 +69,11 @@ import Simplex.Messaging.Util (tshow)
import System.IO (IOMode (..))
import UnliftIO.STM
data XFTPServerConfig = XFTPServerConfig
data XFTPServerConfig s = XFTPServerConfig
{ xftpPort :: ServiceName,
controlPort :: Maybe ServiceName,
fileIdSize :: Int,
serverStoreCfg :: XFTPStoreConfig s,
storeLogFile :: Maybe FilePath,
filesPath :: FilePath,
-- | server storage quota
@@ -88,9 +116,10 @@ defaultInactiveClientExpiration =
checkInterval = 3600 -- seconds, 1 hours
}
data XFTPEnv = XFTPEnv
{ config :: XFTPServerConfig,
store :: FileStore,
data XFTPEnv s = XFTPEnv
{ config :: XFTPServerConfig s,
store :: FileStore s,
usedStorage :: TVar Int64,
storeLog :: Maybe (StoreLog 'WriteMode),
random :: TVar ChaChaDRG,
serverIdentity :: C.KeyHash,
@@ -99,6 +128,38 @@ data XFTPEnv = XFTPEnv
serverStats :: FileServerStats
}
fileStore :: XFTPEnv s -> s
fileStore = fromFileStore . store
{-# INLINE fileStore #-}
data XFTPStoreConfig s where
XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore
#if defined(dbServerPostgres)
XSCDatabase :: PostgresFileStoreCfg -> XFTPStoreConfig PostgresFileStore
#endif
type family XFTPStoreType (fs :: FSType) where
XFTPStoreType 'FSMemory = STMFileStore
#if defined(dbServerPostgres)
XFTPStoreType 'FSPostgres = PostgresFileStore
#endif
data FileStore s where
StoreMemory :: STMFileStore -> FileStore STMFileStore
#if defined(dbServerPostgres)
StoreDatabase :: PostgresFileStore -> FileStore PostgresFileStore
#endif
data AFStoreType = forall fs. AFSType (SFSType fs)
fromFileStore :: FileStore s -> s
fromFileStore = \case
StoreMemory s -> s
#if defined(dbServerPostgres)
StoreDatabase s -> s
#endif
{-# INLINE fromFileStore #-}
defFileExpirationHours :: Int64
defFileExpirationHours = 48
@@ -109,13 +170,22 @@ defaultFileExpiration =
checkInterval = 2 * 3600 -- seconds, 2 hours
}
newXFTPServerEnv :: XFTPServerConfig -> IO XFTPEnv
newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCredentials, httpCredentials} = do
newXFTPServerEnv :: FileStoreClass s => XFTPServerConfig s -> IO (XFTPEnv s)
newXFTPServerEnv config@XFTPServerConfig {serverStoreCfg, fileSizeQuota, xftpCredentials, httpCredentials} = do
random <- C.newRandom
store <- newFileStore
storeLog <- mapM (`readWriteFileStore` store) storeLogFile
used <- countUsedStorage <$> readTVarIO (files store)
atomically $ writeTVar (usedStorage store) used
(store, storeLog) <- case serverStoreCfg of
XSCMemory storeLogPath -> do
st <- newFileStore ()
sl <- mapM (`readWriteFileStore` st) storeLogPath
atomically $ writeTVar (stmStoreLog st) sl
pure (StoreMemory st, sl)
#if defined(dbServerPostgres)
XSCDatabase dbCfg -> do
st <- newFileStore dbCfg
pure (StoreDatabase st, Nothing)
#endif
used <- getUsedStorage (fromFileStore store)
usedStorage <- newTVarIO used
forM_ fileSizeQuota $ \quota -> do
logNote $ "Total / available storage: " <> tshow quota <> " / " <> tshow (quota - used)
when (quota < used) $ logWarn "WARNING: storage quota is less than used storage, no files can be uploaded!"
@@ -123,12 +193,76 @@ newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCrede
httpServerCreds <- mapM loadServerCredential httpCredentials
Fingerprint fp <- loadFingerprint xftpCredentials
serverStats <- newFileServerStats =<< getCurrentTime
pure XFTPEnv {config, store, storeLog, random, tlsServerCreds, httpServerCreds, serverIdentity = C.KeyHash fp, serverStats}
countUsedStorage :: M.Map k FileRec -> Int64
countUsedStorage = M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0
pure XFTPEnv {config, store, usedStorage, storeLog, random, tlsServerCreds, httpServerCreds, serverIdentity = C.KeyHash fp, serverStats}
data XFTPRequest
= XFTPReqNew FileInfo (NonEmpty RcvPublicAuthKey) (Maybe BasicAuth)
| XFTPReqCmd XFTPFileId FileRec FileCmd
| XFTPReqPing
readFileStoreType :: Ini -> Either String AFStoreType
readFileStoreType ini = case fromRight "memory" $ T.unpack <$> lookupValue "STORE_LOG" "store_files" ini of
"memory" -> Right $ AFSType SFSMemory
"database" -> Right $ AFSType SFSPostgres
other -> Left $ "Invalid store_files value: " <> other
-- | Dispatch store config from AFStoreType singleton and run the callback.
-- CPP guards for Postgres are handled here so Main.hs stays CPP-free.
runWithStoreConfig ::
AFStoreType ->
Ini ->
FilePath ->
MigrationConfirmation ->
(forall s. FileStoreClass s => XFTPStoreConfig s -> IO ()) ->
IO ()
runWithStoreConfig (AFSType SFSMemory) ini storeLogFilePath _confirmMigrations run =
run $ XSCMemory (enableStoreLog' $> storeLogFilePath)
where
enableStoreLog' = settingIsOn "STORE_LOG" "enable" ini
runWithStoreConfig (AFSType SFSPostgres) ini storeLogFilePath confirmMigrations run =
#if defined(dbServerPostgres)
run $ XSCDatabase dbCfg
where
enableDbStoreLog' = settingIsOn "STORE_LOG" "db_store_log" ini
dbStoreLogPath = enableDbStoreLog' $> storeLogFilePath
dbCfg = PostgresFileStoreCfg {dbOpts = iniDBOptions ini defaultXFTPDBOpts, dbStoreLogPath, confirmMigrations}
#else
error "server binary is compiled without support for PostgreSQL database"
#endif
-- | Validate startup config when store_files=database.
checkFileStoreMode :: Ini -> AFStoreType -> FilePath -> IO ()
checkFileStoreMode ini (AFSType SFSPostgres) storeLogFilePath = do
#if defined(dbServerPostgres)
storeLogExists <- doesFileExist storeLogFilePath
let dbStoreLogOn = settingIsOn "STORE_LOG" "db_store_log" ini
when (storeLogExists && isNothing dbStoreLogOn) $ do
putStrLn $ "Error: store log file " <> storeLogFilePath <> " exists but store_files is `database`."
putStrLn "Use `file-server database import` to migrate, or set `db_store_log: on`."
exitFailure
#else
putStrLn "Error: server binary is compiled without support for PostgreSQL database."
putStrLn "Please re-compile with `cabal build -fserver_postgres`."
exitFailure
#endif
checkFileStoreMode _ (AFSType SFSMemory) _ = pure ()
-- | Import StoreLog to PostgreSQL database.
importToDatabase :: FilePath -> Ini -> MigrationConfirmation -> IO ()
#if defined(dbServerPostgres)
importToDatabase storeLogFilePath ini _confirmMigrations = do
let dbCfg = PostgresFileStoreCfg {dbOpts = iniDBOptions ini defaultXFTPDBOpts, dbStoreLogPath = Nothing, confirmMigrations = _confirmMigrations}
importFileStore storeLogFilePath dbCfg
#else
importToDatabase _ _ _ = error "Error: server binary is compiled without support for PostgreSQL database.\nPlease re-compile with `cabal build -fserver_postgres`."
#endif
-- | Export PostgreSQL database to StoreLog.
exportFromDatabase :: FilePath -> Ini -> MigrationConfirmation -> IO ()
#if defined(dbServerPostgres)
exportFromDatabase storeLogFilePath ini _confirmMigrations = do
let dbCfg = PostgresFileStoreCfg {dbOpts = iniDBOptions ini defaultXFTPDBOpts, dbStoreLogPath = Nothing, confirmMigrations = _confirmMigrations}
exportFileStore storeLogFilePath dbCfg
#else
exportFromDatabase _ _ _ = error "Error: server binary is compiled without support for PostgreSQL database.\nPlease re-compile with `cabal build -fserver_postgres`."
#endif
+116 -54
View File
@@ -5,6 +5,7 @@
{-# LANGUAGE OverloadedLists #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE TypeApplications #-}
module Simplex.FileTransfer.Server.Main
@@ -12,7 +13,7 @@ module Simplex.FileTransfer.Server.Main
xftpServerCLI_,
) where
import Control.Monad (when)
import Control.Monad (unless, when)
import Data.Either (fromRight)
import Data.Functor (($>))
import Data.Ini (lookupValue, readIniFile)
@@ -28,11 +29,12 @@ import Options.Applicative
import Simplex.FileTransfer.Chunks
import Simplex.FileTransfer.Description (FileSize (..))
import Simplex.FileTransfer.Server (runXFTPServer)
import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defFileExpirationHours, defaultFileExpiration, defaultInactiveClientExpiration)
import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), XFTPStoreConfig, AFStoreType (..), defFileExpirationHours, defaultFileExpiration, defaultInactiveClientExpiration, readFileStoreType, runWithStoreConfig, checkFileStoreMode, importToDatabase, exportFromDatabase)
import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange)
import qualified Simplex.Messaging.Crypto as C
import Simplex.Messaging.Encoding.String
import Simplex.Messaging.Protocol (ProtoServerWithAuth (..), pattern XFTPServer)
import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation (..))
import Simplex.Messaging.Server.CLI
import Simplex.Messaging.Server.Expiration
import Simplex.Messaging.Server.Information (ServerPublicInfo (..))
@@ -51,7 +53,7 @@ xftpServerCLI :: FilePath -> FilePath -> IO ()
xftpServerCLI = xftpServerCLI_ (\_ _ _ _ -> pure ()) (\_ -> pure ())
xftpServerCLI_ ::
(XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO ()) ->
(forall s. XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO ()) ->
(EmbeddedWebParams -> IO ()) ->
FilePath ->
FilePath ->
@@ -66,9 +68,13 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
doesFileExist iniFile >>= \case
True -> genOnline cfgPath certOpts
_ -> exitError $ "Error: server is not initialized (" <> iniFile <> " does not exist).\nRun `" <> executableName <> " init`."
Start ->
Start opts ->
doesFileExist iniFile >>= \case
True -> readIniFile iniFile >>= either exitError runServer
True -> readIniFile iniFile >>= either exitError (runServer opts)
_ -> exitError $ "Error: server is not initialized (" <> iniFile <> " does not exist).\nRun `" <> executableName <> " init`."
Database cmd ->
doesFileExist iniFile >>= \case
True -> readIniFile iniFile >>= either exitError (runDatabaseCmd cmd)
_ -> exitError $ "Error: server is not initialized (" <> iniFile <> " does not exist).\nRun `" <> executableName <> " init`."
Delete -> do
confirmOrExit
@@ -84,6 +90,21 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
executableName = "file-server"
storeLogFilePath = combine logPath "file-server-store.log"
defaultStaticPath = combine logPath "www"
runDatabaseCmd cmd ini = case cmd of
SCImport -> do
storeLogExists <- doesFileExist storeLogFilePath
unless storeLogExists $ exitError $ "Error: store log file " <> storeLogFilePath <> " does not exist."
confirmOrExit
("Import store log " <> storeLogFilePath <> " to PostgreSQL database?")
"Import cancelled."
importToDatabase storeLogFilePath ini MCYesUp
SCExport -> do
storeLogExists <- doesFileExist storeLogFilePath
when storeLogExists $ exitError $ "Error: store log file " <> storeLogFilePath <> " already exists."
confirmOrExit
("Export PostgreSQL database to store log " <> storeLogFilePath <> "?")
"Export cancelled."
exportFromDatabase storeLogFilePath ini MCConsole
initializeServer InitOptions {enableStoreLog, signAlgorithm, ip, fqdn, filesPath, fileSizeQuota, webStaticPath = webStaticPath_} = do
clearDirIfExists cfgPath
clearDirIfExists logPath
@@ -104,20 +125,20 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
\# available to the end users of the server.\n\
\# LICENSE: https://github.com/simplex-chat/simplexmq/blob/stable/LICENSE\n\
\# Include correct source code URI in case the server source code is modified in any way.\n\
\# source_code: https://github.com/simplex-chat/simplexmq\n\
\# source_code = https://github.com/simplex-chat/simplexmq\n\
\\n\
\# Declaring all below information is optional, any of these fields can be omitted.\n\
\# server_country: ISO-3166 2-letter code\n\
\# operator: entity (organization or person name)\n\
\# operator_country: ISO-3166 2-letter code\n\
\# website:\n\
\# admin_simplex: SimpleX address\n\
\# admin_email:\n\
\# complaints_simplex: SimpleX address\n\
\# complaints_email:\n\
\# hosting: entity (organization or person name)\n\
\# hosting_country: ISO-3166 2-letter code\n\
\# hosting_type: virtual\n\
\# server_country = ISO-3166 2-letter code\n\
\# operator = entity (organization or person name)\n\
\# operator_country = ISO-3166 2-letter code\n\
\# website =\n\
\# admin_simplex = SimpleX address\n\
\# admin_email =\n\
\# complaints_simplex = SimpleX address\n\
\# complaints_email =\n\
\# hosting = entity (organization or person name)\n\
\# hosting_country = ISO-3166 2-letter code\n\
\# hosting_type = virtual\n\
\\n\
\[STORE_LOG]\n\
\# The server uses STM memory for persistence,\n\
@@ -125,55 +146,63 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
\# This option enables saving memory to append only log,\n\
\# and restoring it when the server is started.\n\
\# Log is compacted on start (deleted objects are removed).\n"
<> ("enable: " <> onOff enableStoreLog <> "\n\n")
<> ("enable = " <> onOff enableStoreLog <> "\n\n")
<> "# File storage mode: `memory` or `database` (PostgreSQL).\n\
\store_files = memory\n\n\
\# Database connection settings for PostgreSQL database (`store_files = database`).\n\
\# db_connection = postgresql://xftp@/xftp_server_store\n\
\# db_schema = xftp_server\n\
\# db_pool_size = 10\n\n\
\# Write database changes to store log file\n\
\# db_store_log = off\n\n"
<> "# Expire files after the specified number of hours.\n"
<> ("expire_files_hours: " <> tshow defFileExpirationHours <> "\n\n")
<> "log_stats: off\n\
<> ("expire_files_hours = " <> tshow defFileExpirationHours <> "\n\n")
<> "log_stats = off\n\
\\n\
\# Log interval for real-time Prometheus metrics\n\
\# prometheus_interval: 60\n\
\# prometheus_interval = 60\n\
\\n\
\[AUTH]\n\
\# Set new_files option to off to completely prohibit uploading new files.\n\
\# This can be useful when you want to decommission the server, but still allow downloading the existing files.\n\
\new_files: on\n\
\new_files = on\n\
\\n\
\# Use create_password option to enable basic auth to upload new files.\n\
\# The password should be used as part of server address in client configuration:\n\
\# xftp://fingerprint:password@host1,host2\n\
\# The password will not be shared with file recipients, you must share it only\n\
\# with the users who you want to allow uploading files to your server.\n\
\# create_password: password to upload files (any printable ASCII characters without whitespace, '@', ':' and '/')\n\
\# create_password = password to upload files (any printable ASCII characters without whitespace, '@', ':' and '/')\n\
\\n\
\# control_port_admin_password:\n\
\# control_port_user_password:\n\
\# control_port_admin_password =\n\
\# control_port_user_password =\n\
\\n\
\[TRANSPORT]\n\
\# host is only used to print server address on start\n"
<> ("host: " <> T.pack host <> "\n")
<> ("port: " <> T.pack defaultServerPort <> "\n")
<> "log_tls_errors: off\n\
\# control_port: 5226\n\
<> ("host = " <> T.pack host <> "\n")
<> ("port = " <> T.pack defaultServerPort <> "\n")
<> "log_tls_errors = off\n\
\# control_port = 5226\n\
\\n\
\[FILES]\n"
<> ("path: " <> T.pack filesPath <> "\n")
<> ("storage_quota: " <> safeDecodeUtf8 (strEncode fileSizeQuota) <> "\n")
<> ("path = " <> T.pack filesPath <> "\n")
<> ("storage_quota = " <> safeDecodeUtf8 (strEncode fileSizeQuota) <> "\n")
<> "\n\
\[INACTIVE_CLIENTS]\n\
\# TTL and interval to check inactive clients\n\
\disconnect: off\n"
<> ("# ttl: " <> tshow (ttl defaultInactiveClientExpiration) <> "\n")
<> ("# check_interval: " <> tshow (checkInterval defaultInactiveClientExpiration) <> "\n")
\disconnect = off\n"
<> ("# ttl = " <> tshow (ttl defaultInactiveClientExpiration) <> "\n")
<> ("# check_interval = " <> tshow (checkInterval defaultInactiveClientExpiration) <> "\n")
<> "\n\
\[WEB]\n\
\# Set path to generate static mini-site for server information\n"
<> ("static_path: " <> T.pack (fromMaybe defaultStaticPath webStaticPath_) <> "\n\n")
<> ("static_path = " <> T.pack (fromMaybe defaultStaticPath webStaticPath_) <> "\n\n")
<> "# Run an embedded HTTP server on this port.\n\
\# http: 8000\n\n\
\# http = 8000\n\n\
\# TLS credentials for HTTPS web server on the same port as XFTP.\n\
\# cert: " <> T.pack (cfgPath `combine` "web.crt") <> "\n\
\# key: " <> T.pack (cfgPath `combine` "web.key") <> "\n"
runServer ini = do
\# cert = " <> T.pack (cfgPath `combine` "web.crt") <> "\n\
\# key = " <> T.pack (cfgPath `combine` "web.key") <> "\n"
runServer StartOptions {confirmMigrations} ini = do
hSetBuffering stdout LineBuffering
hSetBuffering stderr LineBuffering
fp <- checkSavedFingerprint cfgPath defaultX509Config
@@ -183,18 +212,24 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
printServiceInfo serverVersion srv
let information = serverPublicInfo ini
printSourceCode (sourceCode <$> information)
printXFTPConfig serverConfig
case webStaticPath' of
Just path -> do
let onionHost =
either (const Nothing) (find isOnion) $
strDecode @(L.NonEmpty TransportHost) . encodeUtf8 =<< lookupValue "TRANSPORT" "host" ini
webHttpPort = eitherToMaybe (lookupValue "WEB" "http" ini) >>= readMaybe . T.unpack
generateSite serverConfig information onionHost path
when (isJust webHttpPort || isJust webHttpsParams') $
serveStaticFiles EmbeddedWebParams {webStaticPath = path, webHttpPort, webHttpsParams = webHttpsParams'}
Nothing -> pure ()
runXFTPServer serverConfig
case readFileStoreType ini of
Left err -> error err
Right fsType -> do
checkFileStoreMode ini fsType storeLogFilePath
runWithStoreConfig fsType ini storeLogFilePath confirmMigrations $ \storeCfg -> do
let cfg = serverConfig storeCfg
printXFTPConfig cfg
case webStaticPath' of
Just path -> do
let onionHost =
either (const Nothing) (find isOnion) $
strDecode @(L.NonEmpty TransportHost) . encodeUtf8 =<< lookupValue "TRANSPORT" "host" ini
webHttpPort = eitherToMaybe (lookupValue "WEB" "http" ini) >>= readMaybe . T.unpack
generateSite cfg information onionHost path
when (isJust webHttpPort || isJust webHttpsParams') $
serveStaticFiles EmbeddedWebParams {webStaticPath = path, webHttpPort, webHttpsParams = webHttpsParams'}
Nothing -> pure ()
runXFTPServer cfg
where
isOnion = \case THOnionHost _ -> True; _ -> False
enableStoreLog = settingIsOn "STORE_LOG" "enable" ini
@@ -236,11 +271,13 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
webStaticPath' = eitherToMaybe $ T.unpack <$> lookupValue "WEB" "static_path" ini
serverConfig =
serverConfig :: XFTPStoreConfig s -> XFTPServerConfig s
serverConfig serverStoreCfg =
XFTPServerConfig
{ xftpPort = T.unpack $ strictIni "TRANSPORT" "port" ini,
controlPort = either (const Nothing) (Just . T.unpack) $ lookupValue "TRANSPORT" "control_port" ini,
fileIdSize = 16,
serverStoreCfg,
storeLogFile = enableStoreLog $> storeLogFilePath,
filesPath = T.unpack $ strictIni "FILES" "path" ini,
fileSizeQuota = either error unFileSize <$> strDecodeIni "FILES" "storage_quota" ini,
@@ -289,9 +326,16 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do
data CliCommand
= Init InitOptions
| OnlineCert CertOptions
| Start
| Start StartOptions
| Database StoreCmd
| Delete
data StoreCmd = SCImport | SCExport
newtype StartOptions = StartOptions
{ confirmMigrations :: MigrationConfirmation
}
data InitOptions = InitOptions
{ enableStoreLog :: Bool,
signAlgorithm :: SignAlgorithm,
@@ -308,7 +352,8 @@ cliCommandP cfgPath logPath iniFile =
hsubparser
( command "init" (info (Init <$> initP) (progDesc $ "Initialize server - creates " <> cfgPath <> " and " <> logPath <> " directories and configuration files"))
<> command "cert" (info (OnlineCert <$> certOptionsP) (progDesc $ "Generate new online TLS server credentials (configuration: " <> iniFile <> ")"))
<> command "start" (info (pure Start) (progDesc $ "Start server (configuration: " <> iniFile <> ")"))
<> command "start" (info (Start <$> startOptsP) (progDesc $ "Start server (configuration: " <> iniFile <> ")"))
<> command "database" (info (Database <$> storeCmdP) (progDesc "Import/export file store to/from PostgreSQL database"))
<> command "delete" (info (pure Delete) (progDesc "Delete configuration and log files"))
)
where
@@ -375,3 +420,20 @@ cliCommandP cfgPath logPath iniFile =
<> metavar "PATH"
)
pure InitOptions {enableStoreLog, signAlgorithm, ip, fqdn, filesPath, fileSizeQuota, webStaticPath}
-- | Options parser for the `start` command.
-- The only option is --confirm-migrations, defaulting to interactive
-- console confirmation ('MCConsole') when the flag is absent.
startOptsP :: Parser StartOptions
startOptsP =
  StartOptions
    <$> option
      parseConfirmMigrations
      ( long "confirm-migrations"
          <> metavar "CONFIRM_MIGRATIONS"
          <> help "Confirm PostgreSQL database migration: up, down (default is manual confirmation)"
          <> value MCConsole
      )
-- | Subcommand parser for the `database` command:
-- `import` loads the store log file into PostgreSQL,
-- `export` writes the PostgreSQL database back out to a store log file.
storeCmdP :: Parser StoreCmd
storeCmdP =
  hsubparser
    ( command "import" (info (pure SCImport) (progDesc "Import store log file into PostgreSQL database"))
        <> command "export" (info (pure SCExport) (progDesc "Export PostgreSQL database to store log file"))
    )
+134 -101
View File
@@ -1,51 +1,53 @@
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE TypeFamilies #-}
module Simplex.FileTransfer.Server.Store
( FileStore (..),
( FSType (..),
SFSType (..),
FileStoreClass (..),
FileRec (..),
FileRecipient (..),
STMFileStore (..),
RoundedFileTime,
newFileStore,
addFile,
setFilePath,
addRecipient,
deleteFile,
blockFile,
deleteRecipient,
expiredFilePath,
getFile,
ackFile,
fileTimePrecision,
)
where
import Data.Kind (Type)
import Control.Concurrent.STM
import Control.Monad
import Control.Monad (forM, void)
import qualified Data.Attoparsec.ByteString.Char8 as A
import Data.Int (Int64)
import qualified Data.Map.Strict as M
import Data.Maybe (catMaybes)
import Data.Set (Set)
import qualified Data.Set as S
import Data.Word (Word32)
import Simplex.FileTransfer.Protocol (FileInfo (..), SFileParty (..), XFTPFileId)
import Simplex.FileTransfer.Transport (XFTPErrorType (..))
import qualified Simplex.Messaging.Crypto as C
import Simplex.Messaging.Encoding.String
import Simplex.Messaging.Protocol (BlockingInfo, RcvPublicAuthKey, RecipientId, SenderId)
import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..))
import Simplex.Messaging.Server.StoreLog (StoreLog, closeStoreLog)
import System.IO (IOMode (..))
import Simplex.Messaging.SystemTime
import Simplex.Messaging.TMap (TMap)
import qualified Simplex.Messaging.TMap as TM
import Simplex.Messaging.Util (ifM, ($>>=))
import Simplex.Messaging.Util (ifM)
data FileStore = FileStore
{ files :: TMap SenderId FileRec,
recipients :: TMap RecipientId (SenderId, RcvPublicAuthKey),
usedStorage :: TVar Int64
}
data FSType = FSMemory | FSPostgres
data SFSType :: FSType -> Type where
SFSMemory :: SFSType 'FSMemory
SFSPostgres :: SFSType 'FSPostgres
data FileRec = FileRec
{ senderId :: SenderId,
@@ -59,28 +61,126 @@ data FileRec = FileRec
type RoundedFileTime = RoundedSystemTime 3600
fileTimePrecision :: Int64
fileTimePrecision = 3600 -- truncate creation time to 1 hour
fileTimePrecision = 3600
data FileRecipient = FileRecipient RecipientId RcvPublicAuthKey
data FileRecipient = FileRecipient RecipientId C.APublicAuthKey
deriving (Show)
instance StrEncoding FileRecipient where
strEncode (FileRecipient rId rKey) = strEncode rId <> ":" <> strEncode rKey
strP = FileRecipient <$> strP <* A.char ':' <*> strP
newFileStore :: IO FileStore
newFileStore = do
files <- TM.emptyIO
recipients <- TM.emptyIO
usedStorage <- newTVarIO 0
pure FileStore {files, recipients, usedStorage}
class FileStoreClass s where
type FileStoreConfig s
newFileStore :: FileStoreConfig s -> IO s
closeFileStore :: s -> IO ()
addFile :: s -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> IO (Either XFTPErrorType ())
setFilePath :: s -> SenderId -> FilePath -> IO (Either XFTPErrorType ())
addRecipient :: s -> SenderId -> FileRecipient -> IO (Either XFTPErrorType ())
deleteFile :: s -> SenderId -> IO (Either XFTPErrorType ())
deleteFiles :: s -> [SenderId] -> IO ()
deleteFiles s = mapM_ (void . deleteFile s)
blockFile :: s -> SenderId -> BlockingInfo -> Bool -> IO (Either XFTPErrorType ())
deleteRecipient :: s -> RecipientId -> FileRec -> IO ()
getFile :: s -> SFileParty p -> XFTPFileId -> IO (Either XFTPErrorType (FileRec, C.APublicAuthKey))
ackFile :: s -> RecipientId -> IO (Either XFTPErrorType ())
expiredFiles :: s -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)]
getUsedStorage :: s -> IO Int64
getFileCount :: s -> IO Int
addFile :: FileStore -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> STM (Either XFTPErrorType ())
addFile FileStore {files} sId fileInfo createdAt status =
ifM (TM.member sId files) (pure $ Left DUPLICATE_) $ do
f <- newFileRec sId fileInfo createdAt status
TM.insert sId f files
pure $ Right ()
-- STM in-memory store
data STMFileStore = STMFileStore
{ files :: TMap SenderId FileRec,
recipients :: TMap RecipientId (SenderId, RcvPublicAuthKey),
stmStoreLog :: TVar (Maybe (StoreLog 'WriteMode))
}
instance FileStoreClass STMFileStore where
type FileStoreConfig STMFileStore = ()
newFileStore () = do
files <- TM.emptyIO
recipients <- TM.emptyIO
stmStoreLog <- newTVarIO Nothing
pure STMFileStore {files, recipients, stmStoreLog}
closeFileStore STMFileStore {stmStoreLog} = readTVarIO stmStoreLog >>= mapM_ closeStoreLog
addFile STMFileStore {files} sId fileInfo createdAt status = atomically $
ifM (TM.member sId files) (pure $ Left DUPLICATE_) $ do
f <- newFileRec sId fileInfo createdAt status
TM.insert sId f files
pure $ Right ()
setFilePath st sId fPath = atomically $
withFile st sId $ \FileRec {filePath, fileStatus} -> do
readTVar filePath >>= \case
Just _ -> pure $ Left AUTH
Nothing ->
readTVar fileStatus >>= \case
EntityActive -> do
writeTVar filePath (Just fPath)
pure $ Right ()
_ -> pure $ Left AUTH
addRecipient st@STMFileStore {recipients} senderId (FileRecipient rId rKey) = atomically $
withFile st senderId $ \FileRec {recipientIds} -> do
rIds <- readTVar recipientIds
mem <- TM.member rId recipients
if rId `S.member` rIds || mem
then pure $ Left DUPLICATE_
else do
writeTVar recipientIds $! S.insert rId rIds
TM.insert rId (senderId, rKey) recipients
pure $ Right ()
deleteFile STMFileStore {files, recipients} senderId = atomically $ do
TM.lookupDelete senderId files >>= \case
Just FileRec {recipientIds} -> do
readTVar recipientIds >>= mapM_ (`TM.delete` recipients)
pure $ Right ()
_ -> pure $ Left AUTH
blockFile st senderId info _deleted = atomically $
withFile st senderId $ \FileRec {fileStatus} -> do
writeTVar fileStatus $! EntityBlocked info
pure $ Right ()
deleteRecipient STMFileStore {recipients} rId FileRec {recipientIds} = atomically $ do
TM.delete rId recipients
modifyTVar' recipientIds $ S.delete rId
getFile st party fId = atomically $ case party of
SFSender -> withFile st fId $ pure . Right . (\f -> (f, sndKey $ fileInfo f))
SFRecipient ->
TM.lookup fId (recipients st) >>= \case
Just (sId, rKey) -> withFile st sId $ pure . Right . (,rKey)
_ -> pure $ Left AUTH
ackFile st@STMFileStore {recipients} recipientId = atomically $ do
TM.lookupDelete recipientId recipients >>= \case
Just (sId, _) ->
withFile st sId $ \FileRec {recipientIds} -> do
modifyTVar' recipientIds $ S.delete recipientId
pure $ Right ()
_ -> pure $ Left AUTH
expiredFiles STMFileStore {files} old _limit = do
fs <- readTVarIO files
fmap catMaybes . forM (M.toList fs) $ \(sId, FileRec {fileInfo = FileInfo {size}, filePath, createdAt = RoundedSystemTime createdAt}) ->
if createdAt + fileTimePrecision < old
then do
path <- readTVarIO filePath
pure $ Just (sId, path, size)
else pure Nothing
getUsedStorage STMFileStore {files} =
M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 <$> readTVarIO files
getFileCount STMFileStore {files} = M.size <$> readTVarIO files
-- Internal STM helpers
newFileRec :: SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> STM FileRec
newFileRec senderId fileInfo createdAt status = do
@@ -89,75 +189,8 @@ newFileRec senderId fileInfo createdAt status = do
fileStatus <- newTVar status
pure FileRec {senderId, fileInfo, filePath, recipientIds, createdAt, fileStatus}
setFilePath :: FileStore -> SenderId -> FilePath -> STM (Either XFTPErrorType ())
setFilePath st sId fPath =
withFile st sId $ \FileRec {fileInfo, filePath} -> do
writeTVar filePath (Just fPath)
modifyTVar' (usedStorage st) (+ fromIntegral (size fileInfo))
pure $ Right ()
addRecipient :: FileStore -> SenderId -> FileRecipient -> STM (Either XFTPErrorType ())
addRecipient st@FileStore {recipients} senderId (FileRecipient rId rKey) =
withFile st senderId $ \FileRec {recipientIds} -> do
rIds <- readTVar recipientIds
mem <- TM.member rId recipients
if rId `S.member` rIds || mem
then pure $ Left DUPLICATE_
else do
writeTVar recipientIds $! S.insert rId rIds
TM.insert rId (senderId, rKey) recipients
pure $ Right ()
-- this function must be called after the file is deleted from the file system
deleteFile :: FileStore -> SenderId -> STM (Either XFTPErrorType ())
deleteFile FileStore {files, recipients, usedStorage} senderId = do
TM.lookupDelete senderId files >>= \case
Just FileRec {fileInfo, recipientIds} -> do
readTVar recipientIds >>= mapM_ (`TM.delete` recipients)
modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo)
pure $ Right ()
_ -> pure $ Left AUTH
-- this function must be called after the file is deleted from the file system
blockFile :: FileStore -> SenderId -> BlockingInfo -> Bool -> STM (Either XFTPErrorType ())
blockFile st@FileStore {usedStorage} senderId info deleted =
withFile st senderId $ \FileRec {fileInfo, fileStatus} -> do
when deleted $ modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo)
writeTVar fileStatus $! EntityBlocked info
pure $ Right ()
deleteRecipient :: FileStore -> RecipientId -> FileRec -> STM ()
deleteRecipient FileStore {recipients} rId FileRec {recipientIds} = do
TM.delete rId recipients
modifyTVar' recipientIds $ S.delete rId
getFile :: FileStore -> SFileParty p -> XFTPFileId -> STM (Either XFTPErrorType (FileRec, C.APublicAuthKey))
getFile st party fId = case party of
SFSender -> withFile st fId $ pure . Right . (\f -> (f, sndKey $ fileInfo f))
SFRecipient ->
TM.lookup fId (recipients st) >>= \case
Just (sId, rKey) -> withFile st sId $ pure . Right . (,rKey)
_ -> pure $ Left AUTH
expiredFilePath :: FileStore -> XFTPFileId -> Int64 -> STM (Maybe (Maybe FilePath))
expiredFilePath FileStore {files} sId old =
TM.lookup sId files
$>>= \FileRec {filePath, createdAt = RoundedSystemTime createdAt} ->
if createdAt + fileTimePrecision < old
then Just <$> readTVar filePath
else pure Nothing
ackFile :: FileStore -> RecipientId -> STM (Either XFTPErrorType ())
ackFile st@FileStore {recipients} recipientId = do
TM.lookupDelete recipientId recipients >>= \case
Just (sId, _) ->
withFile st sId $ \FileRec {recipientIds} -> do
modifyTVar' recipientIds $ S.delete recipientId
pure $ Right ()
_ -> pure $ Left AUTH
withFile :: FileStore -> SenderId -> (FileRec -> STM (Either XFTPErrorType a)) -> STM (Either XFTPErrorType a)
withFile FileStore {files} sId a =
withFile :: STMFileStore -> SenderId -> (FileRec -> STM (Either XFTPErrorType a)) -> STM (Either XFTPErrorType a)
withFile STMFileStore {files} sId a =
TM.lookup sId files >>= \case
Just f -> a f
_ -> pure $ Left AUTH
@@ -0,0 +1,370 @@
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
module Simplex.FileTransfer.Server.Store.Postgres
( PostgresFileStore (..),
importFileStore,
exportFileStore,
)
where
import qualified Control.Exception as E
import Control.Logger.Simple
import Control.Monad
import Control.Monad.Except
import Control.Monad.IO.Class
import Control.Monad.Trans.Except (throwE)
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as B
import Data.ByteString.Builder (Builder)
import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Lazy as LB
import Data.Functor (($>))
import Data.Int (Int32, Int64)
import Data.List (intersperse)
import qualified Data.List.NonEmpty as L
import qualified Data.Map.Strict as M
import qualified Data.Set as S
import Data.Text (Text)
import Data.Word (Word32)
import Database.PostgreSQL.Simple (Binary (..), In (..), Only (..), SqlError, (:.) (..))
import qualified Database.PostgreSQL.Simple as DB
import qualified Database.PostgreSQL.Simple.Copy as DB
import Database.PostgreSQL.Simple.Errors (ConstraintViolation (..), constraintViolation)
import Database.PostgreSQL.Simple.ToField (Action (..), ToField (..))
import GHC.IO (catchAny)
import Simplex.FileTransfer.Protocol (FileInfo (..), SFileParty (..))
import Simplex.FileTransfer.Server.Store
import Simplex.FileTransfer.Server.Store.Postgres.Config
import Simplex.FileTransfer.Server.Store.Postgres.Migrations (xftpServerMigrations)
import Simplex.FileTransfer.Server.StoreLog
import Simplex.FileTransfer.Transport (XFTPErrorType (..))
import Simplex.Messaging.Agent.Store.Postgres (closeDBStore, createDBStore)
import Simplex.Messaging.Agent.Store.Postgres.Common (DBStore, withTransaction)
import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..))
import Simplex.Messaging.Agent.Store.Shared (MigrationConfig (..), MigrationConfirmation (..))
import qualified Simplex.Messaging.Crypto as C
import Simplex.Messaging.Protocol (RcvPublicAuthKey, RecipientId, SenderId)
import Simplex.Messaging.Transport (EntityId (..))
import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..))
import Simplex.Messaging.Server.QueueStore.Postgres ()
import Simplex.Messaging.Server.StoreLog (openWriteStoreLog)
import Simplex.Messaging.Util (firstRow, tshow)
import System.Directory (renameFile)
import System.Exit (exitFailure)
import System.IO (IOMode (..), hFlush, stdout)
import UnliftIO.STM
-- | PostgreSQL-backed file store: a database store handle plus an optional
-- append-only store log that mirrors database changes (see 'withLog').
data PostgresFileStore = PostgresFileStore
  { dbStore :: DBStore, -- database store handle used for all queries
    dbStoreLog :: Maybe (StoreLog 'WriteMode) -- optional write-mode store log; Nothing disables logging
  }
-- | PostgreSQL implementation of the file store operations.
-- Pattern used by all mutating operations: the whole operation runs under
-- 'E.uninterruptibleMask_' so the database write and the subsequent
-- store-log append cannot be separated by an async exception, and the log
-- line is only appended after the database change succeeded (log appends
-- are best-effort, see 'withLog').
instance FileStoreClass PostgresFileStore where
  type FileStoreConfig PostgresFileStore = PostgresFileStoreCfg

  -- Opens the database (running migrations per config) and, if configured,
  -- a write store log; exits the process if the database cannot be opened.
  newFileStore PostgresFileStoreCfg {dbOpts, dbStoreLogPath, confirmMigrations} = do
    dbStore <- either err pure =<< createDBStore dbOpts xftpServerMigrations (MigrationConfig confirmMigrations Nothing)
    dbStoreLog <- mapM (openWriteStoreLog True) dbStoreLogPath
    pure PostgresFileStore {dbStore, dbStoreLog}
    where
      err e = do
        logError $ "STORE: newFileStore, error opening PostgreSQL database, " <> tshow e
        exitFailure

  closeFileStore PostgresFileStore {dbStore, dbStoreLog} = do
    closeDBStore dbStore
    mapM_ closeStoreLog dbStoreLog

  -- Inserts the file row; a unique-key violation maps to DUPLICATE_ via
  -- 'handleDuplicate' instead of throwing.
  addFile st sId fileInfo@FileInfo {sndKey, size, digest} createdAt status =
    E.uninterruptibleMask_ $ runExceptT $ do
      void $ withDB "addFile" st $ \db ->
        E.try
          ( DB.execute
              db
              "INSERT INTO files (sender_id, file_size, file_digest, sender_key, created_at, status) VALUES (?,?,?,?,?,?)"
              (sId, (fromIntegral size :: Int32), Binary digest, Binary (C.encodePubKey sndKey), createdAt, status)
          )
          >>= either handleDuplicate (pure . Right)
      withLog "addFile" st $ \s -> logAddFile s sId fileInfo createdAt status

  -- Sets the path only when it is not set yet and the file is active;
  -- otherwise the WHERE clause matches no rows and 'assertUpdated' fails
  -- with AUTH.
  setFilePath st sId fPath = E.uninterruptibleMask_ $ runExceptT $ do
    assertUpdated $ withDB' "setFilePath" st $ \db ->
      DB.execute db "UPDATE files SET file_path = ? WHERE sender_id = ? AND file_path IS NULL AND status = 'active'" (fPath, sId)
    withLog "setFilePath" st $ \s -> logPutFile s sId fPath

  -- Inserts a recipient row; unique violation -> DUPLICATE_,
  -- foreign-key violation (unknown sender) -> AUTH, via 'handleDuplicate'.
  addRecipient st senderId (FileRecipient rId rKey) = E.uninterruptibleMask_ $ runExceptT $ do
    void $ withDB "addRecipient" st $ \db ->
      E.try
        ( DB.execute
            db
            "INSERT INTO recipients (recipient_id, sender_id, recipient_key) VALUES (?,?,?)"
            (rId, senderId, Binary (C.encodePubKey rKey))
        )
        >>= either handleDuplicate (pure . Right)
    withLog "addRecipient" st $ \s -> logAddRecipients s senderId (pure $ FileRecipient rId rKey)

  deleteFile st sId = E.uninterruptibleMask_ $ runExceptT $ do
    assertUpdated $ withDB' "deleteFile" st $ \db ->
      DB.execute db "DELETE FROM files WHERE sender_id = ?" (Only sId)
    withLog "deleteFile" st $ \s -> logDeleteFile s sId

  -- Batch delete in a single statement; the affected-row count is ignored,
  -- so missing ids are not treated as errors.
  deleteFiles st sIds = E.uninterruptibleMask_ $ do
    withTransaction (dbStore st) $ \db ->
      DB.execute db "DELETE FROM files WHERE sender_id IN ?" (Only (In sIds))
    withLog "deleteFiles" st $ \s -> mapM_ (logDeleteFile s) sIds

  -- Marks the file blocked; the _deleted flag is unused here (storage
  -- accounting is derived from the files table, see getUsedStorage).
  blockFile st sId info _deleted = E.uninterruptibleMask_ $ runExceptT $ do
    assertUpdated $ withDB' "blockFile" st $ \db ->
      DB.execute db "UPDATE files SET status = ? WHERE sender_id = ?" (EntityBlocked info, sId)
    withLog "blockFile" st $ \s -> logBlockFile s sId info

  -- The in-memory FileRec is not needed here: the recipient row is removed
  -- directly by id. Errors are discarded (void).
  deleteRecipient st rId _fr =
    void $ runExceptT $ withDB' "deleteRecipient" st $ \db ->
      DB.execute db "DELETE FROM recipients WHERE recipient_id = ?" (Only rId)

  -- Looks the file up by sender id (SFSender) or via the recipients table
  -- (SFRecipient); missing rows -> AUTH, undecodable keys -> INTERNAL.
  getFile st party fId = runExceptT $ case party of
    SFSender -> do
      row <- loadFileRow "SELECT sender_id, file_size, file_digest, sender_key, file_path, created_at, status FROM files WHERE sender_id = ?"
      fr <- ExceptT $ rowToFileRec row
      pure (fr, sndKey (fileInfo fr))
    SFRecipient -> do
      row :. Only rcpKeyBs <-
        loadFileRow
          "SELECT f.sender_id, f.file_size, f.file_digest, f.sender_key, f.file_path, f.created_at, f.status, r.recipient_key FROM files f JOIN recipients r ON r.sender_id = f.sender_id WHERE r.recipient_id = ?"
      fr <- ExceptT $ rowToFileRec row
      rcpKey <- either (const $ throwE INTERNAL) pure $ C.decodePubKey rcpKeyBs
      pure (fr, rcpKey)
    where
      loadFileRow :: DB.FromRow r => DB.Query -> ExceptT XFTPErrorType IO r
      loadFileRow q =
        withDB "getFile" st $ \db ->
          firstRow id AUTH $ DB.query db q (Only fId)

  ackFile st rId = E.uninterruptibleMask_ $ runExceptT $ do
    assertUpdated $ withDB' "ackFile" st $ \db ->
      DB.execute db "DELETE FROM recipients WHERE recipient_id = ?" (Only rId)
    withLog "ackFile" st $ \s -> logAckFile s rId

  -- Expiry condition (created_at + fileTimePrecision < old) mirrors the
  -- STM store's check; results are capped by `limit` and returned oldest
  -- first.
  expiredFiles st old limit =
    fmap toResult $ withTransaction (dbStore st) $ \db ->
      DB.query
        db
        "SELECT sender_id, file_path, file_size FROM files WHERE created_at + ? < ? ORDER BY created_at LIMIT ?"
        (fileTimePrecision, old, limit)
    where
      toResult :: [(SenderId, Maybe FilePath, Int32)] -> [(SenderId, Maybe FilePath, Word32)]
      toResult = map (\(sId, path, size) -> (sId, path, fromIntegral size))

  -- Sums file_size over all files; COALESCE makes the empty table yield 0.
  getUsedStorage st =
    withTransaction (dbStore st) $ \db -> do
      [Only total] <- DB.query_ db "SELECT COALESCE(SUM(file_size::BIGINT), 0)::BIGINT FROM files"
      pure total

  getFileCount st =
    withTransaction (dbStore st) $ \db -> do
      [Only count] <- DB.query_ db "SELECT COUNT(*) FROM files"
      pure (fromIntegral (count :: Int64))
-- Internal helpers
-- | Build an in-memory 'FileRec' from data loaded from the database.
-- Each mutable field gets a freshly initialized 'TVar'; the recipient set
-- starts empty (recipients live in their own table and are loaded
-- separately when needed).
mkFileRec :: SenderId -> FileInfo -> Maybe FilePath -> RoundedFileTime -> ServerEntityStatus -> IO FileRec
mkFileRec sId fi path_ t status_ = do
  pathVar <- newTVarIO path_
  rcptVar <- newTVarIO S.empty
  statusVar <- newTVarIO status_
  pure FileRec {senderId = sId, fileInfo = fi, filePath = pathVar, recipientIds = rcptVar, createdAt = t, fileStatus = statusVar}
-- | Raw database row for a file record, as selected by 'getFile'.
type FileRecRow = (SenderId, Int32, ByteString, ByteString, Maybe FilePath, RoundedFileTime, ServerEntityStatus)

-- | Decode a database row into an in-memory 'FileRec'.
-- Fails with 'INTERNAL' when the stored sender key does not decode.
rowToFileRec :: FileRecRow -> IO (Either XFTPErrorType FileRec)
rowToFileRec (sId, size, digest, sndKeyBs, path, createdAt, status) =
  either (\_ -> pure $ Left INTERNAL) decoded $ C.decodePubKey sndKeyBs
  where
    decoded sndKey =
      Right <$> mkFileRec sId FileInfo {sndKey, size = fromIntegral size, digest} path createdAt status
-- DB helpers
-- Run a DB action in a transaction, converting any exception thrown by the
-- transaction (connection failures, SqlError re-thrown by handlers, etc.)
-- into INTERNAL after logging it with the operation name for diagnostics.
withDB :: forall a. Text -> PostgresFileStore -> (DB.Connection -> IO (Either XFTPErrorType a)) -> ExceptT XFTPErrorType IO a
withDB op st action =
ExceptT $ E.try (withTransaction (dbStore st) action) >>= either logErr pure
where
logErr :: E.SomeException -> IO (Either XFTPErrorType a)
logErr e = logError ("STORE: " <> err) $> Left INTERNAL
where
err = op <> ", withDB, " <> tshow e
-- Like 'withDB', for DB actions that cannot fail with a domain error:
-- the action's result is wrapped in Right before being returned.
withDB' :: Text -> PostgresFileStore -> (DB.Connection -> IO a) -> ExceptT XFTPErrorType IO a
withDB' op st action = withDB op st $ \db -> Right <$> action db
-- Fail with AUTH when a DB statement reports zero affected rows —
-- used to turn "nothing matched" updates/deletes into an auth error.
assertUpdated :: ExceptT XFTPErrorType IO Int64 -> ExceptT XFTPErrorType IO ()
assertUpdated action = do
n <- action
when (n == 0) $ throwE AUTH
-- Map PostgreSQL constraint violations to protocol errors:
-- unique violation -> DUPLICATE_ (entity already exists),
-- foreign-key violation -> AUTH (referenced file does not exist).
-- Any other SqlError is re-thrown, to be caught by withDB as INTERNAL.
handleDuplicate :: SqlError -> IO (Either XFTPErrorType a)
handleDuplicate e = case constraintViolation e of
Just (UniqueViolation _) -> pure $ Left DUPLICATE_
Just (ForeignKeyViolation _ _) -> pure $ Left AUTH
_ -> E.throwIO e
-- Run a store-log action if a store log is configured (dbStoreLog is
-- Maybe). Log-write failures are only warned about, never propagated —
-- the DB remains the source of truth.
withLog :: MonadIO m => Text -> PostgresFileStore -> (StoreLog 'WriteMode -> IO ()) -> m ()
withLog op PostgresFileStore {dbStoreLog} action =
forM_ dbStoreLog $ \sl -> liftIO $ action sl `catchAny` \e ->
logWarn $ "STORE: " <> op <> ", withLog, " <> tshow e
-- Import: StoreLog -> PostgreSQL
-- | One-shot import: XFTP store log -> PostgreSQL.
-- Reads the store log into an in-memory STM store, bulk-loads files and
-- recipients using the COPY protocol, verifies row counts, and renames
-- the log file to .bak so it is not re-imported on the next start.
importFileStore :: FilePath -> PostgresFileStoreCfg -> IO ()
importFileStore storeLogFilePath dbCfg = do
putStrLn $ "Reading store log: " <> storeLogFilePath
stmStore <- newFileStore () :: IO STMFileStore
sl <- readWriteFileStore storeLogFilePath stmStore
closeStoreLog sl
allFiles <- readTVarIO (files stmStore)
allRcps <- readTVarIO (recipients stmStore)
let fileCount = M.size allFiles
rcpCount = M.size allRcps
putStrLn $ "Loaded " <> show fileCount <> " files, " <> show rcpCount <> " recipients."
-- Create the schema and run migrations without prompting (MCYesUp).
let dbCfg' = dbCfg {dbOpts = (dbOpts dbCfg) {createSchema = True}, confirmMigrations = MCYesUp}
pgStore <- newFileStore dbCfg' :: IO PostgresFileStore
-- Refuse to import into a non-empty database: COPY would fail part-way
-- through on duplicate primary keys.
existingCount <- getFileCount pgStore
when (existingCount > 0) $ do
putStrLn $ "WARNING: database already contains " <> show existingCount <> " files. Import will fail on duplicate keys."
putStrLn "Drop the existing schema first or use a fresh database."
exitFailure
putStrLn "Importing files..."
fCnt <- withTransaction (dbStore pgStore) $ \db -> do
DB.copy_
db
"COPY files (sender_id, file_size, file_digest, sender_key, file_path, created_at, status) FROM STDIN WITH (FORMAT csv)"
-- Stream one CSV line per file; print progress every 10k rows.
iforM_ (M.toList allFiles) $ \i (sId, fr) -> do
DB.putCopyData db =<< fileRecToCSV sId fr
when (i > 0 && i `mod` 10000 == 0) $ putStr (" " <> show i <> " files\r") >> hFlush stdout
DB.putCopyEnd db
[Only cnt] <- DB.query_ db "SELECT COUNT(*) FROM files"
pure (cnt :: Int64)
putStrLn $ "Imported " <> show fCnt <> " files."
putStrLn "Importing recipients..."
rCnt <- withTransaction (dbStore pgStore) $ \db -> do
DB.copy_
db
"COPY recipients (recipient_id, sender_id, recipient_key) FROM STDIN WITH (FORMAT csv)"
iforM_ (M.toList allRcps) $ \i (rId, (sId, rKey)) -> do
DB.putCopyData db $ recipientToCSV rId sId rKey
when (i > 0 && i `mod` 10000 == 0) $ putStr (" " <> show i <> " recipients\r") >> hFlush stdout
DB.putCopyEnd db
[Only cnt] <- DB.query_ db "SELECT COUNT(*) FROM recipients"
pure (cnt :: Int64)
putStrLn $ "Imported " <> show rCnt <> " recipients."
-- Count mismatches are reported as warnings but do not abort the import.
when (fromIntegral fileCount /= fCnt) $
putStrLn $ "WARNING: expected " <> show fileCount <> " files, got " <> show fCnt
when (fromIntegral rcpCount /= rCnt) $
putStrLn $ "WARNING: expected " <> show rcpCount <> " recipients, got " <> show rCnt
closeFileStore pgStore
renameFile storeLogFilePath (storeLogFilePath <> ".bak")
putStrLn $ "Store log renamed to " <> storeLogFilePath <> ".bak"
-- Export: PostgreSQL -> StoreLog
-- | One-shot export: PostgreSQL -> XFTP store log.
-- Streams the database in two folds so nothing is held in memory beyond
-- one sender's recipient batch. Rows with undecodable keys are skipped
-- with a warning rather than aborting the export.
exportFileStore :: FilePath -> PostgresFileStoreCfg -> IO ()
exportFileStore storeLogFilePath dbCfg = do
pgStore <- newFileStore dbCfg :: IO PostgresFileStore
sl <- openWriteStoreLog False storeLogFilePath
-- Fold 1: stream files, write FNEW + FPUT per file
putStrLn "Exporting files..."
!fCnt <- withTransaction (dbStore pgStore) $ \db ->
DB.fold_
db
"SELECT sender_id, file_size, file_digest, sender_key, file_path, created_at, status FROM files ORDER BY created_at"
(0 :: Int)
( \(!fc) (sId, size :: Int32, digest :: ByteString, sndKeyBs :: ByteString, path :: Maybe String, createdAt, status) ->
case C.decodePubKey sndKeyBs of
Right sndKey -> do
let fileInfo = FileInfo {sndKey, size = fromIntegral size, digest}
logAddFile sl sId fileInfo createdAt status
-- FPUT is only written for files that have an uploaded body.
forM_ path $ logPutFile sl sId
pure (fc + 1)
Left _ -> do
putStrLn $ "WARNING: invalid sender key for " <> show sId
pure fc
)
-- Fold 2: stream recipients ordered by sender_id, flush FADD on sender change
putStrLn "Exporting recipients..."
-- Accumulator: (current sender, recipients buffered for it, flushed count).
-- Buffered recipients are flushed as one FADD record whenever the sender
-- changes, and once more after the fold for the final sender.
-- NOTE(review): buf is built by prepending, so recipients within one FADD
-- appear in reverse query order — presumably order is irrelevant; confirm.
!rCnt <- withTransaction (dbStore pgStore) $ \db ->
DB.fold_
db
"SELECT sender_id, recipient_id, recipient_key FROM recipients ORDER BY sender_id"
(Nothing :: Maybe SenderId, [] :: [FileRecipient], 0 :: Int)
( \(!prevSId, !buf, !rc) (sId, rId, rKeyBs :: ByteString) ->
case C.decodePubKey rKeyBs of
Right rKey -> do
let rcp = FileRecipient rId rKey
case prevSId of
Just prev | prev /= sId -> do
forM_ (L.nonEmpty buf) $ logAddRecipients sl prev
pure (Just sId, [rcp], rc + length buf)
_ -> pure (Just sId, rcp : buf, rc)
Left _ -> putStrLn ("WARNING: invalid recipient key for " <> show rId) $> (prevSId, buf, rc)
)
>>= \(lastSId, buf, rc) -> do
forM_ lastSId $ \sId -> forM_ (L.nonEmpty buf) $ logAddRecipients sl sId
pure (rc + length buf)
closeStoreLog sl
closeFileStore pgStore
putStrLn $ "Exported " <> show fCnt <> " files, " <> show rCnt <> " recipients to " <> storeLogFilePath
-- CSV helpers for COPY protocol
-- Traverse a list together with its 0-based index, discarding results.
iforM_ :: Monad m => [a] -> (Int -> a -> m ()) -> m ()
iforM_ xs f = mapM_ (uncurry f) (zip [0 ..] xs)
-- Render one file record as a CSV line for the COPY protocol.
-- Reads the mutable path/status TVars at call time; fields are
-- comma-joined and terminated by a newline as COPY ... FORMAT csv expects.
fileRecToCSV :: SenderId -> FileRec -> IO ByteString
fileRecToCSV sId FileRec {fileInfo = FileInfo {sndKey, size, digest}, filePath, createdAt, fileStatus} = do
path <- readTVarIO filePath
status <- readTVarIO fileStatus
pure $ LB.toStrict $ BB.toLazyByteString $ mconcat (BB.char7 ',' `intersperse` fields path status) <> BB.char7 '\n'
where
-- Column order must match the COPY statement in importFileStore:
-- sender_id, file_size, file_digest, sender_key, file_path, created_at, status.
fields path status =
[ renderField (toField (Binary (unEntityId sId))),
renderField (toField (fromIntegral size :: Int32)),
renderField (toField (Binary digest)),
renderField (toField (Binary (C.encodePubKey sndKey))),
-- Nothing renders as an empty field, which COPY reads as NULL.
nullable (toField <$> path),
renderField (toField createdAt),
-- Status is quoted: its encoding may contain commas (e.g. blocking info).
quotedField (toField status)
]
-- Render one recipient row as a CSV line for the COPY protocol.
-- Column order must match the COPY statement in importFileStore:
-- recipient_id, sender_id, recipient_key.
recipientToCSV :: RecipientId -> SenderId -> RcvPublicAuthKey -> ByteString
recipientToCSV rId sId rKey =
LB.toStrict $ BB.toLazyByteString $ mconcat (BB.char7 ',' `intersperse` fields) <> BB.char7 '\n'
where
fields =
[ renderField (toField (Binary (unEntityId rId))),
renderField (toField (Binary (unEntityId sId))),
renderField (toField (Binary (C.encodePubKey rKey)))
]
-- Render a postgresql-simple Action as raw CSV field bytes.
-- EscapeByteA uses PostgreSQL's hex bytea form (\x...), which COPY csv
-- accepts for bytea columns; Many concatenates sub-actions in order.
renderField :: Action -> Builder
renderField = \case
Plain bld -> bld
Escape s -> BB.byteString s
EscapeByteA s -> BB.string7 "\\x" <> BB.byteStringHex s
EscapeIdentifier s -> BB.byteString s
Many as -> mconcat (map renderField as)
-- Render an optional field: Nothing becomes an empty CSV field,
-- which COPY ... FORMAT csv interprets as NULL.
nullable :: Maybe Action -> Builder
nullable Nothing = mempty
nullable (Just a) = renderField a
-- Render a field wrapped in CSV double quotes, doubling any embedded
-- quote characters per the CSV escaping rule. Used for fields whose
-- encoding may contain commas (e.g. the status column).
quotedField :: Action -> Builder
quotedField a = BB.char7 '"' <> escapeQuotes (renderField a) <> BB.char7 '"'
where
-- Materializes the builder to scan it byte-by-byte; fields are small,
-- so the extra copy is negligible.
escapeQuotes bld =
let bs = LB.toStrict $ BB.toLazyByteString bld
in BB.byteString $ B.concatMap (\c -> if c == '"' then "\"\"" else B.singleton c) bs
@@ -0,0 +1,25 @@
{-# LANGUAGE OverloadedStrings #-}
module Simplex.FileTransfer.Server.Store.Postgres.Config
( PostgresFileStoreCfg (..),
defaultXFTPDBOpts,
)
where
import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..))
import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation)
-- Configuration for the PostgreSQL-backed XFTP file store.
data PostgresFileStoreCfg = PostgresFileStoreCfg
{ -- database connection, schema and pool settings
dbOpts :: DBOpts,
-- optional append-only store log written alongside the database
dbStoreLogPath :: Maybe FilePath,
-- how to confirm pending schema migrations on startup
confirmMigrations :: MigrationConfirmation
}
-- Default database options for the XFTP server: local `xftp_server_store`
-- database as user `xftp`, dedicated `xftp_server` schema, pool of 10
-- connections; schema creation is opt-in (enabled explicitly on import).
defaultXFTPDBOpts :: DBOpts
defaultXFTPDBOpts =
DBOpts
{ connstr = "postgresql://xftp@/xftp_server_store",
schema = "xftp_server",
poolSize = 10,
createSchema = False
}
@@ -0,0 +1,47 @@
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
module Simplex.FileTransfer.Server.Store.Postgres.Migrations
( xftpServerMigrations,
)
where
import Data.List (sortOn)
import Data.Text (Text)
import Simplex.Messaging.Agent.Store.Shared
import Text.RawString.QQ (r)
-- Raw migration list: (name, up SQL, optional down SQL).
-- The initial migration has no down script (Nothing).
xftpSchemaMigrations :: [(String, Text, Maybe Text)]
xftpSchemaMigrations =
[ ("20260325_initial", m20260325_initial, Nothing)
]
-- | The list of migrations in ascending order by date
xftpServerMigrations :: [Migration]
xftpServerMigrations = sortOn name $ map migration xftpSchemaMigrations
where
-- NamedFieldPuns: name/up/down bind directly to the record fields.
migration (name, up, down) = Migration {name, up, down}
-- Initial schema: files keyed by sender_id, recipients keyed by
-- recipient_id with ON DELETE CASCADE from files; indexes support
-- recipient lookups by sender and expiry scans by created_at.
-- (SQL text is a quasiquoted string literal and must not be edited here.)
m20260325_initial :: Text
m20260325_initial =
[r|
CREATE TABLE files (
sender_id BYTEA NOT NULL PRIMARY KEY,
file_size INTEGER NOT NULL,
file_digest BYTEA NOT NULL,
sender_key BYTEA NOT NULL,
file_path TEXT,
created_at BIGINT NOT NULL,
status TEXT NOT NULL DEFAULT 'active'
);
CREATE TABLE recipients (
recipient_id BYTEA NOT NULL PRIMARY KEY,
sender_id BYTEA NOT NULL REFERENCES files ON DELETE CASCADE,
recipient_key BYTEA NOT NULL
);
CREATE INDEX idx_recipients_sender_id ON recipients (sender_id);
CREATE INDEX idx_files_created_at ON files (created_at);
|]
+10 -6
View File
@@ -10,6 +10,7 @@ module Simplex.FileTransfer.Server.StoreLog
FileStoreLogRecord (..),
closeStoreLog,
readWriteFileStore,
writeFileStore,
logAddFile,
logPutFile,
logAddRecipients,
@@ -32,6 +33,7 @@ import Data.Map.Strict (Map)
import qualified Data.Map.Strict as M
import Simplex.FileTransfer.Protocol (FileInfo (..))
import Simplex.FileTransfer.Server.Store
import Simplex.FileTransfer.Transport (XFTPErrorType (..))
import Simplex.Messaging.Encoding.String
import Simplex.Messaging.Protocol (BlockingInfo, RcvPublicAuthKey, RecipientId, SenderId)
import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..))
@@ -87,20 +89,22 @@ logBlockFile s fId = logFileStoreRecord s . BlockFile fId
logAckFile :: StoreLog 'WriteMode -> RecipientId -> IO ()
logAckFile s = logFileStoreRecord s . AckFile
readWriteFileStore :: FilePath -> FileStore -> IO (StoreLog 'WriteMode)
readWriteFileStore :: FilePath -> STMFileStore -> IO (StoreLog 'WriteMode)
readWriteFileStore = readWriteStoreLog readFileStore writeFileStore
readFileStore :: FilePath -> FileStore -> IO ()
readFileStore :: FilePath -> STMFileStore -> IO ()
readFileStore f st = mapM_ (addFileLogRecord . LB.toStrict) . LB.lines =<< LB.readFile f
where
addFileLogRecord s = case strDecode s of
Left e -> B.putStrLn $ "Log parsing error (" <> B.pack e <> "): " <> B.take 100 s
Right lr ->
atomically (addToStore lr) >>= \case
addToStore lr >>= \case
Left e -> B.putStrLn $ "Log processing error (" <> bshow e <> "): " <> B.take 100 s
_ -> pure ()
addToStore = \case
AddFile sId file createdAt status -> addFile st sId file createdAt status
AddFile sId file createdAt status
| size file > 0 -> addFile st sId file createdAt status
| otherwise -> pure $ Left SIZE
PutFile qId path -> setFilePath st qId path
AddRecipients sId rcps -> runExceptT $ addRecipients sId rcps
DeleteFile sId -> deleteFile st sId
@@ -108,8 +112,8 @@ readFileStore f st = mapM_ (addFileLogRecord . LB.toStrict) . LB.lines =<< LB.re
AckFile rId -> ackFile st rId
addRecipients sId rcps = mapM_ (ExceptT . addRecipient st sId) rcps
writeFileStore :: StoreLog 'WriteMode -> FileStore -> IO ()
writeFileStore s FileStore {files, recipients} = do
writeFileStore :: StoreLog 'WriteMode -> STMFileStore -> IO ()
writeFileStore s STMFileStore {files, recipients} = do
allRcps <- readTVarIO recipients
readTVarIO files >>= mapM_ (logFile allRcps)
where
+9 -4
View File
@@ -230,7 +230,7 @@ import qualified Data.Attoparsec.ByteString.Char8 as A
import Data.Bifunctor (bimap, first)
import Data.ByteArray (ByteArrayAccess)
import qualified Data.ByteArray as BA
import Data.ByteString.Base64 (decode, encode)
import Data.ByteString.Base64 (decode)
import qualified Data.ByteString.Base64.URL as U
import Data.ByteString.Char8 (ByteString)
import qualified Data.ByteString.Char8 as B
@@ -805,17 +805,22 @@ data ASignature
deriving instance Show ASignature
class CryptoSignature s where
serializeSignature :: s -> ByteString
serializeSignature = encode . signatureBytes
signatureBytes :: s -> ByteString
decodeSignature :: ByteString -> Either String s
instance CryptoSignature (Signature s) => StrEncoding (Signature s) where
strEncode = serializeSignature
strEncode = strEncode . signatureBytes
{-# INLINE strEncode #-}
strDecode = decodeSignature
{-# INLINE strDecode #-}
instance CryptoSignature (Signature s) => ToJSON (Signature s) where
toJSON = strToJSON
toEncoding = strToJEncoding
instance CryptoSignature (Signature s) => FromJSON (Signature s) where
parseJSON = strParseJSON "Signature"
instance CryptoSignature (Signature s) => Encoding (Signature s) where
smpEncode = smpEncode . signatureBytes
{-# INLINE smpEncode #-}
@@ -165,50 +165,50 @@ ntfServerCLI cfgPath logPath =
\# This option enables saving memory to append only log,\n\
\# and restoring it when the server is started.\n\
\# Log is compacted on start (deleted objects are removed).\n"
<> ("enable: " <> onOff enableStoreLog <> "\n\n")
<> ("enable = " <> onOff enableStoreLog <> "\n\n")
<> "# Database connection settings for PostgreSQL database.\n"
<> iniDbOpts dbOptions defaultNtfDBOpts
<> "Time to retain deleted entities in the database, days.\n"
<> ("# db_deleted_ttl: " <> tshow defaultDeletedTTL <> "\n\n")
<> "log_stats: off\n\n\
<> "# Time to retain deleted entities in the database, days.\n"
<> ("# db_deleted_ttl = " <> tshow defaultDeletedTTL <> "\n\n")
<> "log_stats = off\n\n\
\# Log interval for real-time Prometheus metrics\n\
\# prometheus_interval: 60\n\
\# prometheus_interval = 60\n\
\\n\
\[AUTH]\n\
\# control_port_admin_password:\n\
\# control_port_user_password:\n\
\# control_port_admin_password =\n\
\# control_port_user_password =\n\
\\n\
\[TRANSPORT]\n\
\# Host is only used to print server address on start.\n\
\# You can specify multiple server ports.\n"
<> ("host: " <> T.pack host <> "\n")
<> ("port: " <> T.pack defaultServerPort <> "\n")
<> "log_tls_errors: off\n\n\
\# Use `websockets: 443` to run websockets server in addition to plain TLS.\n\
\websockets: off\n\n\
\# control_port: 5227\n\
<> ("host = " <> T.pack host <> "\n")
<> ("port = " <> T.pack defaultServerPort <> "\n")
<> "log_tls_errors = off\n\n\
\# Use `websockets = 443` to run websockets server in addition to plain TLS.\n\
\websockets = off\n\n\
\# control_port = 5227\n\
\\n\
\[SUBSCRIBER]\n\
\# Network configuration for notification server client.\n\
\# `host_mode` can be 'public' (default) or 'onion'.\n\
\# It defines prefferred hostname for destination servers with multiple hostnames.\n\
\# host_mode: public\n\
\# required_host_mode: off\n\n\
\# host_mode = public\n\
\# required_host_mode = off\n\n\
\# SOCKS proxy port for subscribing to SMP servers.\n\
\# You may need a separate instance of SOCKS proxy for incoming single-hop requests.\n\
\# socks_proxy: localhost:9050\n\n\
\# socks_proxy = localhost:9050\n\n\
\# `socks_mode` can be 'onion' for SOCKS proxy to be used for .onion destination hosts only (default)\n\
\# or 'always' to be used for all destination hosts (can be used if it is an .onion server).\n\
\# socks_mode: onion\n\n\
\# socks_mode = onion\n\n\
\# The domain suffixes of the relays you operate (space-separated) to count as separate proxy statistics.\n\
\# own_server_domains: \n\n\
\# own_server_domains = \n\n\
\# User service subscriptions with server certificate\n\n\
\# use_service_credentials: off\n\n\
\# use_service_credentials = off\n\n\
\[INACTIVE_CLIENTS]\n\
\# TTL and interval to check inactive clients\n\
\disconnect: off\n"
<> ("# ttl: " <> tshow (ttl defaultInactiveClientExpiration) <> "\n")
<> ("# check_interval: " <> tshow (checkInterval defaultInactiveClientExpiration) <> "\n")
\disconnect = off\n"
<> ("# ttl = " <> tshow (ttl defaultInactiveClientExpiration) <> "\n")
<> ("# check_interval = " <> tshow (checkInterval defaultInactiveClientExpiration) <> "\n")
enableStoreLog' = settingIsOn "STORE_LOG" "enable"
runServer startOptions ini = do
setLogLevel $ logLevel startOptions
+7 -6
View File
@@ -27,6 +27,7 @@ module Simplex.Messaging.Server.CLI
certOptionsP,
dbOptsP,
startOptionsP,
parseConfirmMigrations,
parseLogLevel,
genOnline,
warnCAPrivateKeyFile,
@@ -288,12 +289,12 @@ startOptionsP = do
<> value MCConsole
)
pure StartOptions {maintenance, compactLog, logLevel, skipWarnings, confirmMigrations}
where
parseConfirmMigrations :: ReadM MigrationConfirmation
parseConfirmMigrations = eitherReader $ \case
"up" -> Right MCYesUp
"down" -> Right MCYesUpDown
_ -> Left "invalid migration confirmation, pass 'up' or 'down'"
parseConfirmMigrations :: ReadM MigrationConfirmation
parseConfirmMigrations = eitherReader $ \case
"up" -> Right MCYesUp
"down" -> Right MCYesUpDown
_ -> Left "invalid migration confirmation, pass 'up' or 'down'"
parseLogLevel :: ReadM LogLevel
parseLogLevel = eitherReader $ \case
+59 -59
View File
@@ -78,101 +78,101 @@ iniFileContent cfgPath logPath opts host basicAuth controlPortPwds =
informationIniContent opts
<> "[STORE_LOG]\n\
\# The server uses memory or PostgreSQL database for persisting queue records.\n\
\# Use `enable: on` to use append-only log to preserve and restore queue records on restart.\n\
\# Use `enable = on` to use append-only log to preserve and restore queue records on restart.\n\
\# Log is compacted on start (deleted objects are removed).\n"
<> ("enable: " <> onOff enableStoreLog <> "\n\n")
<> ("enable = " <> onOff enableStoreLog <> "\n\n")
<> "# Queue storage mode: `memory` or `database` (to store queue records in PostgreSQL database).\n\
\# `memory` - in-memory persistence, with optional append-only log (`enable: on`).\n\
\# `database`- PostgreSQL databass (requires `store_messages: journal`).\n\
\store_queues: memory\n\n\
\# Database connection settings for PostgreSQL database (`store_queues: database`).\n"
\# `memory` - in-memory persistence, with optional append-only log (`enable = on`).\n\
\# `database`- PostgreSQL databass (requires `store_messages = journal`).\n\
\store_queues = memory\n\n\
\# Database connection settings for PostgreSQL database (`store_queues = database`).\n"
<> iniDbOpts dbOptions defaultDBOpts
<> "# Write database changes to store log file\n\
\# db_store_log: off\n\n\
\# db_store_log = off\n\n\
\# Time to retain deleted queues in the database, days.\n"
<> ("# db_deleted_ttl: " <> tshow defaultDeletedTTL <> "\n\n")
<> ("# db_deleted_ttl = " <> tshow defaultDeletedTTL <> "\n\n")
<> "# Message storage mode: `memory` or `journal`.\n\
\store_messages: memory\n\n\
\store_messages = memory\n\n\
\# When store_messages is `memory`, undelivered messages are optionally saved and restored\n\
\# when the server restarts, they are preserved in the .bak file until the next restart.\n"
<> ("restore_messages: " <> onOff enableStoreLog <> "\n\n")
<> ("restore_messages = " <> onOff enableStoreLog <> "\n\n")
<> "# Messages and notifications expiration periods.\n"
<> ("expire_messages_days: " <> tshow defMsgExpirationDays <> "\n")
<> "expire_messages_on_start: on\n\
\expire_messages_on_send: off\n"
<> ("expire_ntfs_hours: " <> tshow defNtfExpirationHours <> "\n\n")
<> ("expire_messages_days = " <> tshow defMsgExpirationDays <> "\n")
<> "expire_messages_on_start = on\n\
\expire_messages_on_send = off\n"
<> ("expire_ntfs_hours = " <> tshow defNtfExpirationHours <> "\n\n")
<> "# Log daily server statistics to CSV file\n"
<> ("log_stats: " <> onOff logStats <> "\n\n")
<> ("log_stats = " <> onOff logStats <> "\n\n")
<> "# Log interval for real-time Prometheus metrics\n\
\# prometheus_interval: 60\n\n\
\# prometheus_interval = 60\n\n\
\[AUTH]\n\
\# Set new_queues option to off to completely prohibit creating new messaging queues.\n\
\# This can be useful when you want to decommission the server, but not all connections are switched yet.\n\
\new_queues: on\n\n\
\new_queues = on\n\n\
\# Use create_password option to enable basic auth to create new messaging queues.\n\
\# The password should be used as part of server address in client configuration:\n\
\# smp://fingerprint:password@host1,host2\n\
\# The password will not be shared with the connecting contacts, you must share it only\n\
\# with the users who you want to allow creating messaging queues on your server.\n"
<> ( let noPassword = "password to create new queues and forward messages (any printable ASCII characters without whitespace, '@', ':' and '/')"
in optDisabled basicAuth <> "create_password: " <> maybe noPassword (safeDecodeUtf8 . strEncode) basicAuth
in optDisabled basicAuth <> "create_password = " <> maybe noPassword (safeDecodeUtf8 . strEncode) basicAuth
)
<> "\n\n"
<> (optDisabled controlPortPwds <> "control_port_admin_password: " <> maybe "" fst controlPortPwds <> "\n")
<> (optDisabled controlPortPwds <> "control_port_user_password: " <> maybe "" snd controlPortPwds <> "\n\n")
<> (optDisabled controlPortPwds <> "control_port_admin_password = " <> maybe "" fst controlPortPwds <> "\n")
<> (optDisabled controlPortPwds <> "control_port_user_password = " <> maybe "" snd controlPortPwds <> "\n\n")
<> "# The limit for queues that can be blocked via control port per day, resets at 0:00 UTC.\n\
\# Set to 0 to disable limit, to -1 to prohibit blocking. Default is 20.\n\
\# daily_block_queue_quota: 20\n\
\# daily_block_queue_quota = 20\n\
\\n\
\[TRANSPORT]\n\
\# Host is only used to print server address on start.\n\
\# You can specify multiple server ports.\n"
<> ("host: " <> T.pack host <> "\n")
<> ("port: " <> defaultServerPorts <> "\n")
<> "log_tls_errors: off\n\n\
\# Use `websockets: 443` to run websockets server in addition to plain TLS.\n\
<> ("host = " <> T.pack host <> "\n")
<> ("port = " <> defaultServerPorts <> "\n")
<> "log_tls_errors = off\n\n\
\# Use `websockets = 443` to run websockets server in addition to plain TLS.\n\
\# This option is deprecated and should be used for testing only.\n\
\# , port 443 should be specified in port above\n\
\websockets: off\n"
<> (optDisabled controlPort <> "control_port: " <> tshow (fromMaybe defaultControlPort controlPort))
\websockets = off\n"
<> (optDisabled controlPort <> "control_port = " <> tshow (fromMaybe defaultControlPort controlPort))
<> "\n\n\
\[PROXY]\n\
\# Network configuration for SMP proxy client.\n\
\# `host_mode` can be 'public' (default) or 'onion'.\n\
\# It defines prefferred hostname for destination servers with multiple hostnames.\n\
\# host_mode: public\n\
\# required_host_mode: off\n\n\
\# host_mode = public\n\
\# required_host_mode = off\n\n\
\# The domain suffixes of the relays you operate (space-separated) to count as separate proxy statistics.\n"
<> (optDisabled ownDomains <> "own_server_domains: " <> maybe "" (safeDecodeUtf8 . strEncode) ownDomains)
<> (optDisabled ownDomains <> "own_server_domains = " <> maybe "" (safeDecodeUtf8 . strEncode) ownDomains)
<> "\n\n\
\# SOCKS proxy port for forwarding messages to destination servers.\n\
\# You may need a separate instance of SOCKS proxy for incoming single-hop requests.\n"
<> (optDisabled socksProxy <> "socks_proxy: " <> maybe "localhost:9050" (safeDecodeUtf8 . strEncode) socksProxy)
<> (optDisabled socksProxy <> "socks_proxy = " <> maybe "localhost:9050" (safeDecodeUtf8 . strEncode) socksProxy)
<> "\n\n\
\# `socks_mode` can be 'onion' for SOCKS proxy to be used for .onion destination hosts only (default)\n\
\# or 'always' to be used for all destination hosts (can be used if it is an .onion server).\n\
\# socks_mode: onion\n\n\
\# socks_mode = onion\n\n\
\# Limit number of threads a client can spawn to process proxy commands in parrallel.\n"
<> ("# client_concurrency: " <> tshow defaultProxyClientConcurrency)
<> ("# client_concurrency = " <> tshow defaultProxyClientConcurrency)
<> "\n\n\
\[INACTIVE_CLIENTS]\n\
\# TTL and interval to check inactive clients\n\
\disconnect: on\n"
<> ("ttl: " <> tshow (ttl defaultInactiveClientExpiration) <> "\n")
<> ("check_interval: " <> tshow (checkInterval defaultInactiveClientExpiration))
\disconnect = on\n"
<> ("ttl = " <> tshow (ttl defaultInactiveClientExpiration) <> "\n")
<> ("check_interval = " <> tshow (checkInterval defaultInactiveClientExpiration))
<> "\n\n\
\[WEB]\n\
\# Set path to generate static mini-site for server information and qr codes/links\n"
<> ("static_path: " <> T.pack (fromMaybe defaultStaticPath webStaticPath) <> "\n\n")
<> ("static_path = " <> T.pack (fromMaybe defaultStaticPath webStaticPath) <> "\n\n")
<> "# Run an embedded server on this port\n\
\# Onion sites can use any port and register it in the hidden service config.\n\
\# Running on a port 80 may require setting process capabilities.\n\
\# http: 8000\n\n\
\# http = 8000\n\n\
\# You can run an embedded TLS web server too if you provide port and cert and key files.\n\
\# Not required for running relay on onion address.\n"
<> (webDisabled <> "https: 443\n")
<> (webDisabled <> "cert: " <> T.pack httpsCertFile <> "\n")
<> (webDisabled <> "key: " <> T.pack httpsKeyFile <> "\n")
<> (webDisabled <> "https = 443\n")
<> (webDisabled <> "cert = " <> T.pack httpsCertFile <> "\n")
<> (webDisabled <> "key = " <> T.pack httpsKeyFile <> "\n")
where
InitOptions {enableStoreLog, dbOptions, socksProxy, ownDomains, controlPort, webStaticPath, disableWeb, logStats} = opts
defaultServerPorts = "5223,443"
@@ -189,53 +189,53 @@ informationIniContent InitOptions {sourceCode, serverInfo} =
\# LICENSE: https://github.com/simplex-chat/simplexmq/blob/stable/LICENSE\n\
\# Include correct source code URI in case the server source code is modified in any way.\n\
\# If any other information fields are present, source code property also MUST be present.\n\n"
<> (optDisabled sourceCode <> "source_code: " <> fromMaybe "URI" sourceCode)
<> (optDisabled sourceCode <> "source_code = " <> fromMaybe "URI" sourceCode)
<> "\n\n\
\# Declaring all below information is optional, any of these fields can be omitted.\n\
\\n\
\# Server usage conditions and amendments.\n\
\# It is recommended to use standard conditions with any amendments in a separate document.\n\
\# usage_conditions: https://github.com/simplex-chat/simplex-chat/blob/stable/PRIVACY.md\n\
\# condition_amendments: link\n\
\# usage_conditions = https://github.com/simplex-chat/simplex-chat/blob/stable/PRIVACY.md\n\
\# condition_amendments = link\n\
\\n\
\# Server location and operator.\n"
<> countryStr "server" serverCountry
<> enitiyStrs "operator" operator
<> (optDisabled website <> "website: " <> fromMaybe "" website)
<> (optDisabled website <> "website = " <> fromMaybe "" website)
<> "\n\n\
\# Administrative contacts.\n\
\# admin_simplex: SimpleX address\n\
\# admin_email:\n\
\# admin_pgp:\n\
\# admin_pgp_fingerprint:\n\
\# admin_simplex = SimpleX address\n\
\# admin_email =\n\
\# admin_pgp =\n\
\# admin_pgp_fingerprint =\n\
\\n\
\# Contacts for complaints and feedback.\n\
\# complaints_simplex: SimpleX address\n\
\# complaints_email:\n\
\# complaints_pgp:\n\
\# complaints_pgp_fingerprint:\n\
\# complaints_simplex = SimpleX address\n\
\# complaints_email =\n\
\# complaints_pgp =\n\
\# complaints_pgp_fingerprint =\n\
\\n\
\# Hosting provider.\n"
<> enitiyStrs "hosting" hosting
<> "\n\
\# Hosting type can be `virtual`, `dedicated`, `colocation`, `owned`\n"
<> ("hosting_type: " <> maybe "virtual" (decodeLatin1 . strEncode) hostingType <> "\n\n")
<> ("hosting_type = " <> maybe "virtual" (decodeLatin1 . strEncode) hostingType <> "\n\n")
where
ServerPublicInfo {operator, website, hosting, hostingType, serverCountry} = serverInfo
countryStr optName country = optDisabled country <> optName <> "_country: " <> fromMaybe "ISO-3166 2-letter code" country <> "\n"
countryStr optName country = optDisabled country <> optName <> "_country = " <> fromMaybe "ISO-3166 2-letter code" country <> "\n"
enitiyStrs optName entity =
optDisabled entity
<> optName
<> ": "
<> " = "
<> maybe "entity (organization or person name)" name entity
<> "\n"
<> countryStr optName (country =<< entity)
iniDbOpts :: DBOpts -> DBOpts -> Text
iniDbOpts DBOpts {connstr, schema, poolSize} DBOpts {connstr = defConnstr, schema = defSchema, poolSize = defPoolSize} =
(optDisabled' (connstr == defConnstr) <> "db_connection: " <> safeDecodeUtf8 connstr <> "\n")
<> (optDisabled' (schema == defSchema) <> "db_schema: " <> safeDecodeUtf8 schema <> "\n")
<> (optDisabled' (poolSize == defPoolSize) <> "db_pool_size: " <> tshow poolSize <> "\n\n")
(optDisabled' (connstr == defConnstr) <> "db_connection = " <> safeDecodeUtf8 connstr <> "\n")
<> (optDisabled' (schema == defSchema) <> "db_schema = " <> safeDecodeUtf8 schema <> "\n")
<> (optDisabled' (poolSize == defPoolSize) <> "db_pool_size = " <> tshow poolSize <> "\n\n")
optDisabled :: Maybe a -> Text
optDisabled = optDisabled' . isNothing
+2 -2
View File
@@ -108,9 +108,9 @@ instance StrEncoding RCSignedInvitation where
mconcat
[ strEncode invitation,
"&ssig=",
strEncode $ C.signatureBytes ssig,
strEncode ssig,
"&idsig=",
strEncode $ C.signatureBytes idsig
strEncode idsig
]
strP = do
+274
View File
@@ -0,0 +1,274 @@
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
module CoreTests.XFTPStoreTests (xftpStoreTests, xftpMigrationTests) where
import Control.Exception (bracket)
import Control.Monad
import Data.Word (Word32)
import Simplex.FileTransfer.Protocol (FileInfo (..), SFileParty (..))
import Simplex.FileTransfer.Server.Store
import Simplex.FileTransfer.Server.Store.Postgres (PostgresFileStore, importFileStore, exportFileStore)
import Simplex.FileTransfer.Server.StoreLog (closeStoreLog, readWriteFileStore, writeFileStore)
import Simplex.FileTransfer.Transport (XFTPErrorType (..))
import qualified Simplex.Messaging.Crypto as C
import Simplex.Messaging.Protocol (BlockingInfo (..), BlockingReason (..), EntityId (..))
import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..))
import Simplex.Messaging.Server.StoreLog (openWriteStoreLog)
import Simplex.Messaging.SystemTime (RoundedSystemTime (..))
import System.Directory (doesFileExist, removeFile)
import Test.Hspec hiding (fit, it)
import UnliftIO.STM
import Util
import XFTPClient (testXFTPPostgresCfg)
-- Spec covering the PostgresFileStore CRUD surface: add/get by both
-- parties, duplicate and missing-entity errors, path update, cascade
-- delete, blocking, ack, expiry and storage accounting.
xftpStoreTests :: Spec
xftpStoreTests = describe "PostgresFileStore operations" $ do
it "should add and get file by sender" testAddGetFileSender
it "should add and get file by recipient" testAddGetFileRecipient
it "should reject duplicate file" testDuplicateFile
it "should return AUTH for nonexistent file" testGetNonexistent
it "should set file path with IS NULL guard" testSetFilePath
it "should reject duplicate recipient" testDuplicateRecipient
it "should delete file and cascade recipients" testDeleteFileCascade
it "should block file and update status" testBlockFile
it "should ack file reception" testAckFile
it "should return expired files with limit" testExpiredFiles
it "should compute used storage and file count" testStorageAndCount
-- Spec for the store-log <-> PostgreSQL migration round-trip.
xftpMigrationTests :: Spec
xftpMigrationTests = describe "XFTP migration round-trip" $ do
it "should export to StoreLog and import back to Postgres preserving data" testMigrationRoundTrip
-- Test helpers
-- Run a test body against a fresh PostgresFileStore, guaranteeing the
-- store is closed even when the body throws. hspec expectation failures
-- are exceptions, so without bracket a failing test would skip
-- closeFileStore and leak the database connection pool.
withPgStore :: (PostgresFileStore -> IO ()) -> IO ()
withPgStore test =
bracket
(newFileStore testXFTPPostgresCfg :: IO PostgresFileStore)
closeFileStore
test
-- Fixed 16-byte entity IDs used across the store tests.
testSenderId :: EntityId
testSenderId = EntityId "sender001_______"
testRecipientId :: EntityId
testRecipientId = EntityId "recipient001____"
-- File metadata fixture parameterized by the sender's public key.
testFileInfo :: C.APublicAuthKey -> FileInfo
testFileInfo sndKey =
FileInfo
{ sndKey,
size = 128000 :: Word32,
digest = "test_digest_bytes_here___"
}
-- Fixed creation timestamp in the store's rounded-time representation.
testCreatedAt :: RoundedFileTime
testCreatedAt = RoundedSystemTime 1000000
-- Tests
-- Add a file, then fetch it by sender id and check every stored field
-- (sender id, key, size, created-at) and that the returned auth key is
-- the sender's key.
testAddGetFileSender :: Expectation
testAddGetFileSender = withPgStore $ \st -> do
g <- C.newRandom
(sk, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
let fileInfo = testFileInfo sk
addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right ()
result <- getFile st SFSender testSenderId
case result of
Right (FileRec {senderId, fileInfo = fi, createdAt}, key) -> do
senderId `shouldBe` testSenderId
sndKey fi `shouldBe` sk
size fi `shouldBe` 128000
createdAt `shouldBe` testCreatedAt
key `shouldBe` sk
Left e -> expectationFailure $ "getFile failed: " <> show e
-- | A recipient added to an existing file can fetch it with the recipient key.
testAddGetFileRecipient :: Expectation
testAddGetFileRecipient = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  (recipientKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  addFile st testSenderId (testFileInfo senderKey) testCreatedAt EntityActive `shouldReturn` Right ()
  addRecipient st testSenderId (FileRecipient testRecipientId recipientKey) `shouldReturn` Right ()
  res <- getFile st SFRecipient testRecipientId
  case res of
    Left e -> expectationFailure $ "getFile failed: " <> show e
    Right (FileRec {senderId}, key) -> do
      senderId `shouldBe` testSenderId
      key `shouldBe` recipientKey
-- | Adding the same sender file twice fails with DUPLICATE_.
testDuplicateFile :: Expectation
testDuplicateFile = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  let fi = testFileInfo senderKey
  addFile st testSenderId fi testCreatedAt EntityActive `shouldReturn` Right ()
  addFile st testSenderId fi testCreatedAt EntityActive `shouldReturn` Left DUPLICATE_
-- | Looking up an unknown sender or recipient ID yields AUTH.
testGetNonexistent :: Expectation
testGetNonexistent = withPgStore $ \st -> do
  senderRes <- getFile st SFSender testSenderId
  fmap (const ()) senderRes `shouldBe` Left AUTH
  recipientRes <- getFile st SFRecipient testRecipientId
  fmap (const ()) recipientRes `shouldBe` Left AUTH
-- | setFilePath succeeds once; a second call is rejected by the
-- file_path IS NULL guard and the first path is kept.
testSetFilePath :: Expectation
testSetFilePath = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  addFile st testSenderId (testFileInfo senderKey) testCreatedAt EntityActive `shouldReturn` Right ()
  setFilePath st testSenderId "/tmp/test_file" `shouldReturn` Right ()
  -- second attempt must not overwrite the already-set path
  setFilePath st testSenderId "/tmp/other_file" `shouldReturn` Left AUTH
  res <- getFile st SFSender testSenderId
  case res of
    Left e -> expectationFailure $ "getFile failed: " <> show e
    Right (FileRec {filePath}, _) -> readTVarIO filePath `shouldReturn` Just "/tmp/test_file"
-- | Adding the same recipient to a file twice fails with DUPLICATE_.
testDuplicateRecipient :: Expectation
testDuplicateRecipient = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  (recipientKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  addFile st testSenderId (testFileInfo senderKey) testCreatedAt EntityActive `shouldReturn` Right ()
  let rcp = FileRecipient testRecipientId recipientKey
  addRecipient st testSenderId rcp `shouldReturn` Right ()
  addRecipient st testSenderId rcp `shouldReturn` Left DUPLICATE_
-- | Deleting a file removes it and cascades the deletion to its recipients.
testDeleteFileCascade :: Expectation
testDeleteFileCascade = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  (recipientKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  addFile st testSenderId (testFileInfo senderKey) testCreatedAt EntityActive `shouldReturn` Right ()
  addRecipient st testSenderId (FileRecipient testRecipientId recipientKey) `shouldReturn` Right ()
  deleteFile st testSenderId `shouldReturn` Right ()
  -- neither the file nor its recipient should be retrievable afterwards
  senderRes <- getFile st SFSender testSenderId
  fmap (const ()) senderRes `shouldBe` Left AUTH
  recipientRes <- getFile st SFRecipient testRecipientId
  fmap (const ()) recipientRes `shouldBe` Left AUTH
-- | Blocking a file updates its status to EntityBlocked with the given info.
testBlockFile :: Expectation
testBlockFile = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  addFile st testSenderId (testFileInfo senderKey) testCreatedAt EntityActive `shouldReturn` Right ()
  let info = BlockingInfo {reason = BRContent, notice = Nothing}
  blockFile st testSenderId info False `shouldReturn` Right ()
  res <- getFile st SFSender testSenderId
  case res of
    Left e -> expectationFailure $ "getFile failed: " <> show e
    Right (FileRec {fileStatus}, _) -> readTVarIO fileStatus `shouldReturn` EntityBlocked info
-- | Acknowledging reception removes the recipient but keeps the file.
testAckFile :: Expectation
testAckFile = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  (recipientKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  addFile st testSenderId (testFileInfo senderKey) testCreatedAt EntityActive `shouldReturn` Right ()
  addRecipient st testSenderId (FileRecipient testRecipientId recipientKey) `shouldReturn` Right ()
  ackFile st testRecipientId `shouldReturn` Right ()
  -- the recipient is gone ...
  recipientRes <- getFile st SFRecipient testRecipientId
  fmap (const ()) recipientRes `shouldBe` Left AUTH
  -- ... but the sender's file remains
  res <- getFile st SFSender testSenderId
  case res of
    Left e -> expectationFailure $ "getFile failed: " <> show e
    Right _ -> pure ()
-- | expiredFiles returns only files created before the cutoff,
-- reporting each file's ID, stored path, and size.
testExpiredFiles :: Expectation
testExpiredFiles = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  let fi = testFileInfo senderKey
      oldId = EntityId "old_file________"
      newId = EntityId "new_file________"
  -- one file well before the cutoff, one well after
  addFile st oldId fi (RoundedSystemTime 100000) EntityActive `shouldReturn` Right ()
  void $ setFilePath st oldId "/tmp/old"
  addFile st newId fi (RoundedSystemTime 999999999) EntityActive `shouldReturn` Right ()
  -- cutoff 500000 catches only the old file
  expired <- expiredFiles st 500000 100
  length expired `shouldBe` 1
  case expired of
    [(sId, path, sz)] -> do
      sId `shouldBe` oldId
      path `shouldBe` Just "/tmp/old"
      sz `shouldBe` 128000
    _ -> expectationFailure "expected 1 expired file"
-- | Storage accounting: file count and summed sizes, starting from zero.
testStorageAndCount :: Expectation
testStorageAndCount = withPgStore $ \st -> do
  rng <- C.newRandom
  (senderKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 rng
  -- an empty store reports zero usage
  getUsedStorage st `shouldReturn` 0
  getFileCount st `shouldReturn` 0
  let fi = testFileInfo senderKey
  addFile st (EntityId "file_a__________") fi testCreatedAt EntityActive `shouldReturn` Right ()
  addFile st (EntityId "file_b__________") fi testCreatedAt EntityActive `shouldReturn` Right ()
  getFileCount st `shouldReturn` 2
  -- two files of 128000 bytes each
  getUsedStorage st `shouldReturn` 256000
-- Migration round-trip test
-- | End-to-end migration round-trip:
-- STM store -> StoreLog -> Postgres -> StoreLog -> STM store.
-- Verifies that file metadata, file paths, recipients, and blocked
-- status all survive both import and export.
testMigrationRoundTrip :: Expectation
testMigrationRoundTrip = do
  let storeLogPath = "tests/tmp/xftp-migration-test.log"
      storeLogPath2 = "tests/tmp/xftp-migration-test2.log"
  -- 1. Create STM store with test data: one active file with a path and
  -- a recipient, plus one blocked file.
  stmStore <- newFileStore () :: IO STMFileStore
  g <- C.newRandom
  (sndKey1, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
  (rcpKey1, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
  (sndKey2, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
  let fileInfo1 = testFileInfo sndKey1
      fileInfo2 = FileInfo {sndKey = sndKey2, size = 64000, digest = "other_digest____________"}
      sId1 = EntityId "migration_file_1"
      sId2 = EntityId "migration_file_2"
      rId1 = EntityId "migration_rcp_1_"
  addFile stmStore sId1 fileInfo1 testCreatedAt EntityActive `shouldReturn` Right ()
  void $ setFilePath stmStore sId1 "/tmp/file1"
  addRecipient stmStore sId1 (FileRecipient rId1 rcpKey1) `shouldReturn` Right ()
  let testBlockInfo = BlockingInfo {reason = BRSpam, notice = Nothing}
  addFile stmStore sId2 fileInfo2 testCreatedAt (EntityBlocked testBlockInfo) `shouldReturn` Right ()
  -- 2. Write the STM store contents to a StoreLog
  sl <- openWriteStoreLog False storeLogPath
  writeFileStore sl stmStore
  closeStoreLog sl
  -- 3. Import StoreLog to Postgres
  importFileStore storeLogPath testXFTPPostgresCfg
  -- the consumed StoreLog should be renamed to .bak by the import
  doesFileExist storeLogPath `shouldReturn` False
  doesFileExist (storeLogPath <> ".bak") `shouldReturn` True
  -- 4. Export from Postgres back to a fresh StoreLog
  exportFileStore storeLogPath2 testXFTPPostgresCfg
  -- 5. Read the exported StoreLog into a new STM store and verify
  stmStore2 <- newFileStore () :: IO STMFileStore
  sl2 <- readWriteFileStore storeLogPath2 stmStore2
  closeStoreLog sl2
  -- Verify file 1: size, file path, and active status preserved
  result1 <- getFile stmStore2 SFSender sId1
  case result1 of
    Right (FileRec {fileInfo = fi, filePath, fileStatus}, _) -> do
      size fi `shouldBe` 128000
      readTVarIO filePath `shouldReturn` Just "/tmp/file1"
      readTVarIO fileStatus `shouldReturn` EntityActive
    Left e -> expectationFailure $ "getFile sId1 failed: " <> show e
  -- Verify the recipient key survived the round-trip
  result1r <- getFile stmStore2 SFRecipient rId1
  case result1r of
    Right (_, key) -> key `shouldBe` rcpKey1
    Left e -> expectationFailure $ "getFile rId1 failed: " <> show e
  -- Verify file 2: blocked status with its reason preserved
  result2 <- getFile stmStore2 SFSender sId2
  case result2 of
    Right (FileRec {fileInfo = fi, fileStatus}, _) -> do
      size fi `shouldBe` 64000
      readTVarIO fileStatus `shouldReturn` EntityBlocked (BlockingInfo {reason = BRSpam, notice = Nothing})
    Left e -> expectationFailure $ "getFile sId2 failed: " <> show e
  -- Cleanup temporary log files
  removeFile (storeLogPath <> ".bak")
  removeFile storeLogPath2
+22 -4
View File
@@ -33,7 +33,9 @@ import System.Environment (setEnv)
import Test.Hspec hiding (fit, it)
import Util
import XFTPAgent
import XFTPCLI
import XFTPCLI (xftpCLIFileTests)
import Simplex.FileTransfer.Server.Env (AFStoreType (..))
import Simplex.FileTransfer.Server.Store (SFSType (..))
import XFTPServerTests (xftpServerTests)
import WebTests (webTests)
import XFTPWebTests (xftpWebTests)
@@ -47,12 +49,14 @@ import AgentTests.SchemaDump (schemaDumpTest)
#endif
#if defined(dbServerPostgres)
import CoreTests.XFTPStoreTests (xftpStoreTests, xftpMigrationTests)
import NtfServerTests (ntfServerTests)
import NtfClient (ntfTestServerDBConnectInfo, ntfTestStoreDBOpts)
import PostgresSchemaDump (postgresSchemaDumpTest)
import SMPClient (testServerDBConnectInfo, testStoreDBOpts)
import Simplex.Messaging.Notifications.Server.Store.Migrations (ntfServerMigrations)
import Simplex.Messaging.Server.QueueStore.Postgres.Migrations (serverMigrations)
import XFTPClient (testXFTPDBConnectInfo)
#endif
#if defined(dbPostgres) || defined(dbServerPostgres)
@@ -149,10 +153,24 @@ main = do
describe "SMP proxy, jornal message store" $
before (pure $ ASType SQSMemory SMSJournal) smpProxyTests
describe "XFTP" $ do
describe "XFTP server" xftpServerTests
describe "XFTP server" $
before (pure $ AFSType SFSMemory) xftpServerTests
describe "XFTP file description" fileDescriptionTests
describe "XFTP CLI" xftpCLITests
describe "XFTP agent" xftpAgentTests
describe "XFTP CLI (memory)" $
before (pure $ AFSType SFSMemory) xftpCLIFileTests
describe "XFTP agent" $
before (pure $ AFSType SFSMemory) xftpAgentTests
#if defined(dbServerPostgres)
around_ (postgressBracket testXFTPDBConnectInfo) $ do
describe "XFTP Postgres store operations" xftpStoreTests
describe "XFTP migration round-trip" xftpMigrationTests
describe "XFTP server (PostgreSQL)" $
before (pure $ AFSType SFSPostgres) xftpServerTests
describe "XFTP agent (PostgreSQL)" $
before (pure $ AFSType SFSPostgres) xftpAgentTests
describe "XFTP CLI (PostgreSQL)" $
before (pure $ AFSType SFSPostgres) xftpCLIFileTests
#endif
#if defined(dbPostgres)
describe "XFTP Web Client" $ xftpWebTests (dropAllSchemasExceptSystem testDBConnectInfo)
#else
+92 -82
View File
@@ -26,7 +26,8 @@ import SMPClient (xit'')
import Simplex.FileTransfer.Client (XFTPClientConfig (..))
import Simplex.FileTransfer.Description (FileChunk (..), FileDescription (..), FileDescriptionURI (..), ValidFileDescription, fileDescriptionURI, kb, mb, qrSizeLimit, pattern ValidFileDescription)
import Simplex.FileTransfer.Protocol (FileParty (..))
import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..))
import Simplex.FileTransfer.Server.Env (AFStoreType, XFTPServerConfig (..))
import Simplex.FileTransfer.Server.Store (STMFileStore)
import Simplex.FileTransfer.Transport (XFTPErrorType (AUTH))
import Simplex.FileTransfer.Types (RcvFileId, SndFileId)
import Simplex.Messaging.Agent (AgentClient, testProtocolServer, xftpDeleteRcvFile, xftpDeleteSndFileInternal, xftpDeleteSndFileRemote, xftpReceiveFile, xftpSendDescription, xftpSendFile, xftpStartWorkers)
@@ -54,7 +55,7 @@ import Fixtures
import Simplex.Messaging.Agent.Store.Postgres.Util (dropAllSchemasExceptSystem)
#endif
xftpAgentTests :: Spec
xftpAgentTests :: SpecWith AFStoreType
xftpAgentTests =
around_ testBracket
#if defined(dbPostgres)
@@ -63,35 +64,42 @@ xftpAgentTests =
. describe "agent XFTP API" $ do
it "should send and receive file" $ withXFTPServer testXFTPAgentSendReceive
-- uncomment CPP option slow_servers and run hpack to run this test
xit "should send and receive file with slow server responses" $
xit "should send and receive file with slow server responses" $ \_ ->
withXFTPServerCfg testXFTPServerConfig {responseDelay = 500000} $
\_ -> testXFTPAgentSendReceive
it "should send and receive with encrypted local files" testXFTPAgentSendReceiveEncrypted
it "should send and receive large file with a redirect" testXFTPAgentSendReceiveRedirect
it "should send and receive small file without a redirect" testXFTPAgentSendReceiveNoRedirect
describe "sending and receiving with version negotiation" testXFTPAgentSendReceiveMatrix
it "should resume receiving file after restart" testXFTPAgentReceiveRestore
it "should cleanup rcv tmp path after permanent error" testXFTPAgentReceiveCleanup
it "should resume sending file after restart" testXFTPAgentSendRestore
xit'' "should cleanup snd prefix path after permanent error" testXFTPAgentSendCleanup
describe "sending and receiving with version negotiation" $ beforeWith (const (pure ())) testXFTPAgentSendReceiveMatrix
it "should resume receiving file after restart" $ \_ -> testXFTPAgentReceiveRestore
it "should cleanup rcv tmp path after permanent error" $ \_ -> testXFTPAgentReceiveCleanup
it "should resume sending file after restart" $ \_ -> testXFTPAgentSendRestore
xit'' "should cleanup snd prefix path after permanent error" $ \_ -> testXFTPAgentSendCleanup
it "should delete sent file on server" testXFTPAgentDelete
it "should resume deleting file after restart" testXFTPAgentDeleteRestore
it "should resume deleting file after restart" $ \_ -> testXFTPAgentDeleteRestore
-- TODO when server is fixed to correctly send AUTH error, this test has to be modified to expect AUTH error
it "if file is deleted on server, should limit retries and continue receiving next file" testXFTPAgentDeleteOnServer
it "if file is expired on server, should report error and continue receiving next file" testXFTPAgentExpiredOnServer
it "should request additional recipient IDs when number of recipients exceeds maximum per request" testXFTPAgentRequestAdditionalRecipientIDs
describe "XFTP server test via agent API" $ do
it "should pass without basic auth" $ testXFTPServerTest Nothing (noAuthSrv testXFTPServer2) `shouldReturn` Nothing
it "should pass without basic auth" $ \_ -> testXFTPServerTest Nothing (noAuthSrv testXFTPServer2) `shouldReturn` Nothing
let srv1 = testXFTPServer2 {keyHash = "1234"}
it "should fail with incorrect fingerprint" $ do
it "should fail with incorrect fingerprint" $ \_ -> do
testXFTPServerTest Nothing (noAuthSrv srv1) `shouldReturn` Just (ProtocolTestFailure TSConnect $ BROKER (B.unpack $ strEncode srv1) $ NETWORK NEUnknownCAError)
describe "server with password" $ do
let auth = Just "abcd"
srv = ProtoServerWithAuth testXFTPServer2
authErr = Just (ProtocolTestFailure TSCreateFile $ XFTP (B.unpack $ strEncode testXFTPServer2) AUTH)
it "should pass with correct password" $ testXFTPServerTest auth (srv auth) `shouldReturn` Nothing
it "should fail without password" $ testXFTPServerTest auth (srv Nothing) `shouldReturn` authErr
it "should fail with incorrect password" $ testXFTPServerTest auth (srv $ Just "wrong") `shouldReturn` authErr
it "should pass with correct password" $ \_ -> testXFTPServerTest auth (srv auth) `shouldReturn` Nothing
it "should fail without password" $ \_ -> testXFTPServerTest auth (srv Nothing) `shouldReturn` authErr
it "should fail with incorrect password" $ \_ -> testXFTPServerTest auth (srv $ Just "wrong") `shouldReturn` authErr
testXFTPServerTest :: HasCallStack => Maybe BasicAuth -> XFTPServerWithAuth -> IO (Maybe ProtocolTestFailure)
testXFTPServerTest newFileBasicAuth srv =
withXFTPServerCfg testXFTPServerConfig {newFileBasicAuth, xftpPort = xftpTestPort2} $ \_ ->
-- initially passed server is not running
withAgent 1 agentCfg initAgentServers testDB $ \a ->
testProtocolServer a NRMInteractive 1 srv
rfProgress :: forall m. (HasCallStack, MonadIO m, MonadFail m) => AgentClient -> Int64 -> m ()
rfProgress c expected = loop 0
@@ -135,7 +143,7 @@ testXFTPAgentSendReceive = do
rfId <- runRight $ testReceive rcp rfd originalFilePath
xftpDeleteRcvFile rcp rfId
testXFTPAgentSendReceiveEncrypted :: HasCallStack => IO ()
testXFTPAgentSendReceiveEncrypted :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentSendReceiveEncrypted = withXFTPServer $ do
g <- C.newRandom
filePath <- createRandomFile
@@ -156,7 +164,7 @@ testXFTPAgentSendReceiveEncrypted = withXFTPServer $ do
rfId <- runRight $ testReceiveCF rcp rfd cfArgs originalFilePath
xftpDeleteRcvFile rcp rfId
testXFTPAgentSendReceiveRedirect :: HasCallStack => IO ()
testXFTPAgentSendReceiveRedirect :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentSendReceiveRedirect = withXFTPServer $ do
--- sender
filePathIn <- createRandomFile
@@ -214,7 +222,7 @@ testXFTPAgentSendReceiveRedirect = withXFTPServer $ do
inBytes <- B.readFile filePathIn
B.readFile out `shouldReturn` inBytes
testXFTPAgentSendReceiveNoRedirect :: HasCallStack => IO ()
testXFTPAgentSendReceiveNoRedirect :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentSendReceiveNoRedirect = withXFTPServer $ do
--- sender
let fileSize = mb 5
@@ -272,7 +280,7 @@ testXFTPAgentSendReceiveMatrix = do
newClient = agentCfg
oldServer = withXFTPServerCfgNoALPN
newServer = withXFTPServerCfg
run :: HasCallStack => (HasCallStack => XFTPServerConfig -> (ThreadId -> IO ()) -> IO ()) -> AgentConfig -> AgentConfig -> IO ()
run :: HasCallStack => (HasCallStack => XFTPServerConfig STMFileStore -> (ThreadId -> IO ()) -> IO ()) -> AgentConfig -> AgentConfig -> IO ()
run withServer sender receiver =
withServer testXFTPServerConfig $ \_t -> do
filePath <- createRandomFile_ (kb 319 :: Integer) "testfile"
@@ -498,37 +506,38 @@ testXFTPAgentSendCleanup = withGlobalLogging logCfgNoLogs $ do
doesDirectoryExist prefixPath `shouldReturn` False
doesFileExist encPath `shouldReturn` False
testXFTPAgentDelete :: HasCallStack => IO ()
testXFTPAgentDelete = withGlobalLogging logCfgNoLogs $
withXFTPServer $ do
filePath <- createRandomFile
testXFTPAgentDelete :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentDelete = withGlobalLogging logCfgNoLogs . withXFTPServer test
where
test = do
filePath <- createRandomFile
-- send file
withAgent 1 agentCfg initAgentServers testDB $ \sndr -> do
(sfId, sndDescr, rfd1, rfd2) <- runRight $ testSend sndr filePath
-- send file
withAgent 1 agentCfg initAgentServers testDB $ \sndr -> do
(sfId, sndDescr, rfd1, rfd2) <- runRight $ testSend sndr filePath
-- receive file
withAgent 2 agentCfg initAgentServers testDB2 $ \rcp1 -> do
runRight_ . void $ testReceive rcp1 rfd1 filePath
-- receive file
withAgent 2 agentCfg initAgentServers testDB2 $ \rcp1 -> do
runRight_ . void $ testReceive rcp1 rfd1 filePath
length <$> listDirectory xftpServerFiles `shouldReturn` 6
length <$> listDirectory xftpServerFiles `shouldReturn` 6
-- delete file
runRight_ $ xftpStartWorkers sndr (Just senderFiles)
xftpDeleteSndFileRemote sndr 1 sfId sndDescr
Nothing <- 100000 `timeout` sfGet sndr
pure ()
-- delete file
runRight_ $ xftpStartWorkers sndr (Just senderFiles)
xftpDeleteSndFileRemote sndr 1 sfId sndDescr
Nothing <- 100000 `timeout` sfGet sndr
pure ()
threadDelay 1000000
length <$> listDirectory xftpServerFiles `shouldReturn` 0
threadDelay 1000000
length <$> listDirectory xftpServerFiles `shouldReturn` 0
-- receive file - should fail with AUTH error
withAgent 3 agentCfg initAgentServers testDB2 $ \rcp2 -> runRight $ do
xftpStartWorkers rcp2 (Just recipientFiles)
rfId <- xftpReceiveFile rcp2 1 rfd2 Nothing True
("", rfId', RFERR (XFTP "xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@localhost:8000" AUTH)) <-
rfGet rcp2
liftIO $ rfId' `shouldBe` rfId
-- receive file - should fail with AUTH error
withAgent 3 agentCfg initAgentServers testDB2 $ \rcp2 -> runRight $ do
xftpStartWorkers rcp2 (Just recipientFiles)
rfId <- xftpReceiveFile rcp2 1 rfd2 Nothing True
("", rfId', RFERR (XFTP "xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@localhost:8000" AUTH)) <-
rfGet rcp2
liftIO $ rfId' `shouldBe` rfId
testXFTPAgentDeleteRestore :: HasCallStack => IO ()
testXFTPAgentDeleteRestore = withGlobalLogging logCfgNoLogs $ do
@@ -568,48 +577,48 @@ testXFTPAgentDeleteRestore = withGlobalLogging logCfgNoLogs $ do
rfGet rcp2
liftIO $ rfId' `shouldBe` rfId
testXFTPAgentDeleteOnServer :: HasCallStack => IO ()
testXFTPAgentDeleteOnServer = withGlobalLogging logCfgNoLogs $
withXFTPServer $ do
filePath1 <- createRandomFile' "testfile1"
testXFTPAgentDeleteOnServer :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentDeleteOnServer = withGlobalLogging logCfgNoLogs . withXFTPServer test
where
test = do
filePath1 <- createRandomFile' "testfile1"
-- send file 1
withAgent 1 agentCfg initAgentServers testDB $ \sndr -> do
(_, _, rfd1_1, rfd1_2) <- runRight $ testSend sndr filePath1
-- send file 1
withAgent 1 agentCfg initAgentServers testDB $ \sndr -> do
(_, _, rfd1_1, rfd1_2) <- runRight $ testSend sndr filePath1
-- receive file 1 successfully
withAgent 2 agentCfg initAgentServers testDB2 $ \rcp -> do
runRight_ . void $ testReceive rcp rfd1_1 filePath1
-- receive file 1 successfully
withAgent 2 agentCfg initAgentServers testDB2 $ \rcp -> do
runRight_ . void $ testReceive rcp rfd1_1 filePath1
serverFiles <- listDirectory xftpServerFiles
length serverFiles `shouldBe` 6
serverFiles <- listDirectory xftpServerFiles
length serverFiles `shouldBe` 6
-- delete file 1 on server from file system
forM_ serverFiles (\file -> removeFile (xftpServerFiles </> file))
-- delete file 1 on server from file system
forM_ serverFiles (\file -> removeFile (xftpServerFiles </> file))
threadDelay 1000000
length <$> listDirectory xftpServerFiles `shouldReturn` 0
threadDelay 1000000
length <$> listDirectory xftpServerFiles `shouldReturn` 0
-- create and send file 2
filePath2 <- createRandomFile' "testfile2"
(_, _, rfd2, _) <- runRight $ testSend sndr filePath2
-- create and send file 2
filePath2 <- createRandomFile' "testfile2"
(_, _, rfd2, _) <- runRight $ testSend sndr filePath2
length <$> listDirectory xftpServerFiles `shouldReturn` 6
length <$> listDirectory xftpServerFiles `shouldReturn` 6
runRight_ . void $ do
-- receive file 1 again
rfId1 <- xftpReceiveFile rcp 1 rfd1_2 Nothing True
("", rfId1', RFERR (XFTP "xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@localhost:8000" AUTH)) <-
rfGet rcp
liftIO $ rfId1 `shouldBe` rfId1'
runRight_ . void $ do
-- receive file 1 again
rfId1 <- xftpReceiveFile rcp 1 rfd1_2 Nothing True
("", rfId1', RFERR (XFTP "xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@localhost:8000" AUTH)) <-
rfGet rcp
liftIO $ rfId1 `shouldBe` rfId1'
-- receive file 2
testReceive' rcp rfd2 filePath2
-- receive file 2
testReceive' rcp rfd2 filePath2
testXFTPAgentExpiredOnServer :: HasCallStack => IO ()
testXFTPAgentExpiredOnServer = withGlobalLogging logCfgNoLogs $ do
let fastExpiration = ExpirationConfig {ttl = 2, checkInterval = 1}
withXFTPServerCfg testXFTPServerConfig {fileExpiration = Just fastExpiration} . const $ do
testXFTPAgentExpiredOnServer :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentExpiredOnServer fsType = withGlobalLogging logCfgNoLogs $
withXFTPServerConfigOn (updateXFTPCfg (cfgFS fsType) $ \c -> c {fileExpiration = Just fastExpiration}) . const $ do
filePath1 <- createRandomFile' "testfile1"
-- send file 1
@@ -644,8 +653,10 @@ testXFTPAgentExpiredOnServer = withGlobalLogging logCfgNoLogs $ do
-- receive file 2 successfully
runRight_ . void $ testReceive' rcp rfd2 filePath2
where
fastExpiration = ExpirationConfig {ttl = 2, checkInterval = 1}
testXFTPAgentRequestAdditionalRecipientIDs :: HasCallStack => IO ()
testXFTPAgentRequestAdditionalRecipientIDs :: HasCallStack => AFStoreType -> IO ()
testXFTPAgentRequestAdditionalRecipientIDs = withXFTPServer $ do
filePath <- createRandomFile
@@ -670,9 +681,8 @@ testXFTPAgentRequestAdditionalRecipientIDs = withXFTPServer $ do
void $ testReceive rcp (rfds !! 299) filePath
void $ testReceive rcp (rfds !! 499) filePath
testXFTPServerTest :: HasCallStack => Maybe BasicAuth -> XFTPServerWithAuth -> IO (Maybe ProtocolTestFailure)
testXFTPServerTest newFileBasicAuth srv =
withXFTPServerCfg testXFTPServerConfig {newFileBasicAuth, xftpPort = xftpTestPort2} $ \_ ->
-- initially passed server is not running
withAgent 1 agentCfg initAgentServers testDB $ \a ->
testProtocolServer a NRMInteractive 1 srv
testXFTPServerTest_ :: HasCallStack => XFTPServerWithAuth -> IO (Maybe ProtocolTestFailure)
testXFTPServerTest_ srv =
-- initially passed server is not running
withAgent 1 agentCfg initAgentServers testDB $ \a ->
testProtocolServer a NRMInteractive 1 srv
+17 -14
View File
@@ -1,4 +1,4 @@
module XFTPCLI where
module XFTPCLI (xftpCLIFileTests, xftpCLI, senderFiles, recipientFiles, testBracket) where
import Control.Exception (bracket_)
import qualified Data.ByteString as LB
@@ -11,14 +11,17 @@ import System.FilePath ((</>))
import System.IO.Silently (capture_)
import Test.Hspec hiding (fit, it)
import Util
import XFTPClient (testXFTPServerStr, testXFTPServerStr2, withXFTPServer, withXFTPServer2, xftpServerFiles, xftpServerFiles2)
import Simplex.FileTransfer.Server.Env (AFStoreType)
import XFTPClient (cfgFS, cfgFS2, withXFTPServer, withXFTPServerConfigOn, testXFTPServerStr, testXFTPServerStr2, xftpServerFiles, xftpServerFiles2)
xftpCLITests :: Spec
xftpCLITests = around_ testBracket . describe "XFTP CLI" $ do
it "should send and receive file" testXFTPCLISendReceive
it "should send and receive file with 2 servers" testXFTPCLISendReceive2servers
it "should delete file from 2 servers" testXFTPCLIDelete
it "prepareChunkSizes should use 2 chunk sizes" testPrepareChunkSizes
xftpCLIFileTests :: SpecWith AFStoreType
xftpCLIFileTests = around_ testBracket $ do
it "should send and receive file" $ withXFTPServer testXFTPCLISendReceive_
it "should send and receive file with 2 servers" $ \fsType ->
withXFTPServerConfigOn (cfgFS fsType) $ \_ -> withXFTPServerConfigOn (cfgFS2 fsType) $ \_ -> testXFTPCLISendReceive2servers_
it "should delete file from 2 servers" $ \fsType ->
withXFTPServerConfigOn (cfgFS fsType) $ \_ -> withXFTPServerConfigOn (cfgFS2 fsType) $ \_ -> testXFTPCLIDelete_
it "prepareChunkSizes should use 2 chunk sizes" $ \_ -> testPrepareChunkSizes
testBracket :: IO () -> IO ()
testBracket =
@@ -37,8 +40,8 @@ recipientFiles = "tests/tmp/xftp-recipient-files"
xftpCLI :: [String] -> IO [String]
xftpCLI params = lines <$> capture_ (withArgs params xftpClientCLI)
testXFTPCLISendReceive :: IO ()
testXFTPCLISendReceive = withXFTPServer $ do
testXFTPCLISendReceive_ :: IO ()
testXFTPCLISendReceive_ = do
let filePath = senderFiles </> "testfile"
xftpCLI ["rand", filePath, "17mb"] `shouldReturn` ["File created: " <> filePath]
file <- LB.readFile filePath
@@ -73,8 +76,8 @@ testXFTPCLISendReceive = withXFTPServer $ do
recvResult `shouldBe` ["File description " <> fd <> " is deleted."]
LB.readFile (recipientFiles </> fileName) `shouldReturn` file
testXFTPCLISendReceive2servers :: IO ()
testXFTPCLISendReceive2servers = withXFTPServer . withXFTPServer2 $ do
testXFTPCLISendReceive2servers_ :: IO ()
testXFTPCLISendReceive2servers_ = do
let filePath = senderFiles </> "testfile"
xftpCLI ["rand", filePath, "17mb"] `shouldReturn` ["File created: " <> filePath]
file <- LB.readFile filePath
@@ -111,8 +114,8 @@ testXFTPCLISendReceive2servers = withXFTPServer . withXFTPServer2 $ do
recvResult `shouldBe` ["File description " <> fd <> " is deleted."]
LB.readFile (recipientFiles </> fileName) `shouldReturn` file
testXFTPCLIDelete :: IO ()
testXFTPCLIDelete = withXFTPServer . withXFTPServer2 $ do
testXFTPCLIDelete_ :: IO ()
testXFTPCLIDelete_ = do
let filePath = senderFiles </> "testfile"
xftpCLI ["rand", filePath, "17mb"] `shouldReturn` ["File created: " <> filePath]
file <- LB.readFile filePath
+90 -27
View File
@@ -1,3 +1,5 @@
{-# LANGUAGE CPP #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
@@ -7,6 +9,7 @@
module XFTPClient where
import Control.Concurrent (ThreadId, threadDelay)
import Control.Monad (void)
import Data.String (fromString)
import Data.Time.Clock (getCurrentTime)
import Network.Socket (ServiceName)
@@ -14,48 +17,102 @@ import SMPClient (serverBracket)
import Simplex.FileTransfer.Client
import Simplex.FileTransfer.Description
import Simplex.FileTransfer.Server (runXFTPServerBlocking)
import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defaultFileExpiration, defaultInactiveClientExpiration)
import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), XFTPStoreConfig (..), AFStoreType (..), defaultFileExpiration, defaultInactiveClientExpiration)
import Simplex.FileTransfer.Server.Store (FileStoreClass, SFSType (..), STMFileStore)
import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange)
import Simplex.Messaging.Protocol (XFTPServer)
import Simplex.Messaging.Transport.HTTP2 (httpALPN)
import Simplex.Messaging.Transport.Server
import Test.Hspec hiding (fit, it)
#if defined(dbServerPostgres)
import qualified Database.PostgreSQL.Simple as PSQL
import Database.PostgreSQL.Simple (ConnectInfo (..), defaultConnectInfo)
import Simplex.FileTransfer.Server.Store.Postgres.Config (PostgresFileStoreCfg (..), defaultXFTPDBOpts)
import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..))
import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation (..))
#endif
xftpTest :: HasCallStack => (HasCallStack => XFTPClient -> IO ()) -> Expectation
xftpTest test = runXFTPTest test `shouldReturn` ()
data AXFTPServerConfig = forall s. FileStoreClass s => AXFTPSrvCfg (XFTPServerConfig s)
xftpTestN :: HasCallStack => Int -> (HasCallStack => [XFTPClient] -> IO ()) -> Expectation
xftpTestN n test = runXFTPTestN n test `shouldReturn` ()
updateXFTPCfg :: AXFTPServerConfig -> (forall s. XFTPServerConfig s -> XFTPServerConfig s) -> AXFTPServerConfig
updateXFTPCfg (AXFTPSrvCfg cfg) f = AXFTPSrvCfg (f cfg)
xftpTest2 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> IO ()) -> Expectation
cfgFS :: AFStoreType -> AXFTPServerConfig
cfgFS (AFSType fs) = case fs of
SFSMemory -> AXFTPSrvCfg testXFTPServerConfig
#if defined(dbServerPostgres)
SFSPostgres -> AXFTPSrvCfg testXFTPServerConfig {serverStoreCfg = XSCDatabase testXFTPPostgresCfg}
#endif
cfgFS2 :: AFStoreType -> AXFTPServerConfig
cfgFS2 (AFSType fs) = case fs of
SFSMemory -> AXFTPSrvCfg testXFTPServerConfig2
#if defined(dbServerPostgres)
SFSPostgres -> AXFTPSrvCfg testXFTPServerConfig2 {serverStoreCfg = XSCDatabase testXFTPPostgresCfg}
#endif
withXFTPServerConfigOn :: HasCallStack => AXFTPServerConfig -> (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerConfigOn (AXFTPSrvCfg cfg) = withXFTPServerCfg cfg
#if defined(dbServerPostgres)
testXFTPDBConnectInfo :: ConnectInfo
testXFTPDBConnectInfo =
defaultConnectInfo
{ connectUser = "test_xftp_server_user",
connectDatabase = "test_xftp_server_db"
}
testXFTPPostgresCfg :: PostgresFileStoreCfg
testXFTPPostgresCfg =
PostgresFileStoreCfg
{ dbOpts = defaultXFTPDBOpts
{ connstr = "postgresql://test_xftp_server_user@/test_xftp_server_db",
schema = "xftp_server_test",
poolSize = 10,
createSchema = True
},
dbStoreLogPath = Nothing,
confirmMigrations = MCYesUp
}
-- | Reset the PostgreSQL test store between tests by deleting all rows
-- from the @files@ table in the test schema.
-- NOTE(review): assumes dependent rows (e.g. recipients) are removed via
-- cascading deletes — confirm against the server schema.
clearXFTPPostgresStore :: IO ()
clearXFTPPostgresStore = do
  let DBOpts {connstr} = dbOpts testXFTPPostgresCfg
  conn <- PSQL.connectPostgreSQL connstr
  -- scope the DELETE to the test schema; public kept for extensions
  void $ PSQL.execute_ conn "SET search_path TO xftp_server_test,public"
  void $ PSQL.execute_ conn "DELETE FROM files"
  PSQL.close conn
#endif
-- | Run a single-client test against a server using the given store backend;
-- the trailing 'AFStoreType' argument makes this usable with @SpecWith AFStoreType@.
xftpTest :: HasCallStack => (HasCallStack => XFTPClient -> IO ()) -> AFStoreType -> Expectation
xftpTest test fsType = withXFTPServerConfigOn (cfgFS fsType) (\_ -> testXFTPClient test) `shouldReturn` ()
-- | Run a test with @nClients@ concurrently connected clients against one
-- server using the given store backend.
xftpTestN :: HasCallStack => Int -> (HasCallStack => [XFTPClient] -> IO ()) -> AFStoreType -> Expectation
xftpTestN nClients test fsType = withXFTPServerConfigOn (cfgFS fsType) (\_ -> run nClients []) `shouldReturn` ()
  where
    -- nest one testXFTPClient bracket per remaining count, collecting handles;
    -- handles are prepended, so the list is in reverse connection order
    run :: Int -> [XFTPClient] -> IO ()
    run 0 hs = test hs
    run n hs = testXFTPClient $ \h -> run (n - 1) (h : hs)
-- | Two-client convenience wrapper over 'xftpTestN'.
xftpTest2 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> IO ()) -> AFStoreType -> Expectation
xftpTest2 test = xftpTestN 2 _test
  where
    -- pattern failure here indicates a bug in xftpTestN, not in the test
    _test [h1, h2] = test h1 h2
    _test _ = error "expected 2 handles"
xftpTest4 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> XFTPClient -> XFTPClient -> IO ()) -> Expectation
xftpTest4 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> XFTPClient -> XFTPClient -> IO ()) -> AFStoreType -> Expectation
xftpTest4 test = xftpTestN 4 _test
where
_test [h1, h2, h3, h4] = test h1 h2 h3 h4
_test _ = error "expected 4 handles"
-- | Run a single-client action inside 'withXFTPServer' and return its result.
runXFTPTest :: HasCallStack => (HasCallStack => XFTPClient -> IO a) -> IO a
runXFTPTest test = withXFTPServer $ testXFTPClient test
-- | Run an action with @nClients@ concurrently connected clients inside
-- 'withXFTPServer' and return its result.
runXFTPTestN :: forall a. HasCallStack => Int -> (HasCallStack => [XFTPClient] -> IO a) -> IO a
runXFTPTestN nClients test = withXFTPServer $ run nClients []
  where
    -- nest one testXFTPClient bracket per remaining count, collecting handles
    run :: Int -> [XFTPClient] -> IO a
    run 0 hs = test hs
    run n hs = testXFTPClient $ \h -> run (n - 1) (h : hs)
withXFTPServerStoreLogOn :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerStoreLogOn = withXFTPServerCfg testXFTPServerConfig {storeLogFile = Just testXFTPLogFile, serverStatsBackupFile = Just testXFTPStatsBackupFile}
withXFTPServerStoreLogOn = withXFTPServerCfg testXFTPServerConfig {serverStoreCfg = XSCMemory (Just testXFTPLogFile), storeLogFile = Just testXFTPLogFile, serverStatsBackupFile = Just testXFTPStatsBackupFile}
withXFTPServerCfgNoALPN :: HasCallStack => XFTPServerConfig -> (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerCfgNoALPN :: (HasCallStack, FileStoreClass s) => XFTPServerConfig s -> (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerCfgNoALPN cfg = withXFTPServerCfg cfg {transportConfig = (transportConfig cfg) {serverALPN = Nothing}}
withXFTPServerCfg :: HasCallStack => XFTPServerConfig -> (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerCfg :: (HasCallStack, FileStoreClass s) => XFTPServerConfig s -> (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerCfg cfg =
serverBracket
(\started -> runXFTPServerBlocking started cfg)
@@ -64,11 +121,13 @@ withXFTPServerCfg cfg =
withXFTPServerThreadOn :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerThreadOn = withXFTPServerCfg testXFTPServerConfig
withXFTPServer :: HasCallStack => IO a -> IO a
withXFTPServer = withXFTPServerCfg testXFTPServerConfig . const
withXFTPServer :: HasCallStack => IO a -> AFStoreType -> IO a
withXFTPServer test fsType = withXFTPServerConfigOn (cfgFS fsType) $ const test
withXFTPServer2 :: HasCallStack => IO a -> IO a
withXFTPServer2 = withXFTPServerCfg testXFTPServerConfig {xftpPort = xftpTestPort2, filesPath = xftpServerFiles2} . const
withXFTPServer2 :: HasCallStack => IO a -> AFStoreType -> IO a
withXFTPServer2 test fsType = withXFTPServerConfigOn (cfgFS2 fsType) $ const test
-- Constants
xftpTestPort :: ServiceName
xftpTestPort = "8000"
@@ -103,12 +162,13 @@ testXFTPStatsBackupFile = "tests/tmp/xftp-server-stats.log"
xftpTestPrometheusMetricsFile :: FilePath
xftpTestPrometheusMetricsFile = "tests/tmp/xftp-server-metrics.txt"
testXFTPServerConfig :: XFTPServerConfig
testXFTPServerConfig :: XFTPServerConfig STMFileStore
testXFTPServerConfig =
XFTPServerConfig
{ xftpPort = xftpTestPort,
controlPort = Nothing,
fileIdSize = 16,
serverStoreCfg = XSCMemory Nothing,
storeLogFile = Nothing,
filesPath = xftpServerFiles,
fileSizeQuota = Nothing,
@@ -139,6 +199,9 @@ testXFTPServerConfig =
webStaticPath = Nothing
}
testXFTPServerConfig2 :: XFTPServerConfig STMFileStore
testXFTPServerConfig2 = testXFTPServerConfig {xftpPort = xftpTestPort2, filesPath = xftpServerFiles2}
testXFTPClientConfig :: XFTPClientConfig
testXFTPClientConfig = defaultXFTPClientConfig
@@ -152,7 +215,7 @@ testXFTPClientWith cfg client = do
Right c -> client c
Left e -> error $ show e
testXFTPServerConfigSNI :: XFTPServerConfig
testXFTPServerConfigSNI :: XFTPServerConfig STMFileStore
testXFTPServerConfigSNI =
testXFTPServerConfig
{ httpCredentials =
@@ -171,7 +234,7 @@ testXFTPServerConfigSNI =
withXFTPServerSNI :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a
withXFTPServerSNI = withXFTPServerCfg testXFTPServerConfigSNI
testXFTPServerConfigEd25519SNI :: XFTPServerConfig
testXFTPServerConfigEd25519SNI :: XFTPServerConfig STMFileStore
testXFTPServerConfigEd25519SNI =
testXFTPServerConfig
{ xftpCredentials =
+27 -26
View File
@@ -6,7 +6,7 @@
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE ScopedTypeVariables #-}
module XFTPServerTests where
module XFTPServerTests (xftpServerTests) where
import AgentTests.FunctionalAPITests (runRight_)
import Control.Concurrent (threadDelay)
@@ -31,7 +31,7 @@ import ServerTests (logSize)
import Simplex.FileTransfer.Client
import Simplex.FileTransfer.Description (kb)
import Simplex.FileTransfer.Protocol (FileInfo (..), XFTPFileId, xftpBlockSize)
import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..))
import Simplex.FileTransfer.Server.Env (AFStoreType, XFTPServerConfig (..))
import Simplex.FileTransfer.Transport (XFTPClientHandshake (..), XFTPClientHello (..), XFTPErrorType (..), XFTPRcvChunkSpec (..), XFTPServerHandshake (..), pattern VersionXFTP)
import Simplex.Messaging.Client (ProtocolClientError (..))
import qualified Simplex.Messaging.Crypto as C
@@ -52,7 +52,7 @@ import UnliftIO.STM
import Util
import XFTPClient
xftpServerTests :: Spec
xftpServerTests :: SpecWith AFStoreType
xftpServerTests =
before_ (createDirectoryIfMissing False xftpServerFiles) . after_ (removeDirectoryRecursive xftpServerFiles) $ do
describe "XFTP file chunk delivery" $ do
@@ -76,7 +76,7 @@ xftpServerTests =
it "allowed with correct basic auth" $ testFileBasicAuth True (Just "pwd") (Just "pwd") True
it "allowed with auth on server without auth" $ testFileBasicAuth True Nothing (Just "any") True
it "should not change content for uploaded and committed files" testFileSkipCommitted
describe "XFTP SNI and CORS" $ do
describe "XFTP SNI and CORS" $ beforeWith (const (pure ())) $ do
it "should select web certificate when SNI is used" testSNICertSelection
it "should select XFTP certificate when SNI is not used" testNoSNICertSelection
it "should add CORS headers when SNI is used" testCORSHeaders
@@ -103,10 +103,10 @@ createTestChunk fp = do
readChunk :: XFTPFileId -> IO ByteString
readChunk sId = B.readFile (xftpServerFiles </> B.unpack (B64.encode $ unEntityId sId))
testFileChunkDelivery :: Expectation
testFileChunkDelivery :: AFStoreType -> Expectation
testFileChunkDelivery = xftpTest $ \c -> runRight_ $ runTestFileChunkDelivery c c
testFileChunkDelivery2 :: Expectation
testFileChunkDelivery2 :: AFStoreType -> Expectation
testFileChunkDelivery2 = xftpTest2 $ \s r -> runRight_ $ runTestFileChunkDelivery s r
runTestFileChunkDelivery :: XFTPClient -> XFTPClient -> ExceptT XFTPClientError IO ()
@@ -129,7 +129,7 @@ runTestFileChunkDelivery s r = do
downloadXFTPChunk g r rpKey rId $ XFTPRcvChunkSpec "tests/tmp/received_chunk1" chSize digest
liftIO $ B.readFile "tests/tmp/received_chunk1" `shouldReturn` bytes
testFileChunkDeliveryAddRecipients :: Expectation
testFileChunkDeliveryAddRecipients :: AFStoreType -> Expectation
testFileChunkDeliveryAddRecipients = xftpTest4 $ \s r1 r2 r3 -> runRight_ $ do
g <- liftIO C.newRandom
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -150,10 +150,10 @@ testFileChunkDeliveryAddRecipients = xftpTest4 $ \s r1 r2 r3 -> runRight_ $ do
testReceiveChunk r2 rpKey2 rId2 "tests/tmp/received_chunk2"
testReceiveChunk r3 rpKey3 rId3 "tests/tmp/received_chunk3"
testFileChunkDelete :: Expectation
testFileChunkDelete :: AFStoreType -> Expectation
testFileChunkDelete = xftpTest $ \c -> runRight_ $ runTestFileChunkDelete c c
testFileChunkDelete2 :: Expectation
testFileChunkDelete2 :: AFStoreType -> Expectation
testFileChunkDelete2 = xftpTest2 $ \s r -> runRight_ $ runTestFileChunkDelete s r
runTestFileChunkDelete :: XFTPClient -> XFTPClient -> ExceptT XFTPClientError IO ()
@@ -179,10 +179,10 @@ runTestFileChunkDelete s r = do
deleteXFTPChunk s spKey sId
`catchError` (liftIO . (`shouldBe` PCEProtocolError AUTH))
testFileChunkAck :: Expectation
testFileChunkAck :: AFStoreType -> Expectation
testFileChunkAck = xftpTest $ \c -> runRight_ $ runTestFileChunkAck c c
testFileChunkAck2 :: Expectation
testFileChunkAck2 :: AFStoreType -> Expectation
testFileChunkAck2 = xftpTest2 $ \s r -> runRight_ $ runTestFileChunkAck s r
runTestFileChunkAck :: XFTPClient -> XFTPClient -> ExceptT XFTPClientError IO ()
@@ -206,7 +206,7 @@ runTestFileChunkAck s r = do
ackXFTPChunk r rpKey rId
`catchError` (liftIO . (`shouldBe` PCEProtocolError AUTH))
testWrongChunkSize :: Expectation
testWrongChunkSize :: AFStoreType -> Expectation
testWrongChunkSize = xftpTest $ \c -> do
g <- C.newRandom
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -218,8 +218,8 @@ testWrongChunkSize = xftpTest $ \c -> do
void (createXFTPChunk c spKey file [rcvKey] Nothing)
`catchError` (liftIO . (`shouldBe` PCEProtocolError SIZE))
testFileChunkExpiration :: Expectation
testFileChunkExpiration = withXFTPServerCfg testXFTPServerConfig {fileExpiration} $
testFileChunkExpiration :: AFStoreType -> Expectation
testFileChunkExpiration fsType = withXFTPServerConfigOn (updateXFTPCfg (cfgFS fsType) $ \c -> c {fileExpiration}) $
\_ -> testXFTPClient $ \c -> runRight_ $ do
g <- liftIO C.newRandom
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -242,8 +242,8 @@ testFileChunkExpiration = withXFTPServerCfg testXFTPServerConfig {fileExpiration
where
fileExpiration = Just ExpirationConfig {ttl = 1, checkInterval = 1}
testInactiveClientExpiration :: Expectation
testInactiveClientExpiration = withXFTPServerCfg testXFTPServerConfig {inactiveClientExpiration} $ \_ -> runRight_ $ do
testInactiveClientExpiration :: AFStoreType -> Expectation
testInactiveClientExpiration fsType = withXFTPServerConfigOn (updateXFTPCfg (cfgFS fsType) $ \c -> c {inactiveClientExpiration}) $ \_ -> runRight_ $ do
disconnected <- newEmptyTMVarIO
ts <- liftIO getCurrentTime
c <- ExceptT $ getXFTPClient (1, testXFTPServer, Nothing) testXFTPClientConfig [] ts (\_ -> atomically $ putTMVar disconnected ())
@@ -258,8 +258,8 @@ testInactiveClientExpiration = withXFTPServerCfg testXFTPServerConfig {inactiveC
where
inactiveClientExpiration = Just ExpirationConfig {ttl = 1, checkInterval = 1}
testFileStorageQuota :: Expectation
testFileStorageQuota = withXFTPServerCfg testXFTPServerConfig {fileSizeQuota = Just $ chSize * 2} $
testFileStorageQuota :: AFStoreType -> Expectation
testFileStorageQuota fsType = withXFTPServerConfigOn (updateXFTPCfg (cfgFS fsType) $ \c -> c {fileSizeQuota = Just $ chSize * 2}) $
\_ -> testXFTPClient $ \c -> runRight_ $ do
g <- liftIO C.newRandom
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -286,8 +286,8 @@ testFileStorageQuota = withXFTPServerCfg testXFTPServerConfig {fileSizeQuota = J
uploadXFTPChunk c spKey sId3 chunkSpec
download rId3
testFileLog :: Expectation
testFileLog = do
testFileLog :: AFStoreType -> Expectation
testFileLog _ = do
g <- C.newRandom
bytes <- liftIO $ createTestChunk testChunkPath
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -378,9 +378,9 @@ testFileLog = do
downloadXFTPChunk g c rpKey rId $ XFTPRcvChunkSpec "tests/tmp/received_chunk1" chSize digest
liftIO $ B.readFile "tests/tmp/received_chunk1" `shouldReturn` bytes
testFileBasicAuth :: Bool -> Maybe BasicAuth -> Maybe BasicAuth -> Bool -> IO ()
testFileBasicAuth allowNewFiles newFileBasicAuth clntAuth success =
withXFTPServerCfg testXFTPServerConfig {allowNewFiles, newFileBasicAuth} $
testFileBasicAuth :: Bool -> Maybe BasicAuth -> Maybe BasicAuth -> Bool -> AFStoreType -> IO ()
testFileBasicAuth allowNewFiles newFileBasicAuth clntAuth success fsType =
withXFTPServerConfigOn (updateXFTPCfg (cfgFS fsType) $ \c -> c {allowNewFiles, newFileBasicAuth}) $
\_ -> testXFTPClient $ \c -> do
g <- C.newRandom
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -400,9 +400,9 @@ testFileBasicAuth allowNewFiles newFileBasicAuth clntAuth success =
void (createXFTPChunk c spKey file [rcvKey] clntAuth)
`catchError` (liftIO . (`shouldBe` PCEProtocolError AUTH))
testFileSkipCommitted :: IO ()
testFileSkipCommitted =
withXFTPServerCfg testXFTPServerConfig $
testFileSkipCommitted :: AFStoreType -> IO ()
testFileSkipCommitted fsType =
withXFTPServerConfigOn (cfgFS fsType) $
\_ -> testXFTPClient $ \c -> do
g <- C.newRandom
(sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g
@@ -598,3 +598,4 @@ testStaleWebSession =
decoded <- either (error . show) pure $ C.unPad respBody
decoded `shouldBe` smpEncode SESSION
+10 -9
View File
@@ -45,6 +45,7 @@ import System.Process (CreateProcess (..), StdStream (..), createProcess, proc,
import Test.Hspec hiding (fit, it)
import Util
import Simplex.FileTransfer.Server.Env (XFTPServerConfig)
import Simplex.FileTransfer.Server.Store (STMFileStore)
import XFTPClient (testXFTPServerConfigEd25519SNI, testXFTPServerConfigSNI, withXFTPServerCfg, xftpTestPort)
import AgentTests.FunctionalAPITests (rfGet, runRight, runRight_, sfGet, withAgent)
import Simplex.Messaging.Agent (AgentClient, xftpReceiveFile, xftpSendFile, xftpStartWorkers)
@@ -2857,7 +2858,7 @@ tsIntegrationTests dbCleanup = describe "integration" $
it "cross-language: Haskell upload, TS download" $
haskellUploadTsDownloadTest testXFTPServerConfigSNI
webHandshakeTest :: XFTPServerConfig -> FilePath -> Expectation
webHandshakeTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
webHandshakeTest cfg caFile = do
withXFTPServerCfg cfg $ \_ -> do
Fingerprint fp <- loadFileFingerprint caFile
@@ -2898,7 +2899,7 @@ webHandshakeTest cfg caFile = do
<> jsOut "new Uint8Array([idOk ? 1 : 0, ack.length === 0 ? 1 : 0])"
result `shouldBe` B.pack [1, 1]
pingTest :: XFTPServerConfig -> FilePath -> Expectation
pingTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
pingTest cfg caFile = do
withXFTPServerCfg cfg $ \_ -> do
Fingerprint fp <- loadFileFingerprint caFile
@@ -2920,7 +2921,7 @@ pingTest cfg caFile = do
<> jsOut "new Uint8Array([1])"
result `shouldBe` B.pack [1]
fullRoundTripTest :: XFTPServerConfig -> FilePath -> Expectation
fullRoundTripTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
fullRoundTripTest cfg caFile = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
withXFTPServerCfg cfg $ \_ -> do
@@ -3001,7 +3002,7 @@ agentURIRoundTripTest = do
<> jsOut "new Uint8Array([match])"
result `shouldBe` B.pack [1]
agentUploadDownloadTest :: XFTPServerConfig -> FilePath -> Expectation
agentUploadDownloadTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
agentUploadDownloadTest cfg caFile = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
withXFTPServerCfg cfg $ \_ -> do
@@ -3034,7 +3035,7 @@ agentUploadDownloadTest cfg caFile = do
<> jsOut "new Uint8Array([nameMatch, sizeMatch, dataMatch])"
result `shouldBe` B.pack [1, 1, 1]
agentDeleteTest :: XFTPServerConfig -> FilePath -> Expectation
agentDeleteTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
agentDeleteTest cfg caFile = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
withXFTPServerCfg cfg $ \_ -> do
@@ -3066,7 +3067,7 @@ agentDeleteTest cfg caFile = do
<> jsOut "new Uint8Array([deleted])"
result `shouldBe` B.pack [1]
agentRedirectTest :: XFTPServerConfig -> FilePath -> Expectation
agentRedirectTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
agentRedirectTest cfg caFile = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
withXFTPServerCfg cfg $ \_ -> do
@@ -3100,7 +3101,7 @@ agentRedirectTest cfg caFile = do
<> jsOut "new Uint8Array([hasRedirect, nameMatch, sizeMatch, dataMatch])"
result `shouldBe` B.pack [1, 1, 1, 1]
tsUploadHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation
tsUploadHaskellDownloadTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
tsUploadHaskellDownloadTest cfg caFile = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
createDirectoryIfMissing False recipientFiles
@@ -3135,7 +3136,7 @@ tsUploadHaskellDownloadTest cfg caFile = do
downloadedData <- B.readFile outPath
downloadedData `shouldBe` originalData
tsUploadRedirectHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation
tsUploadRedirectHaskellDownloadTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation
tsUploadRedirectHaskellDownloadTest cfg caFile = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
createDirectoryIfMissing False recipientFiles
@@ -3170,7 +3171,7 @@ tsUploadRedirectHaskellDownloadTest cfg caFile = do
downloadedData <- B.readFile outPath
downloadedData `shouldBe` originalData
haskellUploadTsDownloadTest :: XFTPServerConfig -> Expectation
haskellUploadTsDownloadTest :: XFTPServerConfig STMFileStore -> Expectation
haskellUploadTsDownloadTest cfg = do
createDirectoryIfMissing False "tests/tmp/xftp-server-files"
createDirectoryIfMissing False senderFiles
+170
View File
@@ -0,0 +1,170 @@
# XFTP Server Manual Test Suite
Automated integration tests for the XFTP server covering memory and PostgreSQL backends, migration, persistence, blocking, and edge cases.
- `xftp-test.py` — automated test script (143 checks)
- `xftp-server-testing.md` — manual step-by-step guide covering the same scenarios
## Prerequisites
- Linux (tested)
- Python 3
- Haskell toolchain (`cabal`, `ghc`)
- PostgreSQL 16+ (`postgresql-16` package or equivalent)
## Setup
### 1. Build the XFTP binaries
```bash
cabal build -fserver_postgres exe:xftp-server exe:xftp
```
### 2. Set up a local PostgreSQL instance
The test script connects to PostgreSQL via `PGHOST` (Unix socket path). Set up a local instance that you own (no root required):
```bash
# Pick a data directory and socket directory
export PGDATA=/tmp/pgdata
export PGHOST=/tmp/pgsocket
# Clean up any previous instance
rm -rf $PGDATA $PGHOST
mkdir -p $PGDATA $PGHOST
# Initialize the cluster
/usr/lib/postgresql/16/bin/initdb -D $PGDATA --auth=trust --no-locale --encoding=UTF8
# Configure to listen on our socket directory and localhost TCP
echo "unix_socket_directories = '$PGHOST'" >> $PGDATA/postgresql.conf
echo "listen_addresses = '127.0.0.1'" >> $PGDATA/postgresql.conf
# Start the server
/usr/lib/postgresql/16/bin/pg_ctl -D $PGDATA -l /tmp/pg.log start
# Verify it's running
pg_isready -h $PGHOST
# Expected: /tmp/pgsocket:5432 - accepting connections
```
### 3. Create the required PostgreSQL roles
The test script expects two roles to exist:

- `postgres` — admin role used by the test bracket to create/drop databases
- `xftp` — test user for the XFTP server database
```bash
# Create the postgres admin role (if initdb created the cluster as your user)
psql -h $PGHOST -d postgres -c "CREATE USER postgres WITH SUPERUSER;"
# Create the xftp test user
psql -h $PGHOST -U postgres -d postgres -c "CREATE USER xftp WITH SUPERUSER;"
```
Verify both roles exist:
```bash
psql -h $PGHOST -U postgres -d postgres -c "\du"
```
## Run the test suite
```bash
PGHOST=/tmp/pgsocket python3 tests/manual/xftp-test.py
```
Expected output (abbreviated):
```
XFTP server: /project/git/simplexmq-4/dist-newstyle/.../xftp-server
XFTP client: /project/git/simplexmq-4/dist-newstyle/.../xftp
Test dir: /project/git/simplexmq-4/xftp-test
PGHOST: /tmp/pgsocket
=== 1. Basic send/receive (memory) ===
[PASS] 1.1 rcv1.xftp created
...
=== 12. Recipient cascade and storage accounting ===
...
[PASS] 12.2e DB files after delete (0)
==========================================
Results: 143 passed, 0 failed
==========================================
```
Total runtime: ~3 minutes. Exit code 0 on success, 1 on any failure.
## What the suite tests
| # | Section | Checks | Scope |
|---|---------|--------|-------|
| 1 | Basic memory | 9 | Send/recv/delete on STM backend |
| 2 | Basic PostgreSQL | 7 | Send/recv/delete on PG backend, DB row verification |
| 3 | Migration memory → PG | 12 | Send on memory, partial recv, import, recv remaining |
| 4 | Migration PG → memory | 5 | Export, switch to memory, delete exported files |
| 4b | Send PG, recv memory | 7 | Reverse direction — send on PG, export, recv on memory |
| 5 | Restart persistence | 6 | memory+log / memory no log / PostgreSQL |
| 6 | Config edge cases | 15 | store log conflicts, missing schema, dual-write, import/export guards |
| 7 | File blocking | 13 | Control port block, block state survives migration both directions |
| 8 | Migration edge cases | 23 | Acked recipients preserved, deleted files absent, 20MB multi-chunk, double round-trip |
| 9 | Auth & access control | 9 | allowNewFiles, basic auth (none/wrong/correct/server-no-auth), quota |
| 10 | Control port ops | 8 | No auth, wrong auth, stats, delete, invalid block |
| 11 | Blocked sender delete | 3 | Sender can't delete blocked file |
| 12 | Cascade & storage | 8 | Recipient cascade, disk/DB accounting |
## Troubleshooting
### Server binary not found
```
Binary not found: .../xftp-server
Run: cabal build -fserver_postgres exe:xftp-server
```
Run the cabal build command from step 1.
### Cannot connect to PostgreSQL
```
Cannot connect to PostgreSQL as postgres. Is it running?
```
Check:
1. `pg_isready -h $PGHOST` returns "accepting connections"
2. `PGHOST` environment variable is exported in the shell running the test
3. The `postgres` role exists: `psql -h $PGHOST -U postgres -d postgres -c "SELECT 1;"`
### PostgreSQL user 'xftp' does not exist
```
PostgreSQL user 'xftp' does not exist.
Run: psql -U postgres -c "CREATE USER xftp WITH SUPERUSER;"
```
Run the create-user command from step 3.
### Port 7921 or 15230 already in use
The test uses port 7921 for XFTP and 15230 for the control port. If these are occupied, stop whatever is using them or edit `PORT` / `CONTROL_PORT` constants at the top of `xftp-test.py`.
### Server fails to start mid-test
Check `xftp-test/server.log` in the project directory for the server's stdout/stderr. The test framework prints the last 5 lines of the log on startup failure.
## Stopping the test PostgreSQL instance
```bash
/usr/lib/postgresql/16/bin/pg_ctl -D /tmp/pgdata stop
```
## Cleanup
The test script cleans up its own test directory (`./xftp-test/`) and drops the test database (`xftp_server_store`) on completion. To also remove the PostgreSQL instance:
```bash
/usr/lib/postgresql/16/bin/pg_ctl -D /tmp/pgdata stop
rm -rf /tmp/pgdata /tmp/pgsocket /tmp/pg.log
```
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff