persistent client sessions

This commit is contained in:
Evgeny @ SimpleX Chat
2026-02-04 16:05:17 +00:00
parent af3a183cda
commit 41d474f0d4
8 changed files with 266 additions and 168 deletions

1
.gitignore vendored
View File

@@ -11,3 +11,4 @@ cabal.project.local~
.hpc/
*.tix
.coverage

View File

@@ -0,0 +1,53 @@
# XFTPClientAgent Pattern
## TOC
1. Executive Summary
2. Changes: client.ts
3. Changes: agent.ts
4. Changes: test/browser.test.ts
5. Verification
## Executive Summary
Add `XFTPClientAgent` — a per-server connection pool matching the Haskell pattern. The agent caches `XFTPClient` instances by server URL. All orchestration functions (`uploadFile`, `downloadFile`, `deleteFile`) take `agent` as first parameter and use `getXFTPServerClient(agent, server)` instead of calling `connectXFTP` directly. Connections stay open on success; the caller creates and closes the agent.
`connectXFTP` and `closeXFTP` stay exported (used by `XFTPWebTests.hs` Haskell tests). The `browserClients` hack, per-function `connections: Map`, and `getOrConnect` are deleted.
## Changes: client.ts
**Add** after types section: `XFTPClientAgent` interface, `newXFTPAgent`, `getXFTPServerClient`, `closeXFTPServerClient`, `closeXFTPAgent`.
**Delete**: `browserClients` Map and all `isNode` browser-cache checks in `connectXFTP` and `closeXFTP`.
**Revert `closeXFTP`** to unconditional `c.transport.close()` (browser transport.close() is already a no-op).
`connectXFTP` stays exported (backward compat) but becomes a raw low-level function — no caching.
## Changes: agent.ts
**Imports**: replace `connectXFTP`/`closeXFTP` with `getXFTPServerClient`/`closeXFTPAgent` etc.
**Re-export** from agent.ts: `newXFTPAgent`, `closeXFTPAgent`, `XFTPClientAgent`.
**`uploadFile`**: add `agent: XFTPClientAgent` as first param. Replace `connectXFTP` → `getXFTPServerClient`. Remove `finally { closeXFTP }`. Pass `agent` to `uploadRedirectDescription`.
**`uploadRedirectDescription`**: change from `(client, server, innerFd)` to `(agent, server, innerFd)`. Get client via `getXFTPServerClient`.
**`downloadFile`**: add `agent` param. Delete local `connections: Map`. Replace `getOrConnect` → `getXFTPServerClient`. Remove finally cleanup. Pass `agent` to `downloadWithRedirect`.
**`downloadWithRedirect`**: add `agent` param. Same replacements. Remove try/catch cleanup. Recursive call passes `agent`.
**`deleteFile`**: add `agent` param. Same pattern.
**Delete**: `getOrConnect` function entirely.
## Changes: test/browser.test.ts
Create agent before operations, pass to upload/download, close in finally.
## Verification
1. `npx vitest --run` — browser round-trip test passes
2. No remaining `browserClients`, `getOrConnect`, or per-function `connections: Map` locals
3. `connectXFTP` and `closeXFTP` still exported (XFTPWebTests.hs compat)
4. All orchestration functions take `agent` as first param

1
xftp-web/.gitignore vendored
View File

@@ -1,2 +1,3 @@
node_modules/
dist/
package-lock.json

50
xftp-web/README.md Normal file
View File

@@ -0,0 +1,50 @@
# xftp-web
Browser-compatible XFTP file transfer client in TypeScript.
## Prerequisites
- Haskell toolchain with `cabal` (to build `xftp-server`)
- Node.js 20+
- Chromium system dependencies (see below)
## Setup
```bash
# Build the XFTP server binary (from repo root)
cabal build xftp-server
# Install JS dependencies
cd xftp-web
npm install
# Install Chromium for Playwright (browser tests)
npx playwright install chromium
```
If Chromium fails to launch due to missing system libraries, install them with:
```bash
# Requires root
npx playwright install-deps chromium
```
## Running tests
```bash
# Browser round-trip test (vitest + Playwright headless Chromium)
npm run test:browser -- --run
# Unit tests (Jest, Node.js)
npm test
```
The browser test automatically starts an `xftp-server` instance on port 7000 via `globalSetup`, using certs from `tests/fixtures/`.
## Build
```bash
npm run build
```
Output goes to `dist/`.

View File

@@ -9,7 +9,7 @@
"postinstall": "ln -sf ../../../libsodium-sumo/dist/modules-sumo-esm/libsodium-sumo.mjs node_modules/libsodium-wrappers-sumo/dist/modules-sumo-esm/libsodium-sumo.mjs",
"build": "tsc",
"test": "node --experimental-vm-modules node_modules/.bin/jest",
"test:browser": "vitest"
"test:browser": "vitest --run"
},
"devDependencies": {
"@types/libsodium-wrappers-sumo": "^0.7.8",

View File

@@ -15,12 +15,13 @@ import {
} from "./protocol/description.js"
import type {FileInfo} from "./protocol/commands.js"
import {
connectXFTP, createXFTPChunk, uploadXFTPChunk, downloadXFTPChunk,
ackXFTPChunk, deleteXFTPChunk, closeXFTP, type XFTPClient
getXFTPServerClient, createXFTPChunk, uploadXFTPChunk, downloadXFTPChunk,
ackXFTPChunk, deleteXFTPChunk, type XFTPClientAgent
} from "./client.js"
export {newXFTPAgent, closeXFTPAgent, type XFTPClientAgent} from "./client.js"
import {processDownloadedFile} from "./download.js"
import type {XFTPServer} from "./protocol/address.js"
import {formatXFTPServer} from "./protocol/address.js"
import {formatXFTPServer, parseXFTPServer} from "./protocol/address.js"
import {concatBytes} from "./protocol/encoding.js"
import type {FileHeader} from "./crypto/file.js"
@@ -93,53 +94,50 @@ export function encryptFileForUpload(source: Uint8Array, fileName: string): Encr
const DEFAULT_REDIRECT_THRESHOLD = 400
export async function uploadFile(
agent: XFTPClientAgent,
server: XFTPServer,
encrypted: EncryptedFileInfo,
onProgress?: (uploaded: number, total: number) => void,
redirectThreshold?: number
): Promise<UploadResult> {
const specs = prepareChunkSpecs(encrypted.chunkSizes)
const client = await connectXFTP(server)
const client = await getXFTPServerClient(agent, server)
const sentChunks: SentChunk[] = []
let uploaded = 0
try {
for (let i = 0; i < specs.length; i++) {
const spec = specs[i]
const chunkNo = i + 1
const sndKp = generateEd25519KeyPair()
const rcvKp = generateEd25519KeyPair()
const chunkData = encrypted.encData.subarray(spec.chunkOffset, spec.chunkOffset + spec.chunkSize)
const chunkDigest = getChunkDigest(chunkData)
const fileInfo: FileInfo = {
sndKey: encodePubKeyEd25519(sndKp.publicKey),
size: spec.chunkSize,
digest: chunkDigest
}
const {senderId, recipientIds} = await createXFTPChunk(
client, sndKp.privateKey, fileInfo, [encodePubKeyEd25519(rcvKp.publicKey)]
)
await uploadXFTPChunk(client, sndKp.privateKey, senderId, chunkData)
sentChunks.push({
chunkNo, senderId, senderKey: sndKp.privateKey,
recipientId: recipientIds[0], recipientKey: rcvKp.privateKey,
chunkSize: spec.chunkSize, digest: chunkDigest, server
})
uploaded += spec.chunkSize
onProgress?.(uploaded, encrypted.encData.length)
for (let i = 0; i < specs.length; i++) {
const spec = specs[i]
const chunkNo = i + 1
const sndKp = generateEd25519KeyPair()
const rcvKp = generateEd25519KeyPair()
const chunkData = encrypted.encData.subarray(spec.chunkOffset, spec.chunkOffset + spec.chunkSize)
const chunkDigest = getChunkDigest(chunkData)
const fileInfo: FileInfo = {
sndKey: encodePubKeyEd25519(sndKp.publicKey),
size: spec.chunkSize,
digest: chunkDigest
}
const rcvDescription = buildDescription("recipient", encrypted, sentChunks)
const sndDescription = buildDescription("sender", encrypted, sentChunks)
let uri = encodeDescriptionURI(rcvDescription)
let finalRcvDescription = rcvDescription
const threshold = redirectThreshold ?? DEFAULT_REDIRECT_THRESHOLD
if (uri.length > threshold && sentChunks.length > 1) {
finalRcvDescription = await uploadRedirectDescription(client, server, rcvDescription)
uri = encodeDescriptionURI(finalRcvDescription)
}
return {rcvDescription: finalRcvDescription, sndDescription, uri}
} finally {
closeXFTP(client)
const {senderId, recipientIds} = await createXFTPChunk(
client, sndKp.privateKey, fileInfo, [encodePubKeyEd25519(rcvKp.publicKey)]
)
await uploadXFTPChunk(client, sndKp.privateKey, senderId, chunkData)
sentChunks.push({
chunkNo, senderId, senderKey: sndKp.privateKey,
recipientId: recipientIds[0], recipientKey: rcvKp.privateKey,
chunkSize: spec.chunkSize, digest: chunkDigest, server
})
uploaded += spec.chunkSize
onProgress?.(uploaded, encrypted.encData.length)
}
const rcvDescription = buildDescription("recipient", encrypted, sentChunks)
const sndDescription = buildDescription("sender", encrypted, sentChunks)
let uri = encodeDescriptionURI(rcvDescription)
let finalRcvDescription = rcvDescription
const threshold = redirectThreshold ?? DEFAULT_REDIRECT_THRESHOLD
if (uri.length > threshold && sentChunks.length > 1) {
finalRcvDescription = await uploadRedirectDescription(agent, server, rcvDescription)
uri = encodeDescriptionURI(finalRcvDescription)
}
return {rcvDescription: finalRcvDescription, sndDescription, uri}
}
function buildDescription(
@@ -170,10 +168,11 @@ function buildDescription(
}
async function uploadRedirectDescription(
client: XFTPClient,
agent: XFTPClientAgent,
server: XFTPServer,
innerFd: FileDescription
): Promise<FileDescription> {
const client = await getXFTPServerClient(agent, server)
const yaml = encodeFileDescription(innerFd)
const yamlBytes = new TextEncoder().encode(yaml)
const enc = encryptFileForUpload(yamlBytes, "")
@@ -225,136 +224,106 @@ async function uploadRedirectDescription(
// ── Download ────────────────────────────────────────────────────
export async function downloadFile(
agent: XFTPClientAgent,
fd: FileDescription,
onProgress?: (downloaded: number, total: number) => void
): Promise<DownloadResult> {
const err = validateFileDescription(fd)
if (err) throw new Error("downloadFile: " + err)
if (fd.redirect !== null) {
return downloadWithRedirect(fd, onProgress)
return downloadWithRedirect(agent, fd, onProgress)
}
const connections = new Map<string, XFTPClient>()
try {
const plaintextChunks: Uint8Array[] = new Array(fd.chunks.length)
let downloaded = 0
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) throw new Error("downloadFile: chunk has no replicas")
const client = await getOrConnect(connections, replica.server)
const plaintextChunks: Uint8Array[] = new Array(fd.chunks.length)
let downloaded = 0
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) throw new Error("downloadFile: chunk has no replicas")
const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server))
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
const data = await downloadXFTPChunk(client, kp.privateKey, replica.replicaId, chunk.digest)
plaintextChunks[chunk.chunkNo - 1] = data
downloaded += chunk.chunkSize
onProgress?.(downloaded, fd.size)
}
// Verify file size
const totalSize = plaintextChunks.reduce((s, c) => s + c.length, 0)
if (totalSize !== fd.size) throw new Error("downloadFile: file size mismatch")
// Verify file digest (SHA-512 of encrypted file data)
const combined = plaintextChunks.length === 1 ? plaintextChunks[0] : concatBytes(...plaintextChunks)
const digest = sha512(combined)
if (!digestEqual(digest, fd.digest)) throw new Error("downloadFile: file digest mismatch")
// Decrypt
const result = processDownloadedFile(fd, plaintextChunks)
// ACK all chunks (best-effort)
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) continue
try {
const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server))
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
const data = await downloadXFTPChunk(client, kp.privateKey, replica.replicaId, chunk.digest)
plaintextChunks[chunk.chunkNo - 1] = data
downloaded += chunk.chunkSize
onProgress?.(downloaded, fd.size)
}
// Verify file size
const totalSize = plaintextChunks.reduce((s, c) => s + c.length, 0)
if (totalSize !== fd.size) throw new Error("downloadFile: file size mismatch")
// Verify file digest (SHA-512 of encrypted file data)
const combined = plaintextChunks.length === 1 ? plaintextChunks[0] : concatBytes(...plaintextChunks)
const digest = sha512(combined)
if (!digestEqual(digest, fd.digest)) throw new Error("downloadFile: file digest mismatch")
// Decrypt
const result = processDownloadedFile(fd, plaintextChunks)
// ACK all chunks (best-effort)
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) continue
try {
const client = connections.get(replica.server)
if (!client) continue
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
await ackXFTPChunk(client, kp.privateKey, replica.replicaId)
} catch (_) {}
}
return result
} finally {
for (const c of connections.values()) closeXFTP(c)
await ackXFTPChunk(client, kp.privateKey, replica.replicaId)
} catch (_) {}
}
return result
}
async function downloadWithRedirect(
agent: XFTPClientAgent,
fd: FileDescription,
onProgress?: (downloaded: number, total: number) => void
): Promise<DownloadResult> {
const connections = new Map<string, XFTPClient>()
try {
const plaintextChunks: Uint8Array[] = new Array(fd.chunks.length)
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) throw new Error("downloadWithRedirect: chunk has no replicas")
const client = await getOrConnect(connections, replica.server)
const plaintextChunks: Uint8Array[] = new Array(fd.chunks.length)
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) throw new Error("downloadWithRedirect: chunk has no replicas")
const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server))
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
const data = await downloadXFTPChunk(client, kp.privateKey, replica.replicaId, chunk.digest)
plaintextChunks[chunk.chunkNo - 1] = data
}
const totalSize = plaintextChunks.reduce((s, c) => s + c.length, 0)
if (totalSize !== fd.size) throw new Error("downloadWithRedirect: redirect file size mismatch")
const combined = plaintextChunks.length === 1 ? plaintextChunks[0] : concatBytes(...plaintextChunks)
const digest = sha512(combined)
if (!digestEqual(digest, fd.digest)) throw new Error("downloadWithRedirect: redirect file digest mismatch")
const {content: yamlBytes} = processDownloadedFile(fd, plaintextChunks)
const innerFd = decodeFileDescription(new TextDecoder().decode(yamlBytes))
const innerErr = validateFileDescription(innerFd)
if (innerErr) throw new Error("downloadWithRedirect: inner description invalid: " + innerErr)
if (innerFd.size !== fd.redirect!.size) throw new Error("downloadWithRedirect: redirect size mismatch")
if (!digestEqual(innerFd.digest, fd.redirect!.digest)) throw new Error("downloadWithRedirect: redirect digest mismatch")
// ACK redirect chunks (best-effort)
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) continue
try {
const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server))
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
const data = await downloadXFTPChunk(client, kp.privateKey, replica.replicaId, chunk.digest)
plaintextChunks[chunk.chunkNo - 1] = data
}
const totalSize = plaintextChunks.reduce((s, c) => s + c.length, 0)
if (totalSize !== fd.size) throw new Error("downloadWithRedirect: redirect file size mismatch")
const combined = plaintextChunks.length === 1 ? plaintextChunks[0] : concatBytes(...plaintextChunks)
const digest = sha512(combined)
if (!digestEqual(digest, fd.digest)) throw new Error("downloadWithRedirect: redirect file digest mismatch")
const {content: yamlBytes} = processDownloadedFile(fd, plaintextChunks)
const innerFd = decodeFileDescription(new TextDecoder().decode(yamlBytes))
const innerErr = validateFileDescription(innerFd)
if (innerErr) throw new Error("downloadWithRedirect: inner description invalid: " + innerErr)
if (innerFd.size !== fd.redirect!.size) throw new Error("downloadWithRedirect: redirect size mismatch")
if (!digestEqual(innerFd.digest, fd.redirect!.digest)) throw new Error("downloadWithRedirect: redirect digest mismatch")
for (const chunk of fd.chunks) {
const replica = chunk.replicas[0]
if (!replica) continue
try {
const client = connections.get(replica.server)
if (!client) continue
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
await ackXFTPChunk(client, kp.privateKey, replica.replicaId)
} catch (_) {}
}
for (const c of connections.values()) closeXFTP(c)
return downloadFile(innerFd, onProgress)
} catch (e) {
for (const c of connections.values()) closeXFTP(c)
throw e
await ackXFTPChunk(client, kp.privateKey, replica.replicaId)
} catch (_) {}
}
return downloadFile(agent, innerFd, onProgress)
}
// ── Delete ──────────────────────────────────────────────────────
export async function deleteFile(sndDescription: FileDescription): Promise<void> {
const connections = new Map<string, XFTPClient>()
try {
for (const chunk of sndDescription.chunks) {
const replica = chunk.replicas[0]
if (!replica) throw new Error("deleteFile: chunk has no replicas")
const client = await getOrConnect(connections, replica.server)
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
await deleteXFTPChunk(client, kp.privateKey, replica.replicaId)
}
} finally {
for (const c of connections.values()) closeXFTP(c)
export async function deleteFile(agent: XFTPClientAgent, sndDescription: FileDescription): Promise<void> {
for (const chunk of sndDescription.chunks) {
const replica = chunk.replicas[0]
if (!replica) throw new Error("deleteFile: chunk has no replicas")
const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server))
const seed = decodePrivKeyEd25519(replica.replicaKey)
const kp = ed25519KeyPairFromSeed(seed)
await deleteXFTPChunk(client, kp.privateKey, replica.replicaId)
}
}
// ── Internal ────────────────────────────────────────────────────
import {parseXFTPServer} from "./protocol/address.js"
async function getOrConnect(
connections: Map<string, XFTPClient>,
serverStr: string
): Promise<XFTPClient> {
let c = connections.get(serverStr)
if (!c) {
c = await connectXFTP(parseXFTPServer(serverStr))
connections.set(serverStr, c)
}
return c
}
function digestEqual(a: Uint8Array, b: Uint8Array): boolean {
if (a.length !== b.length) return false
let diff = 0

View File

@@ -84,21 +84,44 @@ function createBrowserTransport(baseUrl: string): Transport {
}
}
// ── Connect + handshake ───────────────────────────────────────────
// ── Client agent (connection pool) ───────────────────────────────
// Browser HTTP/2 connections are pooled per origin — the server binds a session
// to the TLS connection, so a second handshake on the same connection fails.
// Cache clients by baseUrl in browser environments to reuse the session.
const browserClients = new Map<string, XFTPClient>()
export interface XFTPClientAgent {
clients: Map<string, XFTPClient>
}
export function newXFTPAgent(): XFTPClientAgent {
return {clients: new Map()}
}
export async function getXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): Promise<XFTPClient> {
const key = "https://" + server.host + ":" + server.port
let c = agent.clients.get(key)
if (!c) {
c = await connectXFTP(server)
agent.clients.set(key, c)
}
return c
}
export function closeXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): void {
const key = "https://" + server.host + ":" + server.port
const c = agent.clients.get(key)
if (c) {
agent.clients.delete(key)
c.transport.close()
}
}
export function closeXFTPAgent(agent: XFTPClientAgent): void {
for (const c of agent.clients.values()) c.transport.close()
agent.clients.clear()
}
// ── Connect + handshake ───────────────────────────────────────────
export async function connectXFTP(server: XFTPServer): Promise<XFTPClient> {
const baseUrl = "https://" + server.host + ":" + server.port
if (!isNode) {
const cached = browserClients.get(baseUrl)
if (cached) return cached
}
const transport = await createTransport(baseUrl)
try {
@@ -129,9 +152,7 @@ export async function connectXFTP(server: XFTPServer): Promise<XFTPClient> {
const ack = await transport.post(encodeClientHandshake({xftpVersion, keyHash: server.keyHash}))
if (ack.length !== 0) throw new Error("connectXFTP: non-empty handshake ack")
const client = {baseUrl, sessionId: hs.sessionId, xftpVersion, transport}
if (!isNode) browserClients.set(baseUrl, client)
return client
return {baseUrl, sessionId: hs.sessionId, xftpVersion, transport}
} catch (e) {
transport.close()
throw e
@@ -224,7 +245,5 @@ export async function pingXFTP(c: XFTPClient): Promise<void> {
// ── Close ─────────────────────────────────────────────────────────
export function closeXFTP(c: XFTPClient): void {
// In the browser, HTTP/2 connections are pooled per origin — closing is
// a no-op since the connection persists and the session must stay cached.
if (isNode) c.transport.close()
c.transport.close()
}

View File

@@ -1,14 +1,19 @@
import {test, expect} from 'vitest'
import {encryptFileForUpload, uploadFile, downloadFile} from '../src/agent.js'
import {encryptFileForUpload, uploadFile, downloadFile, newXFTPAgent, closeXFTPAgent} from '../src/agent.js'
import {parseXFTPServer} from '../src/protocol/address.js'
const server = parseXFTPServer(import.meta.env.XFTP_SERVER)
test('browser upload + download round-trip', async () => {
const data = new Uint8Array(50000)
crypto.getRandomValues(data)
const encrypted = encryptFileForUpload(data, 'test.bin')
const {rcvDescription} = await uploadFile(server, encrypted)
const {content} = await downloadFile(rcvDescription)
expect(content).toEqual(data)
const agent = newXFTPAgent()
try {
const data = new Uint8Array(50000)
crypto.getRandomValues(data)
const encrypted = encryptFileForUpload(data, 'test.bin')
const {rcvDescription} = await uploadFile(agent, server, encrypted)
const {content} = await downloadFile(agent, rcvDescription)
expect(content).toEqual(data)
} finally {
closeXFTPAgent(agent)
}
})