mirror of
https://git.quad4.io/RNS-Things/MeshChatX.git
synced 2026-04-16 12:05:42 +00:00
- Introduced a new test suite for NetworkVisualiser focusing on optimization and request cancellation.
- Implemented tests to ensure pending requests are aborted on component unmount.
- Added validation that visualization processing stops when requests are aborted.
- Included tests for parallelized batch fetching to improve data-retrieval efficiency.
169 lines · 5.9 KiB · JavaScript
import { mount } from "@vue/test-utils";
|
|
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
|
import NetworkVisualiser from "@/components/network-visualiser/NetworkVisualiser.vue";
|
|
|
|
// Stub out vis-network: every Network instance exposes inert spies so the
// component can wire up events and options without a real canvas renderer.
vi.mock("vis-network", () => {
  const makeNetworkStub = () => ({
    on: vi.fn(),
    off: vi.fn(),
    destroy: vi.fn(),
    setOptions: vi.fn(),
    setData: vi.fn(),
    // The component reads its own node's position; give it a fixed origin.
    getPositions: vi.fn().mockReturnValue({ me: { x: 0, y: 0 } }),
  });

  return { Network: vi.fn().mockImplementation(makeNetworkStub) };
});
|
|
|
|
vi.mock("vis-data", () => {
  // Minimal in-memory stand-in for vis-data's DataSet, backed by a Map
  // keyed on each item's `id`.
  class MockDataSet {
    constructor() {
      this._data = new Map();
    }

    // add and update behave identically in this mock: upsert by id.
    add(data) {
      for (const item of Array.isArray(data) ? data : [data]) {
        this._data.set(item.id, item);
      }
    }

    update(data) {
      for (const item of Array.isArray(data) ? data : [data]) {
        this._data.set(item.id, item);
      }
    }

    remove(ids) {
      for (const id of Array.isArray(ids) ? ids : [ids]) {
        this._data.delete(id);
      }
    }

    // No argument => all items; otherwise the item or null, matching the
    // real DataSet.get contract.
    get(id) {
      if (id === undefined) {
        return Array.from(this._data.values());
      }
      return this._data.get(id) || null;
    }

    getIds() {
      return Array.from(this._data.keys());
    }

    get length() {
      return this._data.size;
    }
  }

  return { DataSet: MockDataSet };
});
|
|
|
|
describe("NetworkVisualiser Optimization and Abort", () => {
  let axiosMock;

  beforeEach(() => {
    // Route each API endpoint to a fresh canned payload; the first matching
    // substring wins, and unknown URLs resolve to an empty body.
    const routes = [
      ["/api/v1/config", () => ({ config: {} })],
      ["/api/v1/interface-stats", () => ({ interface_stats: { interfaces: [] } })],
      ["/api/v1/lxmf/conversations", () => ({ conversations: [] })],
      ["/api/v1/path-table", () => ({ path_table: [], total_count: 0 })],
      ["/api/v1/announces", () => ({ announces: [], total_count: 0 })],
    ];

    axiosMock = {
      get: vi.fn().mockImplementation((url) => {
        const route = routes.find(([needle]) => url.includes(needle));
        return Promise.resolve({ data: route ? route[1]() : {} });
      }),
      // The component treats AbortError rejections as cancellations.
      isCancel: vi.fn().mockImplementation((e) => e && e.name === "AbortError"),
    };
    window.axios = axiosMock;
  });

  afterEach(() => {
    delete window.axios;
    vi.clearAllMocks();
  });

  const mountVisualiser = () =>
    mount(NetworkVisualiser, {
      global: {
        mocks: { $t: (msg) => msg },
        stubs: { Toggle: true },
      },
    });

  it("aborts pending requests on unmount", async () => {
    // Disable the component's automatic init so we control exactly which
    // requests are in flight.
    vi.spyOn(NetworkVisualiser.methods, "init").mockImplementation(() => {});
    const wrapper = mountVisualiser();

    const abortSpy = vi.spyOn(wrapper.vm.abortController, "abort");

    // Capture the AbortSignal handed to axios and never settle the request.
    let capturedSignal;
    axiosMock.get.mockImplementationOnce((_url, config) => {
      capturedSignal = config.signal;
      return new Promise(() => {});
    });

    // Deliberately not awaited: this request is designed to hang.
    wrapper.vm.getPathTableBatch();

    expect(axiosMock.get).toHaveBeenCalled();
    expect(capturedSignal.aborted).toBe(false);

    wrapper.unmount();

    expect(abortSpy).toHaveBeenCalled();
    expect(capturedSignal.aborted).toBe(true);
  });

  it("stops processing visualization batches when aborted", async () => {
    vi.spyOn(NetworkVisualiser.methods, "init").mockImplementation(() => {});
    const wrapper = mountVisualiser();

    // Seed enough nodes to force multiple processing batches.
    wrapper.vm.pathTable = Array.from({ length: 1000 }, (_, i) => ({ hash: `h${i}`, interface: "eth0", hops: 1 }));
    const announces = {};
    for (const entry of wrapper.vm.pathTable) {
      announces[entry.hash] = {
        destination_hash: entry.hash,
        aspect: "lxmf.delivery",
        display_name: "node",
      };
    }
    wrapper.vm.announces = announces;

    // Give the first node an icon so the batch loop has to await
    // createIconImage.
    const firstHash = wrapper.vm.pathTable[0].hash;
    wrapper.vm.announces[firstHash].lxmf_user_icon = {
      icon_name: "test",
      foreground_colour: "#000",
      background_colour: "#fff",
    };
    wrapper.vm.conversations[firstHash] = { lxmf_user_icon: wrapper.vm.announces[firstHash].lxmf_user_icon };

    // A deliberately slow icon renderer keeps batch 1 in flight long enough
    // for us to abort mid-processing.
    wrapper.vm.createIconImage = vi
      .fn()
      .mockImplementation(() => new Promise((resolve) => setTimeout(resolve, 100)));

    const processing = wrapper.vm.processVisualization();

    // Let the first batch start and block on the slow icon await.
    await new Promise((resolve) => setTimeout(resolve, 50));
    expect(wrapper.vm.currentBatch).toBe(1);

    wrapper.vm.abortController.abort();
    await processing;

    // An aborted run returns early and never resets currentBatch to 0,
    // so it must still read 1.
    expect(wrapper.vm.currentBatch).toBe(1);
  });

  it("parallelizes batch fetching", async () => {
    vi.spyOn(NetworkVisualiser.methods, "init").mockImplementation(() => {});
    const wrapper = mountVisualiser();

    // Report 5000 total entries so the component must fetch beyond page one.
    axiosMock.get.mockImplementation((url) => {
      if (url === "/api/v1/path-table") {
        return Promise.resolve({ data: { path_table: [], total_count: 5000 } });
      }
      return Promise.resolve({ data: {} });
    });

    wrapper.vm.pageSize = 1000;

    await wrapper.vm.getPathTableBatch();

    // One initial call plus follow-ups at offsets 1000, 2000, 3000, 4000
    // => 5 requests in total.
    expect(axiosMock.get).toHaveBeenCalledTimes(5);
  });
});
|