Fix BanPropagationProtectionTest (it works!)

However, it does hang because something is sitting on the event loop;
I can't find out what yet.
This commit is contained in:
gnuxie
2024-02-21 22:39:49 +00:00
parent 70d0cec695
commit 0dc7c84afd
2 changed files with 52 additions and 38 deletions

View File

@@ -30,7 +30,7 @@ limitations under the License.
import { JSXFactory } from "../commands/interface-manager/JSXFactory";
import { renderMatrixAndSend } from "../commands/interface-manager/DeadDocumentMatrix";
import { renderMentionPill } from "../commands/interface-manager/MatrixHelpRenderer";
import { renderMentionPill, renderRoomPill } from "../commands/interface-manager/MatrixHelpRenderer";
import { ListMatches, renderListRules } from "../commands/Rules";
import { printActionResult } from "../models/RoomUpdateError";
import { AbstractProtection, ActionResult, BasicConsequenceProvider, Logger, MatrixRoomID, MatrixRoomReference, MembershipChange, MembershipChangeType, Ok, PermissionError, PolicyRule, PolicyRuleType, ProtectedRoomsSet, ProtectionDescription, Recommendation, RoomActionError, RoomMembershipRevision, RoomUpdateError, StringRoomID, StringUserID, Task, describeProtection, isError, serverName, UserID } from "matrix-protection-suite";
@@ -38,6 +38,7 @@ import { Draupnir } from "../Draupnir";
import { resolveRoomReferenceSafe } from "matrix-protection-suite-for-matrix-bot-sdk";
import { DraupnirProtection } from "./Protection";
import { listInfo } from "../commands/StatusCommand";
import { MatrixReactionHandler } from "../commands/interface-manager/MatrixReactionHandler";
const log = new Logger('BanPropagationProtection');
@@ -46,7 +47,9 @@ const UNBAN_PROPAGATION_PROMPT_LISTENER = 'ge.applied-langua.ge.draupnir.unban_p
// FIXME: https://github.com/the-draupnir-project/Draupnir/issues/160
function makePolicyRoomReactionReferenceMap(rooms: MatrixRoomID[]): Map<string, string> {
return rooms.reduce((map, room, index) => (map.set(`${index + 1}.`, room.toPermalink()), map), new Map())
return MatrixReactionHandler.createItemizedReactionMap(
rooms.map(room => room.toPermalink())
);
}
// would be nice to be able to use presentation types here idk.
@@ -76,7 +79,7 @@ async function promptBanPropagation(
in <a href={`https://matrix.to/#/${change.roomID}`}>{change.roomID}</a> by {new UserID(change.sender)} for <code>{change.content.reason ?? '<no reason supplied>'}</code>.<br/>
Would you like to add the ban to a policy list?
<ol>
{editablePolicyRoomIDs}
{editablePolicyRoomIDs.map((room) => <li><a href={room.toPermalink()}>{room.toRoomIDOrAlias()}</a></li>)}
</ol>
</root>,
draupnir.managementRoomID,
@@ -96,16 +99,16 @@ async function promptBanPropagation(
async function promptUnbanPropagation(
draupnir: Draupnir,
event: any,
roomId: string,
membershipChange: MembershipChange,
roomID: StringRoomID,
rulesMatchingUser: ListMatches[]
): Promise<void> {
const reactionMap = new Map<string, string>(Object.entries({ 'unban from all': 'unban from all'}));
// shouldn't we warn them that the unban will be futile?
const promptEventId = (await renderMatrixAndSend(
<root>
The user {renderMentionPill(event["state_key"], event["content"]?.["displayname"] ?? event["state_key"])} was unbanned
from the room <a href={`https://matrix.to/#/${roomId}`}>{roomId}</a> by {new UserID(event["sender"])} for <code>{event["content"]?.["reason"] ?? '<no reason supplied>'}</code>.<br/>
The user {renderMentionPill(membershipChange.userID, membershipChange.content.displayname ?? membershipChange.userID)} was unbanned
from the room {renderRoomPill(MatrixRoomReference.fromRoomID(roomID))} by {membershipChange.sender} for <code>{membershipChange.content.reason ?? '<no reason supplied>'}</code>.<br/>
However there are rules in Draupnir's watched lists matching this user:
<ul>
{
@@ -121,8 +124,8 @@ async function promptUnbanPropagation(
UNBAN_PROPAGATION_PROMPT_LISTENER,
reactionMap,
{
target: event["state_key"],
reason: event["content"]?.["reason"],
target: membershipChange.userID,
reason: membershipChange.content.reason,
}
)
)).at(0) as string;

View File

@@ -1,39 +1,47 @@
import expect from "expect";
import { Mjolnir } from "../../src/Mjolnir";
import { newTestUser } from "./clientHelper";
import { getFirstEventMatching } from './commands/commandUtils';
import { RULE_USER } from "../../src/models/ListRule";
import { MatrixRoomReference } from "../../src/commands/interface-manager/MatrixRoomReference";
import { DraupnirTestContext, draupnirClient } from "./mjolnirSetupUtils";
import { MatrixRoomReference, PolicyRuleType, PropagationType, StringRoomID, findProtection } from "matrix-protection-suite";
// We will need to disable this in tests that are banning people otherwise it will cause
// mocha to hang for awhile until it times out waiting for a response to a prompt.
describe("Ban propagation test", function() {
it("Should be enabled by default", async function() {
const mjolnir: Mjolnir = this.mjolnir
expect(mjolnir.protectionManager.getProtection("BanPropagationProtection")?.enabled).toBeTruthy();
})
it("Should prompt to add bans to a policy list, then add the ban", async function() {
const mjolnir: Mjolnir = this.mjolnir
const mjolnirId = await mjolnir.client.getUserId();
it("Should be enabled by default", async function(this: DraupnirTestContext) {
const draupnir = this.draupnir;
if (draupnir === undefined) {
throw new TypeError(`setup didn't run properly`);
}
const banPropagationProtection = findProtection("BanPropagationProtection");
if (banPropagationProtection === undefined) {
throw new TypeError(`should be able to find the ban propagation protection`);
}
expect(draupnir.protectedRoomsSet.protections.isEnabledProtection(banPropagationProtection)).toBeTruthy();
} as unknown as Mocha.AsyncFunc)
it("Should prompt to add bans to a policy list, then add the ban", async function(this: DraupnirTestContext) {
const draupnir = this.draupnir;
if (draupnir === undefined) {
throw new TypeError(`setup didn't run properly`);
}
const moderator = await newTestUser(this.config.homeserverUrl, { name: { contains: "moderator" } });
await moderator.joinRoom(mjolnir.managementRoomId);
await moderator.joinRoom(draupnir.managementRoomID);
const protectedRooms = await Promise.all([...Array(5)].map(async _ => {
const room = await moderator.createRoom({ invite: [mjolnirId] });
await mjolnir.client.joinRoom(room);
await moderator.setUserPowerLevel(mjolnirId, room, 100);
await mjolnir.addProtectedRoom(room);
const room = await moderator.createRoom({ invite: [draupnir.clientUserID] });
await draupnir.client.joinRoom(room);
await moderator.setUserPowerLevel(draupnir.clientUserID, room, 100);
await draupnir.protectedRoomsSet.protectedRoomsConfig.addRoom(MatrixRoomReference.fromRoomID(room as StringRoomID));
return room;
}));
// create a policy list so that we can check it for a user rule later
const policyListId = await moderator.createRoom({ invite: [mjolnirId] });
await moderator.setUserPowerLevel(mjolnirId, policyListId, 100);
await mjolnir.client.joinRoom(policyListId);
await mjolnir.policyListManager.watchList(MatrixRoomReference.fromRoomId(policyListId));
const policyListId = await moderator.createRoom({ invite: [draupnir.clientUserID] });
await moderator.setUserPowerLevel(draupnir.clientUserID, policyListId, 100);
await draupnir.client.joinRoom(policyListId);
await draupnir.protectedRoomsSet.issuerManager.watchList(PropagationType.Direct, MatrixRoomReference.fromRoomID(policyListId as StringRoomID), {});
// check for the prompt
const promptEvent = await getFirstEventMatching({
matrix: mjolnir.matrixEmitter,
targetRoom: mjolnir.managementRoomId,
matrix: draupnirClient()!,
targetRoom: draupnir.managementRoomID,
lookAfterEvent: async function () {
// ban a user in one of our protected rooms using the moderator
await moderator.banUser('@test:example.com', protectedRooms[0], "spam");
@@ -45,20 +53,21 @@ describe("Ban propagation test", function() {
})
// select the prompt
await moderator.unstableApis.addReactionToEvent(
mjolnir.managementRoomId, promptEvent['event_id'], '1.'
draupnir.managementRoomID, promptEvent['event_id'], '1️⃣'
);
// check the policy list, after waiting a few seconds.
await new Promise(resolve => setTimeout(resolve, 10000));
const policyList = mjolnir.policyListManager.lists[0];
const rules = policyList.rulesMatchingEntity('@test:example.com', RULE_USER);
const policyListRevisionAfterBan = draupnir.protectedRoomsSet.issuerManager.policyListRevisionIssuer.currentRevision;
const rules = policyListRevisionAfterBan.allRulesMatchingEntity('@test:example.com', PolicyRuleType.User);
expect(rules.length).toBe(1);
expect(rules[0].entity).toBe('@test:example.com');
expect(rules[0].reason).toBe('spam');
// now unban them >:3
const unbanPrompt = await getFirstEventMatching({
matrix: mjolnir.matrixEmitter,
targetRoom: mjolnir.managementRoomId,
matrix: draupnirClient()!,
targetRoom: draupnir.managementRoomID,
lookAfterEvent: async function () {
// ban a user in one of our protected rooms using the moderator
await moderator.unbanUser('@test:example.com', protectedRooms[0]);
@@ -70,10 +79,12 @@ describe("Ban propagation test", function() {
});
await moderator.unstableApis.addReactionToEvent(
mjolnir.managementRoomId, unbanPrompt['event_id'], 'unban from all'
draupnir.managementRoomID, unbanPrompt['event_id'], 'unban from all'
);
await new Promise(resolve => setTimeout(resolve, 10000));
const rulesAfterUnban = policyList.rulesMatchingEntity('@test:example.com', RULE_USER);
const policyListRevisionAfterUnBan = draupnir.protectedRoomsSet.issuerManager.policyListRevisionIssuer.currentRevision;
const rulesAfterUnban = policyListRevisionAfterUnBan.allRulesMatchingEntity('@test:example.com', PolicyRuleType.User);
expect(rulesAfterUnban.length).toBe(0);
})
} as unknown as Mocha.AsyncFunc)
})