2025-06-19 16:40:20 -04:00
|
|
|
|
import { db, newts } from "@server/db";
|
2024-11-15 21:53:58 -05:00
|
|
|
|
import { MessageHandler } from "../ws";
|
2025-06-11 09:13:38 -04:00
|
|
|
|
import { exitNodes, Newt, resources, sites, Target, targets } from "@server/db";
|
2025-03-08 11:58:48 -05:00
|
|
|
|
import { eq, and, sql, inArray } from "drizzle-orm";
|
2024-11-18 22:10:03 -05:00
|
|
|
|
import { addPeer, deletePeer } from "../gerbil/peers";
|
|
|
|
|
import logger from "@server/logger";
|
2025-06-16 22:06:56 -04:00
|
|
|
|
import config from "@server/lib/config";
|
|
|
|
|
import {
|
|
|
|
|
findNextAvailableCidr,
|
|
|
|
|
getNextAvailableClientSubnet
|
|
|
|
|
} from "@server/lib/ip";
|
2024-11-15 21:53:58 -05:00
|
|
|
|
|
2025-06-19 16:39:44 -04:00
|
|
|
|
/**
 * Result of a single exit-node latency probe reported by a newt during
 * registration. Consumed by `selectBestExitNode` to pick the node the
 * newt should tunnel through.
 */
export type ExitNodePingResult = {
    // Database id of the probed exit node.
    exitNodeId: number;
    // Measured round-trip latency in milliseconds.
    latencyMs: number;
    // Remaining-capacity score; nodes with weight <= 0 are discarded and
    // selection prefers nodes with weight >= 0.1 (see selectBestExitNode).
    weight: number;
    // Set when the probe failed; any node carrying an error is excluded
    // from selection.
    error?: string;
    // Human-readable node name, used in selection log messages.
    exitNodeName: string;
    // Node endpoint as reported by the probe — not read by the selection
    // logic in this file.
    endpoint: string;
    // True when the newt was already connected to this node; such nodes are
    // preferred ("sticky") when their latency is close to the best one.
    wasPreviouslyConnected: boolean;
};
|
|
|
|
|
|
2025-02-21 10:13:41 -05:00
|
|
|
|
/**
 * Handles the "register" message a newt sends after connecting: persists the
 * newt's version, (re)assigns the site to an exit node, swaps the site's
 * WireGuard public key, registers the peer on the exit node, and replies with
 * the WireGuard connection details plus the site's TCP/UDP proxy targets.
 *
 * Fields read from message.data (validated only as shown in the body):
 *  - publicKey: the newt's WireGuard public key (required)
 *  - pingResults: optional ExitNodePingResult[] used to pick an exit node
 *  - newtVersion: optional version string written to the newts table
 *  - backwardsCompatible: when truthy, registration stops early and the older
 *    ping-response flow takes over
 */
export const handleNewtRegisterMessage: MessageHandler = async (context) => {
    const { message, client, sendToClient } = context;
    // The ws layer hands us the authenticated client; narrow it to a Newt.
    const newt = client as Newt;

    logger.info("Handling register newt message!");

    if (!newt) {
        logger.warn("Newt not found");
        return;
    }

    if (!newt.siteId) {
        logger.warn("Newt has no site!"); // TODO: Maybe we create the site here?
        return;
    }

    const siteId = newt.siteId;

    const { publicKey, pingResults, newtVersion, backwardsCompatible } =
        message.data;
    if (!publicKey) {
        logger.warn("Public key not provided");
        return;
    }

    // Older newts negotiate via a separate ping exchange; bail out before
    // sending the connect payload so that flow can take over.
    if (backwardsCompatible) {
        logger.debug(
            "Backwards compatible mode detecting - not sending connect message and waiting for ping response."
        );
        return;
    }

    // Exit node chosen from the newt's latency probes, if it sent any.
    let exitNodeId: number | undefined;
    if (pingResults) {
        const bestPingResult = selectBestExitNode(
            pingResults as ExitNodePingResult[]
        );
        if (!bestPingResult) {
            logger.warn("No suitable exit node found based on ping results");
            return;
        }
        exitNodeId = bestPingResult.exitNodeId;
    }

    if (newtVersion) {
        // update the newt version in the database
        await db
            .update(newts)
            .set({
                version: newtVersion as string
            })
            .where(eq(newts.newtId, newt.newtId));
    }

    // Current site row — needed to detect key changes and exit-node moves.
    const [oldSite] = await db
        .select()
        .from(sites)
        .where(eq(sites.siteId, siteId))
        .limit(1);

    if (!oldSite || !oldSite.exitNodeId) {
        logger.warn("Site not found or does not have exit node");
        return;
    }

    let siteSubnet = oldSite.subnet;
    let exitNodeIdToQuery = oldSite.exitNodeId;
    if (exitNodeId && oldSite.exitNodeId !== exitNodeId) {
        // This effectively moves the exit node to the new one
        exitNodeIdToQuery = exitNodeId; // Use the provided exitNodeId if it differs from the site's exitNodeId

        // Subnets already allocated to sites on the target exit node; the new
        // site subnet must not collide with any of them.
        const sitesQuery = await db
            .select({
                subnet: sites.subnet
            })
            .from(sites)
            .where(eq(sites.exitNodeId, exitNodeId));

        const [exitNode] = await db
            .select()
            .from(exitNodes)
            .where(eq(exitNodes.exitNodeId, exitNodeIdToQuery))
            .limit(1);

        const blockSize = config.getRawConfig().gerbil.site_block_size;
        const subnets = sitesQuery.map((site) => site.subnet);
        // Reserve the exit node's own /blockSize block as well.
        // NOTE(review): exitNode may be undefined if the ping result referenced
        // an unknown node id — this line would then throw; confirm upstream
        // guarantees, or add a guard.
        subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
        const newSubnet = findNextAvailableCidr(
            subnets,
            blockSize,
            exitNode.address
        );
        if (!newSubnet) {
            logger.error("No available subnets found for the new exit node");
            return;
        }

        siteSubnet = newSubnet;

        // Persist the move: new key, new exit node, freshly allocated subnet.
        await db
            .update(sites)
            .set({
                pubKey: publicKey,
                exitNodeId: exitNodeId,
                subnet: newSubnet
            })
            .where(eq(sites.siteId, siteId))
            .returning();
    } else {
        // Same exit node as before; only refresh the stored public key.
        await db
            .update(sites)
            .set({
                pubKey: publicKey
            })
            .where(eq(sites.siteId, siteId))
            .returning();
    }

    // Fetch the exit node used for the reply payload. NOTE(review): when the
    // site was moved above, this repeats the query already run inside the
    // if-branch; harmless but could be deduplicated.
    const [exitNode] = await db
        .select()
        .from(exitNodes)
        .where(eq(exitNodes.exitNodeId, exitNodeIdToQuery))
        .limit(1);

    // A changed key means the old WireGuard peer is stale; remove it from the
    // node the site was previously attached to.
    if (oldSite.pubKey && oldSite.pubKey !== publicKey) {
        logger.info("Public key mismatch. Deleting old peer...");
        await deletePeer(oldSite.exitNodeId, oldSite.pubKey);
    }

    if (!siteSubnet) {
        logger.warn("Site has no subnet");
        return;
    }

    // add the peer to the exit node
    await addPeer(exitNodeIdToQuery, {
        publicKey: publicKey,
        allowedIps: [siteSubnet]
    });

    // Collect every resource on this site together with its enabled targets:
    // two queries inside one transaction, joined in JS afterwards.
    const allResources = await db.transaction(async (tx) => {
        // First get all resources for the site
        const resourcesList = await tx
            .select({
                resourceId: resources.resourceId,
                subdomain: resources.subdomain,
                fullDomain: resources.fullDomain,
                ssl: resources.ssl,
                blockAccess: resources.blockAccess,
                sso: resources.sso,
                emailWhitelistEnabled: resources.emailWhitelistEnabled,
                http: resources.http,
                proxyPort: resources.proxyPort,
                protocol: resources.protocol
            })
            .from(resources)
            .where(eq(resources.siteId, siteId));

        // Get all enabled targets for these resources in a single query
        const resourceIds = resourcesList.map((r) => r.resourceId);
        const allTargets =
            resourceIds.length > 0
                ? await tx
                      .select({
                          resourceId: targets.resourceId,
                          targetId: targets.targetId,
                          ip: targets.ip,
                          method: targets.method,
                          port: targets.port,
                          internalPort: targets.internalPort,
                          enabled: targets.enabled
                      })
                      .from(targets)
                      .where(
                          and(
                              inArray(targets.resourceId, resourceIds),
                              eq(targets.enabled, true)
                          )
                      )
                : [];

        // Combine the data in JS instead of using SQL for the JSON
        return resourcesList.map((resource) => ({
            ...resource,
            targets: allTargets.filter(
                (target) => target.resourceId === resource.resourceId
            )
        }));
    });

    // Partition targets into "internalPort:ip:port" strings per protocol for
    // the newt's proxy configuration.
    const { tcpTargets, udpTargets } = allResources.reduce(
        (acc, resource) => {
            // Skip resources with no targets
            if (!resource.targets?.length) return acc;

            // Format valid targets into strings
            const formattedTargets = resource.targets
                .filter(
                    (target: Target) =>
                        target?.internalPort && target?.ip && target?.port
                )
                .map(
                    (target: Target) =>
                        `${target.internalPort}:${target.ip}:${target.port}`
                );

            // Add to the appropriate protocol array
            if (resource.protocol === "tcp") {
                acc.tcpTargets.push(...formattedTargets);
            } else {
                acc.udpTargets.push(...formattedTargets);
            }

            return acc;
        },
        { tcpTargets: [] as string[], udpTargets: [] as string[] }
    );

    // Reply with everything the newt needs to bring the WireGuard tunnel up.
    return {
        message: {
            type: "newt/wg/connect",
            data: {
                endpoint: `${exitNode.endpoint}:${exitNode.listenPort}`,
                publicKey: exitNode.publicKey,
                serverIP: exitNode.address.split("/")[0],
                tunnelIP: siteSubnet.split("/")[0],
                targets: {
                    udp: udpTargets,
                    tcp: tcpTargets
                }
            }
        },
        broadcast: false, // reply is delivered only to this newt's socket, not broadcast
        excludeSender: false // sender is not excluded from delivery
    };
};
|
2025-06-19 16:39:44 -04:00
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
/**
|
|
|
|
|
* Selects the most suitable exit node from a list of ping results.
|
|
|
|
|
*
|
|
|
|
|
* The selection algorithm follows these steps:
|
|
|
|
|
*
|
|
|
|
|
* 1. **Filter Invalid Nodes**: Excludes nodes with errors or zero weight.
|
|
|
|
|
*
|
|
|
|
|
* 2. **Sort by Latency**: Sorts valid nodes in ascending order of latency.
|
|
|
|
|
*
|
|
|
|
|
* 3. **Preferred Selection**:
|
|
|
|
|
* - If the lowest-latency node has sufficient capacity (≥10% weight),
|
|
|
|
|
* check if a previously connected node is also acceptable.
|
|
|
|
|
* - The previously connected node is preferred if its latency is within
|
|
|
|
|
* 30ms or 15% of the best node’s latency.
|
|
|
|
|
*
|
|
|
|
|
* 4. **Fallback to Next Best**:
|
|
|
|
|
* - If the lowest-latency node is under capacity, find the next node
|
|
|
|
|
* with acceptable capacity.
|
|
|
|
|
*
|
|
|
|
|
* 5. **Final Fallback**:
|
|
|
|
|
* - If no nodes meet the capacity threshold, fall back to the node
|
|
|
|
|
* with the highest weight (i.e., most available capacity).
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
function selectBestExitNode(
|
|
|
|
|
pingResults: ExitNodePingResult[]
|
|
|
|
|
): ExitNodePingResult | null {
|
|
|
|
|
const MIN_CAPACITY_THRESHOLD = 0.1;
|
|
|
|
|
const LATENCY_TOLERANCE_MS = 30;
|
|
|
|
|
const LATENCY_TOLERANCE_PERCENT = 0.15;
|
|
|
|
|
|
|
|
|
|
// Filter out invalid nodes
|
|
|
|
|
const validNodes = pingResults.filter((n) => !n.error && n.weight > 0);
|
|
|
|
|
|
|
|
|
|
if (validNodes.length === 0) {
|
|
|
|
|
logger.error("No valid exit nodes available");
|
|
|
|
|
return null;
|
|
|
|
|
}
|
2025-06-19 16:39:44 -04:00
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
// Sort by latency (ascending)
|
|
|
|
|
const sortedNodes = validNodes
|
|
|
|
|
.slice()
|
|
|
|
|
.sort((a, b) => a.latencyMs - b.latencyMs);
|
|
|
|
|
const lowestLatencyNode = sortedNodes[0];
|
2025-06-19 16:39:44 -04:00
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
logger.info(
|
|
|
|
|
`Lowest latency node: ${lowestLatencyNode.exitNodeName} (${lowestLatencyNode.latencyMs} ms, weight=${lowestLatencyNode.weight.toFixed(2)})`
|
|
|
|
|
);
|
2025-06-19 16:39:44 -04:00
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
// If lowest latency node has enough capacity, check if previously connected node is acceptable
|
|
|
|
|
if (lowestLatencyNode.weight >= MIN_CAPACITY_THRESHOLD) {
|
|
|
|
|
const previouslyConnectedNode = sortedNodes.find(
|
|
|
|
|
(n) =>
|
|
|
|
|
n.wasPreviouslyConnected && n.weight >= MIN_CAPACITY_THRESHOLD
|
|
|
|
|
);
|
2025-06-19 16:39:44 -04:00
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
if (previouslyConnectedNode) {
|
|
|
|
|
const latencyDiff =
|
|
|
|
|
previouslyConnectedNode.latencyMs - lowestLatencyNode.latencyMs;
|
|
|
|
|
const percentDiff = latencyDiff / lowestLatencyNode.latencyMs;
|
|
|
|
|
|
|
|
|
|
if (
|
|
|
|
|
latencyDiff <= LATENCY_TOLERANCE_MS ||
|
|
|
|
|
percentDiff <= LATENCY_TOLERANCE_PERCENT
|
|
|
|
|
) {
|
|
|
|
|
logger.info(
|
|
|
|
|
`Sticking with previously connected node: ${previouslyConnectedNode.exitNodeName} ` +
|
|
|
|
|
`(${previouslyConnectedNode.latencyMs} ms), latency diff = ${latencyDiff.toFixed(1)}ms ` +
|
|
|
|
|
`/ ${(percentDiff * 100).toFixed(1)}%.`
|
|
|
|
|
);
|
|
|
|
|
return previouslyConnectedNode;
|
2025-06-19 16:39:44 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
2025-06-22 17:19:32 -04:00
|
|
|
|
|
|
|
|
|
return lowestLatencyNode;
|
2025-06-19 16:39:44 -04:00
|
|
|
|
}
|
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
// Otherwise, find the next node (after the lowest) that has enough capacity
|
|
|
|
|
for (let i = 1; i < sortedNodes.length; i++) {
|
|
|
|
|
const node = sortedNodes[i];
|
|
|
|
|
if (node.weight >= MIN_CAPACITY_THRESHOLD) {
|
|
|
|
|
logger.info(
|
|
|
|
|
`Lowest latency node under capacity. Using next best: ${node.exitNodeName} ` +
|
|
|
|
|
`(${node.latencyMs} ms, weight=${node.weight.toFixed(2)})`
|
|
|
|
|
);
|
|
|
|
|
return node;
|
|
|
|
|
}
|
2025-06-19 16:39:44 -04:00
|
|
|
|
}
|
|
|
|
|
|
2025-06-22 17:19:32 -04:00
|
|
|
|
// Fallback: pick the highest weight node
|
|
|
|
|
const fallbackNode = validNodes.reduce((a, b) =>
|
|
|
|
|
a.weight > b.weight ? a : b
|
|
|
|
|
);
|
|
|
|
|
logger.warn(
|
|
|
|
|
`No nodes with ≥10% weight. Falling back to highest capacity node: ${fallbackNode.exitNodeName}`
|
|
|
|
|
);
|
|
|
|
|
return fallbackNode;
|
2025-06-19 16:39:44 -04:00
|
|
|
|
}
|