Move endpoint to per site

Author: Owen
Date:   2025-08-04 20:17:35 -07:00
parent d1404a2b07
commit 1a9de1e5c5
No known key found for this signature in database
GPG key ID: 8271FDFFD9E0CCBD
7 changed files with 208 additions and 108 deletions
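
In short: the hole-punched endpoint is no longer a single column on clients; it moves to the clientSites join table, so each client/site pair (and therefore each exit node) records its own address. A rough TypeScript sketch of the resulting row shape, inferred from the schema hunks below (the type name and sample value are illustrative, not from the repo):

    // Illustrative only (not code from this commit).
    type ClientSiteRow = {
        clientId: number;
        siteId: number;
        isRelayed: boolean;
        endpoint: string | null; // e.g. "203.0.113.10:51820", learned per exit node hole punch
    };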

View file

@@ -516,7 +516,7 @@ export const clients = pgTable("clients", {
     lastPing: varchar("lastPing"),
     type: varchar("type").notNull(), // "olm"
     online: boolean("online").notNull().default(false),
-    endpoint: varchar("endpoint"),
+    // endpoint: varchar("endpoint"),
     lastHolePunch: integer("lastHolePunch"),
     maxConnections: integer("maxConnections")
 });
@@ -528,7 +528,8 @@ export const clientSites = pgTable("clientSites", {
     siteId: integer("siteId")
         .notNull()
         .references(() => sites.siteId, { onDelete: "cascade" }),
-    isRelayed: boolean("isRelayed").notNull().default(false)
+    isRelayed: boolean("isRelayed").notNull().default(false),
+    endpoint: varchar("endpoint")
 });

 export const olms = pgTable("olms", {

View file

@@ -216,7 +216,7 @@ export const clients = sqliteTable("clients", {
     lastPing: text("lastPing"),
     type: text("type").notNull(), // "olm"
     online: integer("online", { mode: "boolean" }).notNull().default(false),
-    endpoint: text("endpoint"),
+    // endpoint: text("endpoint"),
     lastHolePunch: integer("lastHolePunch")
 });
@@ -227,7 +227,8 @@ export const clientSites = sqliteTable("clientSites", {
     siteId: integer("siteId")
         .notNull()
         .references(() => sites.siteId, { onDelete: "cascade" }),
-    isRelayed: integer("isRelayed", { mode: "boolean" }).notNull().default(false)
+    isRelayed: integer("isRelayed", { mode: "boolean" }).notNull().default(false),
+    endpoint: text("endpoint")
 });

 export const olms = sqliteTable("olms", {

View file

@@ -129,7 +129,7 @@ export async function updateClient(
             `Adding ${sitesAdded.length} new sites to client ${client.clientId}`
         );
         for (const siteId of sitesAdded) {
-            if (!client.subnet || !client.pubKey || !client.endpoint) {
+            if (!client.subnet || !client.pubKey) {
                 logger.debug(
                     "Client subnet, pubKey or endpoint is not set"
                 );
@@ -140,10 +140,25 @@ export async function updateClient(
             // BUT REALLY WE NEED TO TRACK THE USERS PREFERENCE THAT THEY CHOSE IN THE CLIENTS
             const isRelayed = true;

+            // get the clientsite
+            const [clientSite] = await db
+                .select()
+                .from(clientSites)
+                .where(and(
+                    eq(clientSites.clientId, client.clientId),
+                    eq(clientSites.siteId, siteId)
+                ))
+                .limit(1);
+
+            if (!clientSite || !clientSite.endpoint) {
+                logger.debug("Client site is missing or has no endpoint");
+                continue;
+            }
+
             const site = await newtAddPeer(siteId, {
                 publicKey: client.pubKey,
                 allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
-                endpoint: isRelayed ? "" : client.endpoint
+                endpoint: isRelayed ? "" : clientSite.endpoint
             });

             if (!site) {
@@ -255,7 +270,6 @@ export async function updateClient(
             }
         }

-        if (client.endpoint) {
         // get all sites for this client and join with exit nodes with site.exitNodeId
         const sitesData = await db
             .select()
@@ -272,6 +286,8 @@ export async function updateClient(
         let exitNodeDestinations: {
             reachableAt: string;
+            sourceIp: string;
+            sourcePort: number;
             destinations: PeerDestination[];
         }[] = [];
@@ -282,6 +298,14 @@ export async function updateClient(
                 );
                 continue;
             }

+            if (!site.clientSites.endpoint) {
+                logger.warn(
+                    `Site ${site.sites.siteId} has no endpoint, skipping`
+                );
+                continue;
+            }
+
             // find the destinations in the array
             let destinations = exitNodeDestinations.find(
                 (d) => d.reachableAt === site.exitNodes?.reachableAt
@@ -290,6 +314,8 @@ export async function updateClient(
             if (!destinations) {
                 destinations = {
                     reachableAt: site.exitNodes?.reachableAt || "",
+                    sourceIp: site.clientSites.endpoint.split(":")[0] || "",
+                    sourcePort: parseInt(site.clientSites.endpoint.split(":")[1]) || 0,
                     destinations: [
                         {
                             destinationIP:
@@ -319,8 +345,8 @@ export async function updateClient(
                 `Updating destinations for exit node at ${destination.reachableAt}`
             );
             const payload = {
-                sourceIp: client.endpoint?.split(":")[0] || "",
-                sourcePort: parseInt(client.endpoint?.split(":")[1]) || 0,
+                sourceIp: destination.sourceIp,
+                sourcePort: destination.sourcePort,
                 destinations: destination.destinations
             };
             logger.info(
@@ -351,7 +377,6 @@ export async function updateClient(
                 }
             }
         }
-        }

        // Fetch the updated client
        const [updatedClient] = await trx
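
Downstream of the schema move, updateClient now reads the endpoint from the clientSites row before calling newtAddPeer. A minimal standalone sketch of that lookup (the helper name is mine; db and clientSites are the "@server/db" exports used elsewhere in this diff):

    import { and, eq } from "drizzle-orm";
    import { db, clientSites } from "@server/db";

    // Hypothetical helper: resolve the per-site hole-punched endpoint for a client.
    async function getClientSiteEndpoint(
        clientId: number,
        siteId: number
    ): Promise<string | null> {
        const [row] = await db
            .select()
            .from(clientSites)
            .where(
                and(
                    eq(clientSites.clientId, clientId),
                    eq(clientSites.siteId, siteId)
                )
            )
            .limit(1);

        // No row or no recorded hole punch yet -> caller should skip or relay.
        return row?.endpoint ?? null;
    }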

View file

@@ -78,19 +78,13 @@ export async function getAllRelays(
             .where(eq(clientSites.siteId, site.siteId));

         for (const clientSite of clientSitesRes) {
-            // Get client information
-            const [client] = await db
-                .select()
-                .from(clients)
-                .where(eq(clients.clientId, clientSite.clientId));
-
-            if (!client || !client.endpoint) {
+            if (!clientSite.endpoint) {
                 continue;
             }

             // Add this site as a destination for the client
-            if (!mappings[client.endpoint]) {
-                mappings[client.endpoint] = { destinations: [] };
+            if (!mappings[clientSite.endpoint]) {
+                mappings[clientSite.endpoint] = { destinations: [] };
             }

             // Add site as a destination for this client
@@ -100,13 +94,13 @@ export async function getAllRelays(
             };

             // Check if this destination is already in the array to avoid duplicates
-            const isDuplicate = mappings[client.endpoint].destinations.some(
+            const isDuplicate = mappings[clientSite.endpoint].destinations.some(
                 dest => dest.destinationIP === destination.destinationIP &&
                     dest.destinationPort === destination.destinationPort
             );

             if (!isDuplicate) {
-                mappings[client.endpoint].destinations.push(destination);
+                mappings[clientSite.endpoint].destinations.push(destination);
             }
         }
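
With getAllRelays keyed by the per-site endpoint, the relay table it builds has roughly this shape (field names are taken from this diff; the type aliases and the sample address are illustrative):

    // Sketch of the mapping getAllRelays returns: keyed by each
    // clientSites.endpoint ("ip:port"), no longer by a single clients.endpoint.
    interface PeerDestination {
        destinationIP: string;
        destinationPort: number;
    }

    type RelayMappings = Record<
        string, // clientSites.endpoint, e.g. "198.51.100.7:51820" (made-up address)
        { destinations: PeerDestination[] }
    >;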

View file

@@ -1,8 +1,16 @@
 import { Request, Response, NextFunction } from "express";
 import { z } from "zod";
-import { clients, newts, olms, Site, sites, clientSites, exitNodes } from "@server/db";
+import {
+    clients,
+    newts,
+    olms,
+    Site,
+    sites,
+    clientSites,
+    exitNodes
+} from "@server/db";
 import { db } from "@server/db";
-import { eq } from "drizzle-orm";
+import { eq, and } from "drizzle-orm";
 import HttpCode from "@server/types/HttpCode";
 import createHttpError from "http-errors";
 import logger from "@server/logger";
@@ -19,7 +27,8 @@ const updateHolePunchSchema = z.object({
     ip: z.string(),
     port: z.number(),
     timestamp: z.number(),
-    reachableAt: z.string().optional()
+    reachableAt: z.string().optional(),
+    publicKey: z.string()
 });

 // New response type with multi-peer destination support
@@ -45,13 +54,24 @@ export async function updateHolePunch(
         );
     }

-    const { olmId, newtId, ip, port, timestamp, token, reachableAt } = parsedParams.data;
+    const {
+        olmId,
+        newtId,
+        ip,
+        port,
+        timestamp,
+        token,
+        reachableAt,
+        publicKey
+    } = parsedParams.data;

     let currentSiteId: number | undefined;
     let destinations: PeerDestination[] = [];

     if (olmId) {
-        logger.debug(`Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}`);
+        logger.debug(
+            `Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}${publicKey ? ` with exit node publicKey: ${publicKey}` : ""}`
+        );

         const { session, olm: olmSession } =
             await validateOlmSessionToken(token);
@@ -62,7 +82,9 @@ export async function updateHolePunch(
         }

         if (olmId !== olmSession.olmId) {
-            logger.warn(`Olm ID mismatch: ${olmId} !== ${olmSession.olmId}`);
+            logger.warn(
+                `Olm ID mismatch: ${olmId} !== ${olmSession.olmId}`
+            );
             return next(
                 createHttpError(HttpCode.UNAUTHORIZED, "Unauthorized")
             );
@@ -83,12 +105,55 @@ export async function updateHolePunch(
         const [client] = await db
             .update(clients)
             .set({
-                endpoint: `${ip}:${port}`,
                 lastHolePunch: timestamp
             })
             .where(eq(clients.clientId, olm.clientId))
             .returning();

+        // Get the exit node by public key
+        const [exitNode] = await db
+            .select()
+            .from(exitNodes)
+            .where(eq(exitNodes.publicKey, publicKey));
+
+        if (exitNode) {
+            // Get sites that are on this specific exit node and connected to this client
+            const sitesOnExitNode = await db
+                .select({ siteId: sites.siteId })
+                .from(sites)
+                .innerJoin(
+                    clientSites,
+                    eq(sites.siteId, clientSites.siteId)
+                )
+                .where(
+                    and(
+                        eq(sites.exitNodeId, exitNode.exitNodeId),
+                        eq(clientSites.clientId, olm.clientId)
+                    )
+                );
+
+            // Update clientSites for each site on this exit node
+            for (const site of sitesOnExitNode) {
+                await db
+                    .update(clientSites)
+                    .set({
+                        endpoint: `${ip}:${port}`
+                    })
+                    .where(
+                        and(
+                            eq(clientSites.clientId, olm.clientId),
+                            eq(clientSites.siteId, site.siteId)
+                        )
+                    );
+            }
+
+            logger.debug(
+                `Updated ${sitesOnExitNode.length} sites on exit node with publicKey: ${publicKey}`
+            );
+        } else {
+            logger.warn(`Exit node not found for publicKey: ${publicKey}`);
+        }
+
         if (!client) {
             logger.warn(`Client not found for olm: ${olmId}`);
             return next(
@@ -101,23 +166,23 @@ export async function updateHolePunch(
         //     .select()
         //     .from(clientSites)
         //     .where(eq(clientSites.clientId, client.clientId));

         // if (clientSitePairs.length === 0) {
         //     logger.warn(`No sites found for client: ${client.clientId}`);
         //     return next(
         //         createHttpError(HttpCode.NOT_FOUND, "No sites found for client")
         //     );
         // }

         // // Get all sites details
         // const siteIds = clientSitePairs.map(pair => pair.siteId);

         // for (const siteId of siteIds) {
         //     const [site] = await db
         //         .select()
         //         .from(sites)
         //         .where(eq(sites.siteId, siteId));

         //     if (site && site.subnet && site.listenPort) {
         //         destinations.push({
         //             destinationIP: site.subnet.split("/")[0],
@@ -141,7 +206,9 @@ export async function updateHolePunch(
         for (const site of sitesData) {
             if (!site.sites.subnet) {
-                logger.warn(`Site ${site.sites.siteId} has no subnet, skipping`);
+                logger.warn(
+                    `Site ${site.sites.siteId} has no subnet, skipping`
+                );
                 continue;
             }
             // find the destinations in the array
@@ -176,51 +243,55 @@ export async function updateHolePunch(
         logger.debug(JSON.stringify(exitNodeDestinations, null, 2));

-        for (const destination of exitNodeDestinations) {
-            // if its the current exit node skip it because it is replying with the same data
-            if (reachableAt && destination.reachableAt == reachableAt) {
-                logger.debug(`Skipping update for reachableAt: ${reachableAt}`);
-                continue;
-            }
-
-            try {
-                const response = await axios.post(
-                    `${destination.reachableAt}/update-destinations`,
-                    {
-                        sourceIp: client.endpoint?.split(":")[0] || "",
-                        sourcePort: parseInt(client.endpoint?.split(":")[1] || "0"),
-                        destinations: destination.destinations
-                    },
-                    {
-                        headers: {
-                            "Content-Type": "application/json"
-                        }
-                    }
-                );
-
-                logger.info("Destinations updated:", {
-                    peer: response.data.status
-                });
-            } catch (error) {
-                if (axios.isAxiosError(error)) {
-                    logger.error(
-                        `Error updating destinations (can Pangolin see Gerbil HTTP API?) for exit node at ${destination.reachableAt} (status: ${error.response?.status}): ${JSON.stringify(error.response?.data, null, 2)}`
-                    );
-                } else {
-                    logger.error(
-                        `Error updating destinations for exit node at ${destination.reachableAt}: ${error}`
-                    );
-                }
-            }
-        }
+        // BECAUSE OF HARD NAT YOU DONT WANT TO SEND THE OLM IP AND PORT TO THE ALL THE OTHER EXIT NODES
+        // BECAUSE THEY WILL GET A DIFFERENT IP AND PORT
+
+        // for (const destination of exitNodeDestinations) {
+        //     // if its the current exit node skip it because it is replying with the same data
+        //     if (reachableAt && destination.reachableAt == reachableAt) {
+        //         logger.debug(`Skipping update for reachableAt: ${reachableAt}`);
+        //         continue;
+        //     }
+
+        //     try {
+        //         const response = await axios.post(
+        //             `${destination.reachableAt}/update-destinations`,
+        //             {
+        //                 sourceIp: client.endpoint?.split(":")[0] || "",
+        //                 sourcePort: parseInt(client.endpoint?.split(":")[1] || "0"),
+        //                 destinations: destination.destinations
+        //             },
+        //             {
+        //                 headers: {
+        //                     "Content-Type": "application/json"
+        //                 }
+        //             }
+        //         );
+
+        //         logger.info("Destinations updated:", {
+        //             peer: response.data.status
+        //         });
+        //     } catch (error) {
+        //         if (axios.isAxiosError(error)) {
+        //             logger.error(
+        //                 `Error updating destinations (can Pangolin see Gerbil HTTP API?) for exit node at ${destination.reachableAt} (status: ${error.response?.status}): ${JSON.stringify(error.response?.data, null, 2)}`
+        //             );
+        //         } else {
+        //             logger.error(
+        //                 `Error updating destinations for exit node at ${destination.reachableAt}: ${error}`
+        //             );
+        //         }
+        //     }
+        // }

         // Send the desinations back to the origin
-        destinations = exitNodeDestinations.find(
-            (d) => d.reachableAt === reachableAt
-        )?.destinations || [];
+        destinations =
+            exitNodeDestinations.find((d) => d.reachableAt === reachableAt)
+                ?.destinations || [];
     } else if (newtId) {
-        logger.debug(`Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`);
+        logger.debug(
+            `Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`
+        );

         const { session, newt: newtSession } =
             await validateNewtSessionToken(token);
@@ -232,7 +303,9 @@ export async function updateHolePunch(
         }

         if (newtId !== newtSession.newtId) {
-            logger.warn(`Newt ID mismatch: ${newtId} !== ${newtSession.newtId}`);
+            logger.warn(
+                `Newt ID mismatch: ${newtId} !== ${newtSession.newtId}`
+            );
             return next(
                 createHttpError(HttpCode.UNAUTHORIZED, "Unauthorized")
             );
@@ -261,7 +334,7 @@ export async function updateHolePunch(
             })
             .where(eq(sites.siteId, newt.siteId))
             .returning();

         if (!updatedSite || !updatedSite.subnet) {
             logger.warn(`Site not found: ${newt.siteId}`);
             return next(
@@ -274,7 +347,7 @@ export async function updateHolePunch(
         //     .select()
         //     .from(clientSites)
         //     .where(eq(clientSites.siteId, newt.siteId));

         // THE NEWT IS NOT SENDING RAW WG TO THE GERBIL SO IDK IF WE REALLY NEED THIS - REMOVING
         // Get client details for each client
         // for (const pair of sitesClientPairs) {
@@ -282,7 +355,7 @@ export async function updateHolePunch(
         //         .select()
         //         .from(clients)
         //         .where(eq(clients.clientId, pair.clientId));

         //     if (client && client.endpoint) {
         //         const [host, portStr] = client.endpoint.split(':');
         //         if (host && portStr) {
@@ -293,27 +366,27 @@ export async function updateHolePunch(
         //         }
         //     }
         // }

         // If this is a newt/site, also add other sites in the same org
         // if (updatedSite.orgId) {
         //     const orgSites = await db
         //         .select()
         //         .from(sites)
         //         .where(eq(sites.orgId, updatedSite.orgId));

         //     for (const site of orgSites) {
         //         // Don't add the current site to the destinations
         //         if (site.siteId !== currentSiteId && site.subnet && site.endpoint && site.listenPort) {
         //             const [host, portStr] = site.endpoint.split(':');
         //             if (host && portStr) {
         //                 destinations.push({
         //                     destinationIP: host,
         //                     destinationPort: site.listenPort
         //                 });
         //             }
         //         }
         //     }
         // }
     }

     // if (destinations.length === 0) {
@@ -336,4 +409,4 @@ export async function updateHolePunch(
         )
     );
 }
 }
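
For reference, a hole-punch request satisfying the updated updateHolePunchSchema now looks roughly like this (all values below are invented; the field names follow the schema hunk above). The new required field is publicKey, which lets the server update clientSites.endpoint only for sites behind the exit node that was actually punched:

    // Illustrative payload, not captured traffic.
    const exampleHolePunchBody = {
        olmId: "olm_abc123",                    // or newtId when a site reports its own punch
        token: "<olm session token>",
        ip: "203.0.113.10",                     // address observed by the exit node
        port: 51820,
        timestamp: Date.now(),
        publicKey: "<exit node public key>",    // new: identifies which exit node was punched
        reachableAt: "http://10.1.0.2:3003"     // optional; matches the exit node's reachableAt
    };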

View file

@@ -157,9 +157,6 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
                 if (!client.clients.subnet) {
                     return false;
                 }
-                if (!client.clients.endpoint) {
-                    return false;
-                }
                 return true;
             })
             .map(async (client) => {
@@ -215,7 +212,7 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
                     allowedIps: [`${client.clients.subnet.split("/")[0]}/32`], // we want to only allow from that client
                     endpoint: client.clientSites.isRelayed
                         ? ""
-                        : client.clients.endpoint! // if its relayed it should be localhost
+                        : client.clientSites.endpoint! // if its relayed it should be localhost
                 };
             })
         );
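
The config handler's endpoint choice, extracted as a tiny sketch (the helper name is mine): a relayed client gets an empty endpoint so traffic flows through the relay, while a direct client gets its per-site hole-punched endpoint.

    // Hypothetical helper mirroring the selection above.
    function peerEndpointFor(clientSite: { isRelayed: boolean; endpoint: string | null }): string {
        return clientSite.isRelayed ? "" : clientSite.endpoint ?? "";
    }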

View file

@@ -8,7 +8,7 @@ import {
     olms,
     sites
 } from "@server/db";
-import { eq, inArray } from "drizzle-orm";
+import { and, eq, inArray } from "drizzle-orm";
 import { addPeer, deletePeer } from "../newt/peers";
 import logger from "@server/logger";
@@ -147,15 +147,24 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
             continue;
         }

+        const [clientSite] = await db
+            .select()
+            .from(clientSites)
+            .where(and(
+                eq(clientSites.clientId, client.clientId),
+                eq(clientSites.siteId, site.siteId)
+            ))
+            .limit(1);
+
         // Add the peer to the exit node for this site
-        if (client.endpoint) {
+        if (clientSite.endpoint) {
             logger.info(
-                `Adding peer ${publicKey} to site ${site.siteId} with endpoint ${client.endpoint}`
+                `Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
             );
             await addPeer(site.siteId, {
                 publicKey: publicKey,
                 allowedIps: [`${client.subnet.split('/')[0]}/32`], // we want to only allow from that client
-                endpoint: relay ? "" : client.endpoint
+                endpoint: relay ? "" : clientSite.endpoint
             });
         } else {
             logger.warn(