feat(kubernetes): add kubernetes tool (#1929)

Co-authored-by: oussama Dahmaz <dahmaz@MacBook-Pro-de-odahmaz.local>
This commit is contained in:
oussama Dahmaz
2025-03-09 17:46:15 +01:00
committed by GitHub
parent f6f0d7c72b
commit f19aa29607
67 changed files with 3897 additions and 67 deletions

15
packages/api/src/env.ts Normal file
View File

@@ -0,0 +1,15 @@
import { createEnv } from "@t3-oss/env-nextjs";
import { z } from "zod";
import { shouldSkipEnvValidation } from "@homarr/common/env-validation";
// Typed, zod-validated environment for the Kubernetes integration.
export const env = createEnv({
  server: {
    // Name of the service account Homarr authenticates as when running
    // in-cluster; optional so non-Kubernetes deployments can omit it
    // (the client falls back to "default-sa").
    KUBERNETES_SERVICE_ACCOUNT_NAME: z.string().optional(),
  },
  runtimeEnv: {
    KUBERNETES_SERVICE_ACCOUNT_NAME: process.env.KUBERNETES_SERVICE_ACCOUNT_NAME,
  },
  // Lets build/CI steps opt out of validation (shared helper decides when).
  skipValidation: shouldSkipEnvValidation(),
  // Treat KUBERNETES_SERVICE_ACCOUNT_NAME="" the same as unset.
  emptyStringAsUndefined: true,
});

View File

@@ -0,0 +1,17 @@
import { TRPCError } from "@trpc/server";
import { env } from "@homarr/docker/env";
import { publicProcedure } from "../trpc";
/**
 * tRPC middleware gating Docker routes: when Docker support is disabled the
 * procedure responds with NOT_FOUND instead of executing.
 */
export const dockerMiddleware = () => {
  return publicProcedure.use(async ({ next }) => {
    // Guard clause: refuse early when the feature flag is off.
    if (!env.ENABLE_DOCKER) {
      throw new TRPCError({
        code: "NOT_FOUND",
        message: "Docker route is not available",
      });
    }
    return await next();
  });
};

View File

@@ -0,0 +1,17 @@
import { TRPCError } from "@trpc/server";
import { env } from "@homarr/docker/env";
import { publicProcedure } from "../trpc";
/**
 * tRPC middleware gating Kubernetes routes: when Kubernetes support is
 * disabled the procedure responds with NOT_FOUND instead of executing.
 */
export const kubernetesMiddleware = () => {
  return publicProcedure.use(async ({ next }) => {
    // Guard clause: refuse early when the feature flag is off.
    if (!env.ENABLE_KUBERNETES) {
      throw new TRPCError({
        code: "NOT_FOUND",
        message: "Kubernetes route is not available",
      });
    }
    return await next();
  });
};

View File

@@ -10,6 +10,7 @@ import { iconsRouter } from "./router/icons";
import { importRouter } from "./router/import/import-router";
import { integrationRouter } from "./router/integration/integration-router";
import { inviteRouter } from "./router/invite";
import { kubernetesRouter } from "./router/kubernetes/router/kubernetes-router";
import { locationRouter } from "./router/location";
import { logRouter } from "./router/log";
import { mediaRouter } from "./router/medias/media-router";
@@ -39,6 +40,7 @@ export const appRouter = createTRPCRouter({
onboard: onboardRouter,
home: homeRouter,
docker: dockerRouter,
kubernetes: kubernetesRouter,
serverSettings: serverSettingsRouter,
cronJobs: cronJobsRouter,
apiKeys: apiKeysRouter,

View File

@@ -8,6 +8,7 @@ import type { Container, ContainerInfo, ContainerState, Docker, Port } from "@ho
import { logger } from "@homarr/log";
import { createCacheChannel } from "@homarr/redis";
import { dockerMiddleware } from "../../middlewares/docker";
import { createTRPCRouter, permissionRequiredProcedure } from "../../trpc";
const dockerCache = createCacheChannel<{
@@ -15,72 +16,79 @@ const dockerCache = createCacheChannel<{
}>("docker-containers", 5 * 60 * 1000);
export const dockerRouter = createTRPCRouter({
getContainers: permissionRequiredProcedure.requiresPermission("admin").query(async () => {
const result = await dockerCache
.consumeAsync(async () => {
const dockerInstances = DockerSingleton.getInstances();
const containers = await Promise.all(
// Return all the containers of all the instances into only one item
dockerInstances.map(({ instance, host: key }) =>
instance.listContainers({ all: true }).then((containers) =>
containers.map((container) => ({
...container,
instance: key,
})),
getContainers: permissionRequiredProcedure
.requiresPermission("admin")
.unstable_concat(dockerMiddleware())
.query(async () => {
const result = await dockerCache
.consumeAsync(async () => {
const dockerInstances = DockerSingleton.getInstances();
const containers = await Promise.all(
// Return all the containers of all the instances into only one item
dockerInstances.map(({ instance, host: key }) =>
instance.listContainers({ all: true }).then((containers) =>
containers.map((container) => ({
...container,
instance: key,
})),
),
),
),
).then((containers) => containers.flat());
).then((containers) => containers.flat());
const extractImage = (container: ContainerInfo) => container.Image.split("/").at(-1)?.split(":").at(0) ?? "";
const likeQueries = containers.map((container) => like(icons.name, `%${extractImage(container)}%`));
const dbIcons =
likeQueries.length >= 1
? await db.query.icons.findMany({
where: or(...likeQueries),
})
: [];
const extractImage = (container: ContainerInfo) => container.Image.split("/").at(-1)?.split(":").at(0) ?? "";
const likeQueries = containers.map((container) => like(icons.name, `%${extractImage(container)}%`));
const dbIcons =
likeQueries.length >= 1
? await db.query.icons.findMany({
where: or(...likeQueries),
})
: [];
return {
containers: containers.map((container) => ({
...container,
iconUrl:
dbIcons.find((icon) => {
const extractedImage = extractImage(container);
if (!extractedImage) return false;
return icon.name.toLowerCase().includes(extractedImage.toLowerCase());
})?.url ?? null,
})),
};
})
.catch((error) => {
logger.error(error);
return {
isError: true,
error: error as unknown,
};
});
return {
containers: containers.map((container) => ({
...container,
iconUrl:
dbIcons.find((icon) => {
const extractedImage = extractImage(container);
if (!extractedImage) return false;
return icon.name.toLowerCase().includes(extractedImage.toLowerCase());
})?.url ?? null,
})),
};
})
.catch((error) => {
logger.error(error);
return {
isError: true,
error: error as unknown,
};
});
if ("isError" in result) {
throw new TRPCError({
code: "INTERNAL_SERVER_ERROR",
message: "An error occurred while fetching the containers",
cause: result.error,
});
}
if ("isError" in result) {
throw new TRPCError({
code: "INTERNAL_SERVER_ERROR",
message: "An error occurred while fetching the containers",
cause: result.error,
});
}
const { data, timestamp } = result;
const { data, timestamp } = result;
return {
containers: sanitizeContainers(data.containers),
timestamp,
};
}),
invalidate: permissionRequiredProcedure.requiresPermission("admin").mutation(async () => {
await dockerCache.invalidateAsync();
return;
}),
return {
containers: sanitizeContainers(data.containers),
timestamp,
};
}),
invalidate: permissionRequiredProcedure
.requiresPermission("admin")
.unstable_concat(dockerMiddleware())
.mutation(async () => {
await dockerCache.invalidateAsync();
return;
}),
startAll: permissionRequiredProcedure
.requiresPermission("admin")
.unstable_concat(dockerMiddleware())
.input(z.object({ ids: z.array(z.string()) }))
.mutation(async ({ input }) => {
await Promise.allSettled(
@@ -94,6 +102,7 @@ export const dockerRouter = createTRPCRouter({
}),
stopAll: permissionRequiredProcedure
.requiresPermission("admin")
.unstable_concat(dockerMiddleware())
.input(z.object({ ids: z.array(z.string()) }))
.mutation(async ({ input }) => {
await Promise.allSettled(
@@ -107,6 +116,7 @@ export const dockerRouter = createTRPCRouter({
}),
restartAll: permissionRequiredProcedure
.requiresPermission("admin")
.unstable_concat(dockerMiddleware())
.input(z.object({ ids: z.array(z.string()) }))
.mutation(async ({ input }) => {
await Promise.allSettled(
@@ -120,6 +130,7 @@ export const dockerRouter = createTRPCRouter({
}),
removeAll: permissionRequiredProcedure
.requiresPermission("admin")
.unstable_concat(dockerMiddleware())
.input(z.object({ ids: z.array(z.string()) }))
.mutation(async ({ input }) => {
await Promise.allSettled(

View File

@@ -0,0 +1,73 @@
import * as fs from "fs";
import { CoreV1Api, KubeConfig, Metrics, NetworkingV1Api, VersionApi } from "@kubernetes/client-node";
import { env } from "../../env";
/**
 * Lazily-initialised singleton wrapping the Kubernetes client APIs used by
 * the routers (core, networking, metrics, version).
 *
 * In development the local default kubeconfig is used; otherwise the
 * in-cluster configuration is loaded and rewired to authenticate with the
 * pod's mounted service-account token and CA certificate.
 */
export class KubernetesClient {
  private static instance: KubernetesClient | null = null;
  public kubeConfig: KubeConfig;
  public coreApi: CoreV1Api;
  public networkingApi: NetworkingV1Api;
  public metricsApi: Metrics;
  public versionApi: VersionApi;

  private constructor() {
    this.kubeConfig = new KubeConfig();
    if (process.env.NODE_ENV === "development") {
      // Local development: use the developer's default kubeconfig (~/.kube/config).
      this.kubeConfig.loadFromDefault();
    } else {
      // In-cluster: start from the pod's environment-provided configuration.
      this.kubeConfig.loadFromCluster();
      const currentCluster = this.kubeConfig.getCurrentCluster();
      if (!currentCluster) throw new Error("No cluster configuration found");
      // Credentials mounted by Kubernetes into every pod.
      // NOTE(review): the token is read once at construction; projected
      // service-account tokens rotate, so a long-lived process may need to
      // re-read it — confirm against deployment setup.
      const token = fs.readFileSync("/var/run/secrets/kubernetes.io/serviceaccount/token", "utf8");
      const caData = fs.readFileSync("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", "utf8");
      // Clone the detected cluster, attaching the mounted CA data.
      const clusterWithCA = {
        ...currentCluster,
        name: `${currentCluster.name}-service-account`,
        caData,
      };
      const serviceAccountUser = {
        name: env.KUBERNETES_SERVICE_ACCOUNT_NAME ?? "default-sa",
        token,
      };
      // Replace clusters/users with the service-account variants.
      this.kubeConfig.clusters = [];
      this.kubeConfig.users = [];
      this.kubeConfig.addCluster(clusterWithCA);
      this.kubeConfig.addUser(serviceAccountUser);
      // Rebuild the current context so it points at the new cluster/user.
      // (Read the original context BEFORE clearing the context list.)
      const currentContext = this.kubeConfig.getCurrentContext();
      const originalContext = this.kubeConfig.getContextObject(currentContext);
      if (!originalContext) throw new Error("No context found");
      const updatedContext = {
        ...originalContext,
        name: `${originalContext.name}-service-account`,
        cluster: clusterWithCA.name,
        user: serviceAccountUser.name,
      };
      this.kubeConfig.contexts = [];
      this.kubeConfig.addContext(updatedContext);
      this.kubeConfig.setCurrentContext(updatedContext.name);
    }
    // API clients share the (possibly rewired) kubeconfig.
    this.coreApi = this.kubeConfig.makeApiClient(CoreV1Api);
    this.networkingApi = this.kubeConfig.makeApiClient(NetworkingV1Api);
    this.metricsApi = new Metrics(this.kubeConfig);
    this.versionApi = this.kubeConfig.makeApiClient(VersionApi);
  }

  /** Returns the process-wide client, creating it on first use. */
  public static getInstance(): KubernetesClient {
    if (!KubernetesClient.instance) {
      KubernetesClient.instance = new KubernetesClient();
    }
    return KubernetesClient.instance;
  }
}

View File

@@ -0,0 +1,41 @@
import type { ResourceParser } from "./resource-parser";

/**
 * Parses Kubernetes CPU quantity strings into a number of cores.
 *
 * Supported suffixes: "n" (nano-cores), "u" (micro-cores), "m" (milli-cores),
 * "k" (thousands of cores); no suffix means whole cores.
 * Returns NaN for empty or unparsable input.
 */
export class CpuResourceParser implements ResourceParser {
  // Divisors/multipliers relative to one core.
  // (Fixes the original misspelled/inconsistently-cased `MiliCore` and
  // `ThousandCore` constants; values are unchanged.)
  private readonly nanoCoresPerCore = 1_000_000_000;
  private readonly microCoresPerCore = 1_000_000;
  private readonly milliCoresPerCore = 1_000;
  private readonly coresPerKiloCore = 1_000;

  parse(value: string): number {
    if (!value.length) {
      return NaN;
    }
    // Strip thousands separators and surrounding whitespace.
    value = value.replace(/,/g, "").trim();
    const [, numericValue, unit = ""] = /^([0-9.]+)\s*([a-zA-Z]*)$/.exec(value) ?? [];
    if (numericValue === undefined) {
      return NaN;
    }
    const parsedValue = parseFloat(numericValue);
    if (isNaN(parsedValue)) {
      return NaN;
    }
    // NOTE(review): units are lower-cased, so an SI "M" (mega) would be read
    // as milli. Node CPU quantities are reported in cores or n/u/m-cores, so
    // this matches the original intent — confirm if other inputs appear.
    switch (unit.toLowerCase()) {
      case "n": // nano-cores (billionths of a core)
        return parsedValue / this.nanoCoresPerCore;
      case "u": // micro-cores (millionths of a core)
        return parsedValue / this.microCoresPerCore;
      case "m": // milli-cores (thousandths of a core)
        return parsedValue / this.milliCoresPerCore;
      case "k": // thousands of cores
        return parsedValue * this.coresPerKiloCore;
      default: // cores (no unit)
        return parsedValue;
    }
  }
}

View File

@@ -0,0 +1,69 @@
import type { ResourceParser } from "./resource-parser";

/**
 * Parses Kubernetes memory quantity strings and returns the amount in GiB.
 *
 * Binary (IEC) suffixes Ki/Mi/Gi/Ti/Pi and decimal (SI) suffixes K/M/G/T/P
 * are supported; a missing or unknown suffix is treated as plain bytes.
 * Returns NaN for empty or unparsable input.
 */
export class MemoryResourceParser implements ResourceParser {
  // Binary (IEC) multipliers, in bytes.
  private readonly binaryMultipliers: Record<string, number> = {
    ki: 1024,
    mi: 1024 ** 2,
    gi: 1024 ** 3,
    ti: 1024 ** 4,
    pi: 1024 ** 5,
  } as const;
  // Decimal (SI) multipliers, in bytes.
  private readonly decimalMultipliers: Record<string, number> = {
    k: 1000,
    m: 1000 ** 2,
    g: 1000 ** 3,
    t: 1000 ** 4,
    p: 1000 ** 5,
  } as const;
  // Output unit: 1 GiB in bytes.
  private readonly bytesPerGiB = 1024 ** 3;

  parse(value: string): number {
    if (!value.length) {
      return NaN;
    }
    // Strip thousands separators and surrounding whitespace.
    value = value.replace(/,/g, "").trim();
    const [, numericValue, unit = ""] = /^([0-9.]+)\s*([a-zA-Z]*)$/.exec(value) ?? [];
    if (!numericValue) {
      return NaN;
    }
    const parsedValue = parseFloat(numericValue);
    if (isNaN(parsedValue)) {
      return NaN;
    }
    const unitLower = unit.toLowerCase();
    // Single lookup replaces the original's three duplicated branches, each
    // re-fetching the GiB divisor behind unreachable `undefined` guards.
    // Unknown/absent unit falls through to bytes (multiplier 1).
    const multiplier = this.binaryMultipliers[unitLower] ?? this.decimalMultipliers[unitLower] ?? 1;
    return (parsedValue * multiplier) / this.bytesPerGiB;
  }
}

View File

@@ -0,0 +1,3 @@
/**
 * Contract for parsers that turn a Kubernetes quantity string (e.g. "250m",
 * "512Mi") into a number. Implementations return NaN for unparsable input.
 */
export interface ResourceParser {
  parse(value: string): number;
}

View File

@@ -0,0 +1,196 @@
import type { V1NodeList, VersionInfo } from "@kubernetes/client-node";
import { TRPCError } from "@trpc/server";
import type { ClusterResourceCount, KubernetesCluster } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
import { CpuResourceParser } from "../resource-parser/cpu-resource-parser";
import { MemoryResourceParser } from "../resource-parser/memory-resource-parser";
export const clusterRouter = createTRPCRouter({
  /**
   * Aggregates cluster-wide information: version, detected providers, node
   * count and CPU / memory / pod capacity statistics (reserved vs. used).
   */
  getCluster: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesCluster> => {
      const { coreApi, metricsApi, versionApi, kubeConfig } = KubernetesClient.getInstance();
      try {
        const versionInfo = await versionApi.getCode();
        const nodes = await coreApi.listNode();
        const nodeMetricsClient = await metricsApi.getNodeMetrics();
        const listPodForAllNamespaces = await coreApi.listPodForAllNamespaces();
        // Cluster-wide totals accumulated over every node.
        let totalCPUCapacity = 0;
        let totalCPUAllocatable = 0;
        let totalCPUUsage = 0;
        let totalMemoryCapacity = 0;
        let totalMemoryAllocatable = 0;
        let totalMemoryUsage = 0;
        let totalCapacityPods = 0;
        const cpuResourceParser = new CpuResourceParser();
        const memoryResourceParser = new MemoryResourceParser();
        nodes.items.forEach((node) => {
          totalCapacityPods += Number(node.status?.capacity?.pods);
          const cpuCapacity = cpuResourceParser.parse(node.status?.capacity?.cpu ?? "0");
          const cpuAllocatable = cpuResourceParser.parse(node.status?.allocatable?.cpu ?? "0");
          totalCPUCapacity += cpuCapacity;
          totalCPUAllocatable += cpuAllocatable;
          const memoryCapacity = memoryResourceParser.parse(node.status?.capacity?.memory ?? "0");
          const memoryAllocatable = memoryResourceParser.parse(node.status?.allocatable?.memory ?? "0");
          totalMemoryCapacity += memoryCapacity;
          totalMemoryAllocatable += memoryAllocatable;
          // Usage comes from the metrics API, matched to the node by name.
          const nodeName = node.metadata?.name;
          const nodeMetric = nodeMetricsClient.items.find((metric) => metric.metadata.name === nodeName);
          if (nodeMetric) {
            const cpuUsage = cpuResourceParser.parse(nodeMetric.usage.cpu);
            totalCPUUsage += cpuUsage;
            const memoryUsage = memoryResourceParser.parse(nodeMetric.usage.memory);
            totalMemoryUsage += memoryUsage;
          }
        });
        // Reserved = what the system keeps back (capacity - allocatable).
        const reservedCPU = totalCPUCapacity - totalCPUAllocatable;
        const reservedMemory = totalMemoryCapacity - totalMemoryAllocatable;
        const reservedCPUPercentage = (reservedCPU / totalCPUCapacity) * 100;
        const reservedMemoryPercentage = (reservedMemory / totalMemoryCapacity) * 100;
        const usagePercentageAllocatable = (totalCPUUsage / totalCPUAllocatable) * 100;
        const usagePercentageMemoryAllocatable = (totalMemoryUsage / totalMemoryAllocatable) * 100;
        const usedPodsPercentage = (listPodForAllNamespaces.items.length / totalCapacityPods) * 100;
        return {
          name: kubeConfig.getCurrentContext(),
          providers: getProviders(versionInfo, nodes),
          kubernetesVersion: versionInfo.gitVersion,
          architecture: versionInfo.platform,
          nodeCount: nodes.items.length,
          capacity: [
            {
              type: "CPU",
              resourcesStats: [
                {
                  percentageValue: Number(reservedCPUPercentage.toFixed(2)),
                  type: "Reserved",
                  capacityUnit: "Cores",
                  usedValue: Number(reservedCPU.toFixed(2)),
                  maxUsedValue: Number(totalCPUCapacity.toFixed(2)),
                },
                {
                  percentageValue: Number(usagePercentageAllocatable.toFixed(2)),
                  type: "Used",
                  capacityUnit: "Cores",
                  usedValue: Number(totalCPUUsage.toFixed(2)),
                  maxUsedValue: Number(totalCPUAllocatable.toFixed(2)),
                },
              ],
            },
            {
              type: "Memory",
              resourcesStats: [
                {
                  percentageValue: Number(reservedMemoryPercentage.toFixed(2)),
                  type: "Reserved",
                  capacityUnit: "GiB",
                  usedValue: Number(reservedMemory.toFixed(2)),
                  maxUsedValue: Number(totalMemoryCapacity.toFixed(2)),
                },
                {
                  percentageValue: Number(usagePercentageMemoryAllocatable.toFixed(2)),
                  type: "Used",
                  capacityUnit: "GiB",
                  usedValue: Number(totalMemoryUsage.toFixed(2)),
                  maxUsedValue: Number(totalMemoryAllocatable.toFixed(2)),
                },
              ],
            },
            {
              type: "Pods",
              resourcesStats: [
                {
                  percentageValue: Number(usedPodsPercentage.toFixed(2)),
                  type: "Used",
                  usedValue: listPodForAllNamespaces.items.length,
                  maxUsedValue: totalCapacityPods,
                },
              ],
            },
          ],
        };
      } catch (error) {
        logger.error("Unable to retrieve cluster", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes cluster",
          cause: error,
        });
      }
    }),
  /**
   * Counts the main resource kinds across all namespaces (nodes, namespaces,
   * ingresses, services, pods, secrets, configmaps, PVCs).
   */
  getClusterResourceCounts: permissionRequiredProcedure
    .requiresPermission("admin")
    // FIX: this procedure was missing the kubernetesMiddleware() gate that
    // every other kubernetes procedure applies, leaving the route reachable
    // even when Kubernetes support is disabled.
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<ClusterResourceCount[]> => {
      const { coreApi, networkingApi } = KubernetesClient.getInstance();
      try {
        // Independent list calls run in parallel.
        const [pods, ingresses, services, configMaps, namespaces, nodes, secrets, volumes] = await Promise.all([
          coreApi.listPodForAllNamespaces(),
          networkingApi.listIngressForAllNamespaces(),
          coreApi.listServiceForAllNamespaces(),
          coreApi.listConfigMapForAllNamespaces(),
          coreApi.listNamespace(),
          coreApi.listNode(),
          coreApi.listSecretForAllNamespaces(),
          coreApi.listPersistentVolumeClaimForAllNamespaces(),
        ]);
        return [
          { label: "nodes", count: nodes.items.length },
          { label: "namespaces", count: namespaces.items.length },
          { label: "ingresses", count: ingresses.items.length },
          { label: "services", count: services.items.length },
          { label: "pods", count: pods.items.length },
          { label: "secrets", count: secrets.items.length },
          { label: "configmaps", count: configMaps.items.length },
          { label: "volumes", count: volumes.items.length },
        ];
      } catch (error) {
        logger.error("Unable to retrieve cluster resource counts", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes resources count",
          cause: error,
        });
      }
    }),
});
/**
 * Derives a comma-separated list of detected cluster providers (k3s, GKE,
 * EKS, AKS) from the server version string and per-node labels.
 */
function getProviders(versionInfo: VersionInfo, nodes: V1NodeList) {
  const detected = new Set<string>();
  // Markers embedded in the git version string (checked first, in order).
  const versionMarkers: [string, string][] = [
    ["k3s", "k3s"],
    ["gke", "GKE"],
    ["eks", "EKS"],
    ["aks", "AKS"],
  ];
  for (const [marker, provider] of versionMarkers) {
    if (versionInfo.gitVersion.includes(marker)) detected.add(provider);
  }
  // Markers found in well-known node labels.
  const labelMarkers: [string, string][] = [
    ["aws", "EKS"],
    ["azure", "AKS"],
    ["gce", "GKE"],
    ["k3s", "k3s"],
  ];
  for (const node of nodes.items) {
    const nodeProviderLabel =
      node.metadata?.labels?.["node.kubernetes.io/instance-type"] ?? node.metadata?.labels?.provider ?? "";
    for (const [marker, provider] of labelMarkers) {
      if (nodeProviderLabel.includes(marker)) detected.add(provider);
    }
  }
  return Array.from(detected).join(", ");
}

View File

@@ -0,0 +1,36 @@
import { TRPCError } from "@trpc/server";
import type { KubernetesBaseResource } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
export const configMapsRouter = createTRPCRouter({
  /** Lists every ConfigMap across all namespaces as a base resource. */
  getConfigMaps: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesBaseResource[]> => {
      const { coreApi } = KubernetesClient.getInstance();
      try {
        const { items } = await coreApi.listConfigMapForAllNamespaces();
        return items.map(({ metadata }) => ({
          name: metadata?.name ?? "unknown",
          namespace: metadata?.namespace ?? "unknown",
          creationTimestamp: metadata?.creationTimestamp,
        }));
      } catch (error) {
        logger.error("Unable to retrieve configMaps", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes ConfigMaps",
          cause: error,
        });
      }
    }),
});

View File

@@ -0,0 +1,54 @@
import type { V1HTTPIngressPath, V1Ingress, V1IngressRule } from "@kubernetes/client-node";
import { TRPCError } from "@trpc/server";
import type { KubernetesIngress, KubernetesIngressPath, KubernetesIngressRuleAndPath } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
export const ingressesRouter = createTRPCRouter({
  /** Lists every Ingress across all namespaces, flattened to rules/paths. */
  getIngresses: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesIngress[]> => {
      const { networkingApi } = KubernetesClient.getInstance();
      try {
        const ingresses = await networkingApi.listIngressForAllNamespaces();
        // Maps one V1Ingress to the app-facing shape; missing metadata
        // falls back to empty strings.
        const mapIngress = (ingress: V1Ingress): KubernetesIngress => {
          return {
            name: ingress.metadata?.name ?? "",
            namespace: ingress.metadata?.namespace ?? "",
            className: ingress.spec?.ingressClassName ?? "",
            rulesAndPaths: getIngressRulesAndPaths(ingress.spec?.rules ?? []),
            creationTimestamp: ingress.metadata?.creationTimestamp,
          };
        };
        // One entry per rule: the host plus its HTTP paths.
        const getIngressRulesAndPaths = (rules: V1IngressRule[] = []): KubernetesIngressRuleAndPath[] => {
          return rules.map((rule) => ({
            host: rule.host ?? "",
            paths: getPaths(rule.http?.paths ?? []),
          }));
        };
        // Backend service name/port per path; 0 when the port is unset
        // (e.g. the backend uses a named port).
        const getPaths = (paths: V1HTTPIngressPath[] = []): KubernetesIngressPath[] => {
          return paths.map((path) => ({
            serviceName: path.backend.service?.name ?? "",
            port: path.backend.service?.port?.number ?? 0,
          }));
        };
        return ingresses.items.map(mapIngress);
      } catch (error) {
        logger.error("Unable to retrieve ingresses", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes ingresses",
          cause: error,
        });
      }
    }),
});

View File

@@ -0,0 +1,22 @@
import { createTRPCRouter } from "../../../trpc";
import { clusterRouter } from "./cluster";
import { configMapsRouter } from "./configMaps";
import { ingressesRouter } from "./ingresses";
import { namespacesRouter } from "./namespaces";
import { nodesRouter } from "./nodes";
import { podsRouter } from "./pods";
import { secretsRouter } from "./secrets";
import { servicesRouter } from "./services";
import { volumesRouter } from "./volumes";
// Root Kubernetes router bundling one sub-router per resource kind.
export const kubernetesRouter = createTRPCRouter({
  nodes: nodesRouter,
  cluster: clusterRouter,
  namespaces: namespacesRouter,
  ingresses: ingressesRouter,
  services: servicesRouter,
  pods: podsRouter,
  secrets: secretsRouter,
  configMaps: configMapsRouter,
  volumes: volumesRouter,
});

View File

@@ -0,0 +1,36 @@
import { TRPCError } from "@trpc/server";
import type { KubernetesNamespace, KubernetesNamespaceState } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
export const namespacesRouter = createTRPCRouter({
  /** Lists all namespaces with their phase, name and creation time. */
  getNamespaces: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesNamespace[]> => {
      const { coreApi } = KubernetesClient.getInstance();
      try {
        const { items } = await coreApi.listNamespace();
        return items.map(
          (namespace) =>
            ({
              status: namespace.status?.phase as KubernetesNamespaceState,
              name: namespace.metadata?.name ?? "unknown",
              creationTimestamp: namespace.metadata?.creationTimestamp,
            }) satisfies KubernetesNamespace,
        );
      } catch (error) {
        logger.error("Unable to retrieve namespaces", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes namespaces",
          cause: error,
        });
      }
    }),
});

View File

@@ -0,0 +1,68 @@
import { TRPCError } from "@trpc/server";
import type { KubernetesNode, KubernetesNodeState } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
import { CpuResourceParser } from "../resource-parser/cpu-resource-parser";
import { MemoryResourceParser } from "../resource-parser/memory-resource-parser";
export const nodesRouter = createTRPCRouter({
  /** Lists all nodes with readiness plus CPU/memory usage percentages. */
  getNodes: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesNode[]> => {
      const { coreApi, metricsApi } = KubernetesClient.getInstance();
      try {
        const nodes = await coreApi.listNode();
        const nodeMetricsClient = await metricsApi.getNodeMetrics();
        const cpuResourceParser = new CpuResourceParser();
        const memoryResourceParser = new MemoryResourceParser();
        return nodes.items.map((node) => {
          const name = node.metadata?.name ?? "unknown";
          // A node is "Ready" only when its Ready condition reports "True".
          const readyCondition = node.status?.conditions?.find((condition) => condition.type === "Ready");
          const status: KubernetesNodeState = readyCondition?.status === "True" ? "Ready" : "NotReady";
          const cpuAllocatable = cpuResourceParser.parse(node.status?.allocatable?.cpu ?? "0");
          const memoryAllocatable = memoryResourceParser.parse(node.status?.allocatable?.memory ?? "0");
          // Usage from the metrics API, matched by node name; stays 0 when
          // no metric sample exists for the node.
          let cpuUsage = 0;
          let memoryUsage = 0;
          const nodeMetric = nodeMetricsClient.items.find((metric) => metric.metadata.name === name);
          if (nodeMetric) {
            cpuUsage += cpuResourceParser.parse(nodeMetric.usage.cpu);
            memoryUsage += memoryResourceParser.parse(nodeMetric.usage.memory);
          }
          // NOTE(review): if allocatable parses to 0 these divisions yield
          // Infinity/NaN which survive toFixed as NaN — confirm the UI
          // tolerates that for misbehaving nodes.
          const usagePercentageCPUAllocatable = (cpuUsage / cpuAllocatable) * 100;
          const usagePercentageMemoryAllocatable = (memoryUsage / memoryAllocatable) * 100;
          return {
            name,
            status,
            allocatableCpuPercentage: Number(usagePercentageCPUAllocatable.toFixed(0)),
            allocatableRamPercentage: Number(usagePercentageMemoryAllocatable.toFixed(0)),
            // NOTE(review): Number(undefined) is NaN when capacity.pods is
            // missing — verify downstream rendering.
            podsCount: Number(node.status?.capacity?.pods),
            operatingSystem: node.status?.nodeInfo?.operatingSystem,
            architecture: node.status?.nodeInfo?.architecture,
            kubernetesVersion: node.status?.nodeInfo?.kubeletVersion,
            creationTimestamp: node.metadata?.creationTimestamp,
          };
        });
      } catch (error) {
        logger.error("Unable to retrieve nodes", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes nodes",
          cause: error,
        });
      }
    }),
});

View File

@@ -0,0 +1,104 @@
import type { KubeConfig, V1OwnerReference } from "@kubernetes/client-node";
import { AppsV1Api } from "@kubernetes/client-node";
import { TRPCError } from "@trpc/server";
import type { KubernetesPod } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
export const podsRouter = createTRPCRouter({
  /**
   * Lists all pods with an "applicationType" derived from Helm labels or
   * owner references (Deployment/StatefulSet/DaemonSet/ReplicaSet chain).
   */
  getPods: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesPod[]> => {
      const { coreApi, kubeConfig } = KubernetesClient.getInstance();
      try {
        const podsResp = await coreApi.listPodForAllNamespaces();
        const pods: KubernetesPod[] = [];
        // NOTE(review): ReplicaSet owners trigger an extra API read per pod,
        // sequentially — potential N+1 on large clusters; confirm acceptable.
        for (const pod of podsResp.items) {
          const labels = pod.metadata?.labels ?? {};
          const ownerRefs = pod.metadata?.ownerReferences ?? [];
          let applicationType = "Pod";
          if (labels["app.kubernetes.io/managed-by"] === "Helm") {
            // Helm label wins over owner-reference inspection.
            applicationType = "Helm";
          } else {
            for (const owner of ownerRefs) {
              if (["Deployment", "StatefulSet", "DaemonSet"].includes(owner.kind)) {
                applicationType = owner.kind;
                break;
              } else if (owner.kind === "ReplicaSet") {
                // Walk up the ReplicaSet's own owners (usually a Deployment).
                const ownerType = await getOwnerKind(kubeConfig, owner, pod.metadata?.namespace ?? "");
                if (ownerType) {
                  applicationType = ownerType;
                  break;
                }
              }
            }
          }
          pods.push({
            name: pod.metadata?.name ?? "",
            namespace: pod.metadata?.namespace ?? "",
            // All container images of the pod, comma separated.
            image: pod.spec?.containers.map((container) => container.image).join(", "),
            applicationType,
            status: pod.status?.phase ?? "unknown",
            creationTimestamp: pod.metadata?.creationTimestamp,
          });
        }
        return pods;
      } catch (error) {
        logger.error("Unable to retrieve pods", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes pods",
          cause: error,
        });
      }
    }),
});
/**
 * Resolves the workload kind that ultimately owns a resource.
 *
 * For a ReplicaSet, reads it via the Apps API and recurses through its own
 * owner references (a Deployment short-circuits). Returns the kind directly
 * for Deployment/StatefulSet/DaemonSet, "ReplicaSet" when no higher owner is
 * found, or null when the kind is unrecognised or the lookup fails.
 */
async function getOwnerKind(
  kubeConfig: KubeConfig,
  ownerRef: V1OwnerReference,
  namespace: string,
): Promise<string | null> {
  const { kind, name } = ownerRef;
  if (kind === "ReplicaSet") {
    const appsApi = kubeConfig.makeApiClient(AppsV1Api);
    try {
      const rsResp = await appsApi.readNamespacedReplicaSet({
        name,
        namespace,
      });
      if (rsResp.metadata?.ownerReferences) {
        for (const rsOwner of rsResp.metadata.ownerReferences) {
          if (rsOwner.kind === "Deployment") {
            return "Deployment";
          }
          // Recurse for nested ownership chains.
          const parentKind = await getOwnerKind(kubeConfig, rsOwner, namespace);
          if (parentKind) return parentKind;
        }
      }
      // Stand-alone ReplicaSet (no recognised parent).
      return "ReplicaSet";
    } catch (error) {
      // Lookup failure is non-fatal; caller falls back to the next owner ref.
      logger.error("Error reading ReplicaSet:", error);
      return null;
    }
  }
  if (["Deployment", "StatefulSet", "DaemonSet"].includes(kind)) {
    return kind;
  }
  return null;
}

View File

@@ -0,0 +1,36 @@
import { TRPCError } from "@trpc/server";
import type { KubernetesSecret } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
export const secretsRouter = createTRPCRouter({
  /** Lists every Secret across all namespaces (metadata + type only). */
  getSecrets: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesSecret[]> => {
      const { coreApi } = KubernetesClient.getInstance();
      try {
        const { items } = await coreApi.listSecretForAllNamespaces();
        return items.map((secret) => ({
          name: secret.metadata?.name ?? "unknown",
          namespace: secret.metadata?.namespace ?? "unknown",
          type: secret.type ?? "unknown",
          creationTimestamp: secret.metadata?.creationTimestamp,
        }));
      } catch (error) {
        logger.error("Unable to retrieve secrets", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes secrets",
          cause: error,
        });
      }
    }),
});

View File

@@ -0,0 +1,40 @@
import { TRPCError } from "@trpc/server";
import type { KubernetesService } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
export const servicesRouter = createTRPCRouter({
  /** Lists every Service across all namespaces with port/IP details. */
  getServices: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesService[]> => {
      const { coreApi } = KubernetesClient.getInstance();
      try {
        const { items } = await coreApi.listServiceForAllNamespaces();
        return items.map((service) => {
          const spec = service.spec;
          return {
            name: service.metadata?.name ?? "unknown",
            namespace: service.metadata?.namespace ?? "",
            type: spec?.type ?? "",
            // "port/protocol" pairs, e.g. "443/TCP".
            ports: spec?.ports?.map(({ port, protocol }) => `${port}/${protocol}`),
            targetPorts: spec?.ports?.map(({ targetPort }) => `${targetPort}`),
            clusterIP: spec?.clusterIP ?? "",
            creationTimestamp: service.metadata?.creationTimestamp,
          };
        });
      } catch (error) {
        logger.error("Unable to retrieve services", error);
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes services",
          cause: error,
        });
      }
    }),
});

View File

@@ -0,0 +1,42 @@
import { TRPCError } from "@trpc/server";
import type { KubernetesVolume } from "@homarr/definitions";
import { logger } from "@homarr/log";
import { kubernetesMiddleware } from "../../../middlewares/kubernetes";
import { createTRPCRouter, permissionRequiredProcedure } from "../../../trpc";
import { KubernetesClient } from "../kubernetes-client";
/**
 * tRPC router exposing read-only access to Kubernetes persistent volume claims.
 * Requires the "admin" permission and the kubernetes middleware
 * (route is hidden unless Kubernetes support is enabled).
 */
export const volumesRouter = createTRPCRouter({
  getVolumes: permissionRequiredProcedure
    .requiresPermission("admin")
    .unstable_concat(kubernetesMiddleware())
    .query(async (): Promise<KubernetesVolume[]> => {
      const { coreApi } = KubernetesClient.getInstance();
      try {
        const volumes = await coreApi.listPersistentVolumeClaimForAllNamespaces();
        return volumes.items.map((volume) => {
          return {
            name: volume.metadata?.name ?? "unknown",
            namespace: volume.metadata?.namespace ?? "unknown",
            // Copy instead of the previous identity .map(); an own array is still returned.
            accessModes: [...(volume.status?.accessModes ?? [])],
            storage: volume.status?.capacity?.storage ?? "",
            storageClassName: volume.spec?.storageClassName ?? "",
            volumeMode: volume.spec?.volumeMode ?? "",
            volumeName: volume.spec?.volumeName ?? "",
            status: volume.status?.phase ?? "",
            creationTimestamp: volume.metadata?.creationTimestamp,
          };
        });
      } catch (error) {
        logger.error("Unable to retrieve volumes", error);
        // Surface a generic 500 to the client; the original error is kept as the cause for logs.
        throw new TRPCError({
          code: "INTERNAL_SERVER_ERROR",
          message: "An error occurred while fetching Kubernetes Volumes",
          cause: error,
        });
      }
    }),
});

View File

@@ -24,6 +24,12 @@ vi.mock("@homarr/redis", () => ({
}),
}));
// Mock the docker env module so procedures gated by dockerMiddleware
// (which checks env.ENABLE_DOCKER) are reachable in tests without
// reading real environment variables.
vi.mock("@homarr/docker/env", () => ({
  env: {
    ENABLE_DOCKER: true,
  },
}));
const createSessionWithPermissions = (...permissions: GroupPermissionKey[]) =>
({
user: {

View File

@@ -0,0 +1,56 @@
import { describe, expect, it } from "vitest";
import { CpuResourceParser } from "../../../kubernetes/resource-parser/cpu-resource-parser";
describe("CpuResourceParser", () => {
  const parser = new CpuResourceParser();

  it("should return NaN for empty or invalid input", () => {
    for (const invalid of ["", " ", "abc"]) {
      expect(parser.parse(invalid)).toBeNaN();
    }
  });

  it("should parse CPU values without a unit (cores)", () => {
    // Plain numbers are already expressed in cores.
    const cases: [string, number][] = [
      ["1", 1],
      ["2.5", 2.5],
      ["10", 10],
    ];
    for (const [input, cores] of cases) {
      expect(parser.parse(input)).toBe(cores);
    }
  });

  it("should parse CPU values with milli-core unit ('m')", () => {
    // 1000 milli-cores equal one core.
    const cases: [string, number][] = [
      ["500m", 0.5],
      ["250m", 0.25],
      ["1000m", 1],
    ];
    for (const [input, cores] of cases) {
      expect(parser.parse(input)).toBe(cores);
    }
  });

  it("should parse CPU values with kilo-core unit ('k')", () => {
    // One kilo-core equals 1000 cores.
    const cases: [string, number][] = [
      ["1k", 1000],
      ["2k", 2000],
      ["0.5k", 500],
    ];
    for (const [input, cores] of cases) {
      expect(parser.parse(input)).toBe(cores);
    }
  });

  it("should parse CPU values with nano-core unit ('n')", () => {
    // One nano-core equals 1/1,000,000,000 cores; expected values
    // account for floating-point precision.
    const cases: [string, number][] = [
      ["1000000000n", 1],
      ["500000000n", 0.5],
      ["0.000000001n", 0.000000000000000001],
    ];
    for (const [input, cores] of cases) {
      expect(parser.parse(input)).toBe(cores);
    }
  });

  it("should parse CPU values with micro-core unit ('u')", () => {
    // One micro-core equals 1/1,000,000 cores; expected values
    // account for floating-point precision.
    const cases: [string, number][] = [
      ["1000000u", 1],
      ["500000u", 0.5],
      ["0.000001u", 0.000000000001],
    ];
    for (const [input, cores] of cases) {
      expect(parser.parse(input)).toBe(cores);
    }
  });

  it("should handle input with commas", () => {
    expect(parser.parse("1,000")).toBe(1000); // thousands separator stripped
    expect(parser.parse("1,500m")).toBe(1.5); // separator stripped before unit conversion
  });

  it("should ignore leading and trailing whitespace", () => {
    expect(parser.parse(" 1 ")).toBe(1);
    expect(parser.parse(" 500m ")).toBe(0.5);
    expect(parser.parse(" 2k ")).toBe(2000);
  });
});

View File

@@ -0,0 +1,61 @@
import { describe, expect, it } from "vitest";
import { MemoryResourceParser } from "../../../kubernetes/resource-parser/memory-resource-parser";
// Binary byte factors used to express expected results in GiB.
const GIB = 1024 ** 3; // bytes per GiB
const MIB = 1024 ** 2; // bytes per MiB
const KIB = 1024; // bytes per KiB

describe("MemoryResourceParser", () => {
  const parser = new MemoryResourceParser();

  it("should parse values without units as bytes and convert to GiB", () => {
    expect(parser.parse("1073741824")).toBe(1); // exactly 1 GiB of bytes
    expect(parser.parse("2147483648")).toBe(2); // exactly 2 GiB of bytes
  });

  it("should parse binary units (Ki, Mi, Gi, Ti, Pi) into GiB", () => {
    expect(parser.parse("1024Ki")).toBeCloseTo(1 / 1024); // 1024 KiB = 1 MiB = 1/1024 GiB
    expect(parser.parse("1Mi")).toBeCloseTo(1 / 1024); // 1 MiB = 1/1024 GiB
    expect(parser.parse("1Gi")).toBe(1); // identity
    expect(parser.parse("1Ti")).toBe(KIB); // 1 TiB = 1024 GiB
    expect(parser.parse("1Pi")).toBe(MIB); // 1 PiB = 1024^2 GiB
  });

  it("should parse decimal units (K, M, G, T, P) into GiB", () => {
    expect(parser.parse("1000K")).toBeCloseTo(1000 / GIB); // 1000 KB in GiB
    expect(parser.parse("1M")).toBeCloseTo(1 / KIB); // 1 MB = 1/1024 GiB
    expect(parser.parse("1G")).toBeCloseTo(0.9313225746154785); // 1 GB ≈ 0.931 GiB
    expect(parser.parse("1T")).toBeCloseTo(931.3225746154785); // 1 TB ≈ 931.32 GiB
    expect(parser.parse("1P")).toBeCloseTo(931322.5746154785); // 1 PB ≈ 931,322.57 GiB
  });

  it("should handle invalid input and return NaN", () => {
    for (const invalid of ["", " ", "abc"]) {
      expect(parser.parse(invalid)).toBeNaN();
    }
  });

  it("should handle commas in input and convert to GiB", () => {
    expect(parser.parse("1,073,741,824")).toBe(1); // separators stripped; 1 GiB
    expect(parser.parse("1,024Ki")).toBeCloseTo(1 / KIB); // 1024 KiB = 1 MiB
  });

  it("should handle lowercase and uppercase units", () => {
    // All spellings below resolve to 1 MiB = 1/1024 GiB.
    for (const input of ["1ki", "1KI", "1Mi", "1m"]) {
      expect(parser.parse(input)).toBeCloseTo(1 / KIB);
    }
  });

  it("should assume bytes for unrecognized or no units and convert to GiB", () => {
    expect(parser.parse("1073741824")).toBe(1); // 1 GiB
    expect(parser.parse("42")).toBeCloseTo(42 / GIB); // raw bytes
    expect(parser.parse("42unknown")).toBeCloseTo(42 / GIB); // unknown unit treated as bytes
  });
});