diff --git a/.cursor/mcp.json b/.cursor/mcp.json
index 9b3221784d..da39e4ffaf 100644
--- a/.cursor/mcp.json
+++ b/.cursor/mcp.json
@@ -1,7 +1,3 @@
{
- "mcpServers": {
- "trigger.dev": {
- "url": "http://localhost:3333/sse"
- }
- }
-}
\ No newline at end of file
+ "mcpServers": {}
+}
diff --git a/.gitignore b/.gitignore
index 9bee46fc27..6f435d0400 100644
--- a/.gitignore
+++ b/.gitignore
@@ -63,4 +63,5 @@ apps/**/public/build
/packages/core/src/package.json
/packages/trigger-sdk/src/package.json
/packages/python/src/package.json
-.claude
\ No newline at end of file
+.claude
+.mcp.log
\ No newline at end of file
diff --git a/README.md b/README.md
index dab0551dc0..22181725bc 100644
--- a/README.md
+++ b/README.md
@@ -1,44 +1,75 @@
-
-
-
-
-
-
-### Open source background jobs and AI infrastructure
-[Discord](https://trigger.dev/discord) | [Website](https://trigger.dev) | [Issues](https://github.com/triggerdotdev/trigger.dev/issues) | [Docs](https://trigger.dev/docs)
+
-[](https://twitter.com/triggerdotdev)
+### Build and deploy fully-managed AI agents and workflows
+
+[Website](https://trigger.dev) | [Docs](https://trigger.dev/docs) | [Issues](https://github.com/triggerdotdev/trigger.dev/issues) | [Feature requests](https://triggerdev.featurebase.app/) | [Public roadmap](https://triggerdev.featurebase.app/roadmap) | [Self-hosting](https://trigger.dev/docs/self-hosting/overview)
+
+[](https://github.com/triggerdotdev/trigger.dev)
+[](https://github.com/triggerdotdev/trigger.dev/blob/main/LICENSE)
+[](https://www.npmjs.com/package/@trigger.dev/sdk)
+[](https://www.npmjs.com/package/@trigger.dev/sdk)
+
+[](https://twitter.com/triggerdotdev)
+[](https://discord.gg/nkqV9xBYWy)
+[](https://github.com/triggerdotdev/trigger.dev)
## About Trigger.dev
-Trigger.dev is an open source platform and SDK which allows you to create long-running background jobs. Write normal async code, deploy, and never hit a timeout.
+Trigger.dev is the open-source platform for building AI workflows in TypeScript. Long-running tasks with retries, queues, observability, and elastic scaling.
+
+## The platform designed for building AI agents
+
+Build [AI agents](https://trigger.dev/product/ai-agents) using all the frameworks, services and LLMs you're used to, deploy them to Trigger.dev and get durable, long-running tasks with retries, queues, observability, and elastic scaling out of the box.
+
+- **Long-running without timeouts**: Execute your tasks with absolutely no timeouts, unlike AWS Lambda, Vercel, and other serverless platforms.
+
+- **Durability, retries & queues**: Build rock solid agents and AI applications using our durable tasks, retries, queues and idempotency.
-### Key features:
+- **True runtime freedom**: Customize your deployed tasks with system packages β run browsers, Python scripts, FFmpeg and more.
-- JavaScript and TypeScript SDK
-- No timeouts
-- Retries (with exponential backoff)
-- Queues and concurrency controls
-- Schedules and crons
-- Full Observability; logs, live trace views, advanced filtering
-- React hooks to interact with the Trigger API from your React app
-- Pipe LLM streams straight to your users through the Realtime API
-- Trigger tasks and display the run status and metadata anywhere in your app
-- Custom alerts, get notified by email, Slack or webhooks
-- No infrastructure to manage
-- Elastic (scaling)
-- Works with your existing tech stack
+- **Human-in-the-loop**: Programmatically pause your tasks until a human can approve, reject or give feedback.
-## In your codebase
+- **Realtime apps & streaming**: Move your background jobs to the foreground by subscribing to runs or streaming AI responses to your app.
+
+- **Observability & monitoring**: Each run has full tracing and logs. Configure error alerts to catch bugs fast.
+
+## Key features:
+
+- **[JavaScript and TypeScript SDK](https://trigger.dev/docs/tasks/overview)** - Build background tasks using familiar programming models
+- **[Long-running tasks](https://trigger.dev/docs/runs/max-duration)** - Handle resource-heavy tasks without timeouts
+- **[Durable cron schedules](https://trigger.dev/docs/tasks/scheduled#scheduled-tasks-cron)** - Create and attach recurring schedules of up to a year
+- **[Trigger.dev Realtime](https://trigger.dev/docs/realtime/overview)** - Trigger, subscribe to, and get real-time updates for runs, with LLM streaming support
+- **[Build extensions](https://trigger.dev/docs/config/extensions/overview#build-extensions)** - Hook directly into the build system and customize the build process. Run Python scripts, FFmpeg, browsers, and more.
+- **[React hooks](https://trigger.dev/docs/frontend/react-hooks#react-hooks)** - Interact with the Trigger.dev API on your frontend using our React hooks package
+- **[Batch triggering](https://trigger.dev/docs/triggering#tasks-batchtrigger)** - Use batchTrigger() to initiate multiple runs of a task with custom payloads and options
+- **[Structured inputs / outputs](https://trigger.dev/docs/tasks/schemaTask#schematask)** - Define precise data schemas for your tasks with runtime payload validation
+- **[Waits](https://trigger.dev/docs/wait)** - Add waits to your tasks to pause execution for a specified duration
+- **[Preview branches](https://trigger.dev/docs/deployment/preview-branches)** - Create isolated environments for testing and development. Integrates with Vercel and git workflows
+- **[Waitpoints](https://trigger.dev/docs/wait-for-token#wait-for-token)** - Add human-in-the-loop judgment at critical decision points without disrupting workflow
+- **[Concurrency & queues](https://trigger.dev/docs/queue-concurrency#concurrency-and-queues)** - Set concurrency rules to manage how multiple tasks execute
+- **[Multiple environments](https://trigger.dev/docs/how-it-works#dev-mode)** - Support for DEV, PREVIEW, STAGING, and PROD environments
+- **[No infrastructure to manage](https://trigger.dev/docs/how-it-works#trigger-dev-architecture)** - Auto-scaling infrastructure that eliminates timeouts and server management
+- **[Automatic retries](https://trigger.dev/docs/errors-retrying)** - If your task encounters an uncaught error, we automatically attempt to run it again
+- **[Checkpointing](https://trigger.dev/docs/how-it-works#the-checkpoint-resume-system)** - Tasks are inherently durable, thanks to our checkpointing feature
+- **[Versioning](https://trigger.dev/docs/versioning)** - Atomic versioning allows you to deploy new versions without affecting running tasks
+- **[Machines](https://trigger.dev/docs/machines)** - Configure the number of vCPUs and GBs of RAM you want the task to use
+- **[Observability & monitoring](https://trigger.dev/product/observability-and-monitoring)** - Monitor every aspect of your tasks' performance with comprehensive logging and visualization tools
+- **[Logging & tracing](https://trigger.dev/docs/logging)** - Comprehensive logging and tracing for all your tasks
+- **[Tags](https://trigger.dev/docs/tags#tags)** - Attach up to ten tags to each run, allowing you to filter via the dashboard, realtime, and the SDK
+- **[Run metadata](https://trigger.dev/docs/runs/metadata#run-metadata)** - Attach metadata to runs which updates as the run progresses and is available to use in your frontend for live updates
+- **[Bulk actions](https://trigger.dev/docs/bulk-actions)** - Perform actions on multiple runs simultaneously, including replaying and cancelling
+- **[Real-time alerts](https://trigger.dev/docs/troubleshooting-alerts#alerts)** - Choose your preferred notification method for run failures and deployments
+
+## Write tasks in your codebase
Create tasks where they belong: in your codebase. Version control, localhost, test and review like you're already used to.
```ts
-import { task } from "@trigger.dev/sdk/v3";
+import { task } from "@trigger.dev/sdk";
//1. You need to export each task
export const helloWorld = task({
@@ -58,13 +89,13 @@ Use our SDK to write tasks in your codebase. There's no infrastructure to manage
## Environments
-We support `Development`, `Staging`, and `Production` environments, allowing you to test your tasks before deploying them to production.
+We support `Development`, `Staging`, `Preview`, and `Production` environments, allowing you to test your tasks before deploying them to production.
## Full visibility of every job run
View every task in every run so you can tell exactly what happened. We provide a full trace view of every task run so you can see what happened at every step.
-
+
# Getting started
@@ -73,14 +104,19 @@ The quickest way to get started is to create an account and project in our [web
### Useful links:
- [Quick start](https://trigger.dev/docs/quick-start) - get up and running in minutes
-- [How it works](https://trigger.dev/docs/v3/how-it-works) - understand how Trigger.dev works under the hood
+- [How it works](https://trigger.dev/docs/how-it-works) - understand how Trigger.dev works under the hood
- [Guides and examples](https://trigger.dev/docs/guides/introduction) - walk-through guides and code examples for popular frameworks and use cases
## Self-hosting
-If you prefer to self-host Trigger.dev, you can follow our [self-hosting guide](https://trigger.dev/docs/v3/open-source-self-hosting#overview).
+If you prefer to self-host Trigger.dev, you can follow our [self-hosting guides](https://trigger.dev/docs/self-hosting/overview):
+
+- [Docker self-hosting guide](https://trigger.dev/docs/self-hosting/docker) - use Docker Compose to spin up a Trigger.dev instance
+- [Kubernetes self-hosting guide](https://trigger.dev/docs/self-hosting/kubernetes) - use our official Helm chart to deploy Trigger.dev to your Kubernetes cluster
+
+## Support and community
-We also have a dedicated self-hosting channel in our [Discord server](https://trigger.dev/discord) for support.
+We have a large active community in our official [Discord server](https://trigger.dev/discord) for support, including a dedicated channel for self-hosting.
## Development
diff --git a/apps/supervisor/package.json b/apps/supervisor/package.json
index 9cce9d5feb..e9609bf154 100644
--- a/apps/supervisor/package.json
+++ b/apps/supervisor/package.json
@@ -13,6 +13,7 @@
"typecheck": "tsc --noEmit"
},
"dependencies": {
+ "@aws-sdk/client-ecr": "^3.839.0",
"@kubernetes/client-node": "^1.0.0",
"@trigger.dev/core": "workspace:*",
"dockerode": "^4.0.6",
diff --git a/apps/supervisor/src/env.ts b/apps/supervisor/src/env.ts
index dfe5237912..a225af5ea1 100644
--- a/apps/supervisor/src/env.ts
+++ b/apps/supervisor/src/env.ts
@@ -76,6 +76,7 @@ const Env = z.object({
KUBERNETES_IMAGE_PULL_SECRETS: z.string().optional(), // csv
KUBERNETES_EPHEMERAL_STORAGE_SIZE_LIMIT: z.string().default("10Gi"),
KUBERNETES_EPHEMERAL_STORAGE_SIZE_REQUEST: z.string().default("2Gi"),
+ KUBERNETES_STRIP_IMAGE_DIGEST: BoolEnv.default(false),
// Placement tags settings
PLACEMENT_TAGS_ENABLED: BoolEnv.default(false),
diff --git a/apps/supervisor/src/workloadManager/docker.ts b/apps/supervisor/src/workloadManager/docker.ts
index 6aa74a7ecc..4ebbe11ca7 100644
--- a/apps/supervisor/src/workloadManager/docker.ts
+++ b/apps/supervisor/src/workloadManager/docker.ts
@@ -8,14 +8,16 @@ import { env } from "../env.js";
import { getDockerHostDomain, getRunnerId, normalizeDockerHostUrl } from "../util.js";
import Docker from "dockerode";
import { tryCatch } from "@trigger.dev/core";
+import { ECRAuthService } from "./ecrAuth.js";
export class DockerWorkloadManager implements WorkloadManager {
private readonly logger = new SimpleStructuredLogger("docker-workload-manager");
private readonly docker: Docker;
private readonly runnerNetworks: string[];
- private readonly auth?: Docker.AuthConfig;
+ private readonly staticAuth?: Docker.AuthConfig;
private readonly platformOverride?: string;
+ private readonly ecrAuthService?: ECRAuthService;
constructor(private opts: WorkloadManagerOptions) {
this.docker = new Docker({
@@ -44,13 +46,18 @@ export class DockerWorkloadManager implements WorkloadManager {
url: env.DOCKER_REGISTRY_URL,
});
- this.auth = {
+ this.staticAuth = {
username: env.DOCKER_REGISTRY_USERNAME,
password: env.DOCKER_REGISTRY_PASSWORD,
serveraddress: env.DOCKER_REGISTRY_URL,
};
+ } else if (ECRAuthService.hasAWSCredentials()) {
+ this.logger.info("π AWS credentials found, initializing ECR auth service");
+ this.ecrAuthService = new ECRAuthService();
} else {
- this.logger.warn("π No Docker registry credentials provided, skipping auth");
+ this.logger.warn(
+ "π No Docker registry credentials or AWS credentials provided, skipping auth"
+ );
}
}
@@ -160,9 +167,12 @@ export class DockerWorkloadManager implements WorkloadManager {
imageArchitecture: inspectResult?.Architecture,
});
+ // Get auth config (static or ECR)
+ const authConfig = await this.getAuthConfig();
+
// Ensure the image is present
const [createImageError, imageResponseReader] = await tryCatch(
- this.docker.createImage(this.auth, {
+ this.docker.createImage(authConfig, {
fromImage: imageRef,
...(this.platformOverride ? { platform: this.platformOverride } : {}),
})
@@ -216,6 +226,26 @@ export class DockerWorkloadManager implements WorkloadManager {
logger.debug("create succeeded", { startResult, containerId: container.id });
}
+ /**
+ * Get authentication config for Docker operations
+ * Uses static credentials if available, otherwise attempts ECR auth
+ */
+ private async getAuthConfig(): Promise<Docker.AuthConfig | undefined> {
+ // Use static credentials if available
+ if (this.staticAuth) {
+ return this.staticAuth;
+ }
+
+ // Use ECR auth if service is available
+ if (this.ecrAuthService) {
+ const ecrAuth = await this.ecrAuthService.getAuthConfig();
+ return ecrAuth || undefined;
+ }
+
+ // No auth available
+ return undefined;
+ }
+
private async attachContainerToNetworks({
containerId,
networkNames,
diff --git a/apps/supervisor/src/workloadManager/ecrAuth.ts b/apps/supervisor/src/workloadManager/ecrAuth.ts
new file mode 100644
index 0000000000..33e98f6319
--- /dev/null
+++ b/apps/supervisor/src/workloadManager/ecrAuth.ts
@@ -0,0 +1,144 @@
+import { ECRClient, GetAuthorizationTokenCommand } from "@aws-sdk/client-ecr";
+import { SimpleStructuredLogger } from "@trigger.dev/core/v3/utils/structuredLogger";
+import { tryCatch } from "@trigger.dev/core";
+import Docker from "dockerode";
+
+interface ECRTokenCache {
+ token: string;
+ username: string;
+ serverAddress: string;
+ expiresAt: Date;
+}
+
+export class ECRAuthService {
+ private readonly logger = new SimpleStructuredLogger("ecr-auth-service");
+ private readonly ecrClient: ECRClient;
+ private tokenCache: ECRTokenCache | null = null;
+
+ constructor() {
+ this.ecrClient = new ECRClient();
+
+ this.logger.info("π ECR Auth Service initialized", {
+ region: this.ecrClient.config.region,
+ });
+ }
+
+ /**
+ * Check if we have AWS credentials configured
+ */
+ static hasAWSCredentials(): boolean {
+ if (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) {
+ return true;
+ }
+
+ if (
+ process.env.AWS_PROFILE ||
+ process.env.AWS_ROLE_ARN ||
+ process.env.AWS_WEB_IDENTITY_TOKEN_FILE
+ ) {
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Check if the current token is still valid with a 10-minute buffer
+ */
+ private isTokenValid(): boolean {
+ if (!this.tokenCache) {
+ return false;
+ }
+
+ const now = new Date();
+ const bufferMs = 10 * 60 * 1000; // 10 minute buffer before expiration
+ return now < new Date(this.tokenCache.expiresAt.getTime() - bufferMs);
+ }
+
+ /**
+ * Get a fresh ECR authorization token from AWS
+ */
+ private async fetchNewToken(): Promise<ECRTokenCache | null> {
+ const [error, response] = await tryCatch(
+ this.ecrClient.send(new GetAuthorizationTokenCommand({}))
+ );
+
+ if (error) {
+ this.logger.error("Failed to get ECR authorization token", { error });
+ return null;
+ }
+
+ const authData = response.authorizationData?.[0];
+ if (!authData?.authorizationToken || !authData.proxyEndpoint) {
+ this.logger.error("Invalid ECR authorization response", { authData });
+ return null;
+ }
+
+ // Decode the base64 token to get username:password
+ const decoded = Buffer.from(authData.authorizationToken, "base64").toString("utf-8");
+ const [username, password] = decoded.split(":", 2);
+
+ if (!username || !password) {
+ this.logger.error("Failed to parse ECR authorization token");
+ return null;
+ }
+
+ const expiresAt = authData.expiresAt || new Date(Date.now() + 12 * 60 * 60 * 1000); // Default 12 hours
+
+ const tokenCache: ECRTokenCache = {
+ token: password,
+ username,
+ serverAddress: authData.proxyEndpoint,
+ expiresAt,
+ };
+
+ this.logger.info("π Successfully fetched ECR token", {
+ username,
+ serverAddress: authData.proxyEndpoint,
+ expiresAt: expiresAt.toISOString(),
+ });
+
+ return tokenCache;
+ }
+
+ /**
+ * Get ECR auth config for Docker operations
+ * Returns cached token if valid, otherwise fetches a new one
+ */
+ async getAuthConfig(): Promise<Docker.AuthConfig | null> {
+ // Check if cached token is still valid
+ if (this.isTokenValid()) {
+ this.logger.debug("Using cached ECR token");
+ return {
+ username: this.tokenCache!.username,
+ password: this.tokenCache!.token,
+ serveraddress: this.tokenCache!.serverAddress,
+ };
+ }
+
+ // Fetch new token
+ this.logger.info("Fetching new ECR authorization token");
+ const newToken = await this.fetchNewToken();
+
+ if (!newToken) {
+ return null;
+ }
+
+ // Cache the new token
+ this.tokenCache = newToken;
+
+ return {
+ username: newToken.username,
+ password: newToken.token,
+ serveraddress: newToken.serverAddress,
+ };
+ }
+
+ /**
+ * Clear the cached token (useful for testing or forcing refresh)
+ */
+ clearCache(): void {
+ this.tokenCache = null;
+ this.logger.debug("ECR token cache cleared");
+ }
+}
diff --git a/apps/supervisor/src/workloadManager/kubernetes.ts b/apps/supervisor/src/workloadManager/kubernetes.ts
index e738177cbc..b38e6c5b46 100644
--- a/apps/supervisor/src/workloadManager/kubernetes.ts
+++ b/apps/supervisor/src/workloadManager/kubernetes.ts
@@ -49,6 +49,20 @@ export class KubernetesWorkloadManager implements WorkloadManager {
};
}
+ private stripImageDigest(imageRef: string): string {
+ if (!env.KUBERNETES_STRIP_IMAGE_DIGEST) {
+ return imageRef;
+ }
+
+ const atIndex = imageRef.lastIndexOf("@");
+
+ if (atIndex === -1) {
+ return imageRef;
+ }
+
+ return imageRef.substring(0, atIndex);
+ }
+
async create(opts: WorkloadManagerCreateOptions) {
this.logger.log("[KubernetesWorkloadManager] Creating container", { opts });
@@ -74,7 +88,7 @@ export class KubernetesWorkloadManager implements WorkloadManager {
containers: [
{
name: "run-controller",
- image: opts.image,
+ image: this.stripImageDigest(opts.image),
ports: [
{
containerPort: 8000,
diff --git a/apps/webapp/app/components/Shortcuts.tsx b/apps/webapp/app/components/Shortcuts.tsx
index 8349ed970f..718166b55f 100644
--- a/apps/webapp/app/components/Shortcuts.tsx
+++ b/apps/webapp/app/components/Shortcuts.tsx
@@ -123,7 +123,7 @@ function ShortcutContent() {
-
+
diff --git a/apps/webapp/app/db.server.ts b/apps/webapp/app/db.server.ts
index c99b2e2c43..8435182e63 100644
--- a/apps/webapp/app/db.server.ts
+++ b/apps/webapp/app/db.server.ts
@@ -201,6 +201,7 @@ function getClient() {
message: log.message,
target: log.target,
},
+ ignoreError: true,
});
});
}
diff --git a/apps/webapp/app/env.server.ts b/apps/webapp/app/env.server.ts
index 1a49acddbc..fdd343c90b 100644
--- a/apps/webapp/app/env.server.ts
+++ b/apps/webapp/app/env.server.ts
@@ -410,6 +410,19 @@ const EnvironmentSchema = z.object({
MARQS_QUEUE_AGE_RANDOMIZATION_BIAS: z.coerce.number().default(0.25),
MARQS_REUSE_SNAPSHOT_COUNT: z.coerce.number().int().default(0),
MARQS_MAXIMUM_ENV_COUNT: z.coerce.number().int().optional(),
+ MARQS_SHARED_WORKER_QUEUE_CONSUMER_INTERVAL_MS: z.coerce.number().int().default(250),
+ MARQS_SHARED_WORKER_QUEUE_MAX_MESSAGE_COUNT: z.coerce.number().int().default(10),
+
+ MARQS_SHARED_WORKER_QUEUE_EAGER_DEQUEUE_ENABLED: z.string().default("0"),
+ MARQS_WORKER_ENABLED: z.string().default("0"),
+ MARQS_WORKER_COUNT: z.coerce.number().int().default(2),
+ MARQS_WORKER_CONCURRENCY_LIMIT: z.coerce.number().int().default(50),
+ MARQS_WORKER_CONCURRENCY_TASKS_PER_WORKER: z.coerce.number().int().default(5),
+ MARQS_WORKER_POLL_INTERVAL_MS: z.coerce.number().int().default(100),
+ MARQS_WORKER_IMMEDIATE_POLL_INTERVAL_MS: z.coerce.number().int().default(100),
+ MARQS_WORKER_SHUTDOWN_TIMEOUT_MS: z.coerce.number().int().default(60_000),
+ MARQS_SHARED_WORKER_QUEUE_COOLOFF_COUNT_THRESHOLD: z.coerce.number().int().default(10),
+ MARQS_SHARED_WORKER_QUEUE_COOLOFF_PERIOD_MS: z.coerce.number().int().default(5_000),
PROD_TASK_HEARTBEAT_INTERVAL_MS: z.coerce.number().int().optional(),
@@ -436,6 +449,7 @@ const EnvironmentSchema = z.object({
EVENT_LOOP_MONITOR_ENABLED: z.string().default("1"),
MAXIMUM_LIVE_RELOADING_EVENTS: z.coerce.number().int().default(1000),
MAXIMUM_TRACE_SUMMARY_VIEW_COUNT: z.coerce.number().int().default(25_000),
+ MAXIMUM_TRACE_DETAILED_SUMMARY_VIEW_COUNT: z.coerce.number().int().default(10_000),
TASK_PAYLOAD_OFFLOAD_THRESHOLD: z.coerce.number().int().default(524_288), // 512KB
TASK_PAYLOAD_MAXIMUM_SIZE: z.coerce.number().int().default(3_145_728), // 3MB
BATCH_TASK_PAYLOAD_MAXIMUM_SIZE: z.coerce.number().int().default(1_000_000), // 1MB
@@ -1059,6 +1073,8 @@ const EnvironmentSchema = z.object({
// AI Run Filter
AI_RUN_FILTER_MODEL: z.string().optional(),
+
+ EVENT_LOOP_MONITOR_THRESHOLD_MS: z.coerce.number().int().default(100),
});
export type Environment = z.infer<typeof EnvironmentSchema>;
diff --git a/apps/webapp/app/eventLoopMonitor.server.ts b/apps/webapp/app/eventLoopMonitor.server.ts
index db25a28137..42e982bdb9 100644
--- a/apps/webapp/app/eventLoopMonitor.server.ts
+++ b/apps/webapp/app/eventLoopMonitor.server.ts
@@ -1,10 +1,12 @@
import { createHook } from "node:async_hooks";
import { singleton } from "./utils/singleton";
import { tracer } from "./v3/tracer.server";
+import { env } from "./env.server";
+import { context, Context } from "@opentelemetry/api";
-const THRESHOLD_NS = 1e8; // 100ms
+const THRESHOLD_NS = env.EVENT_LOOP_MONITOR_THRESHOLD_MS * 1e6;
-const cache = new Map();
+const cache = new Map();
function init(asyncId: number, type: string, triggerAsyncId: number, resource: any) {
cache.set(asyncId, {
@@ -26,6 +28,7 @@ function before(asyncId: number) {
cache.set(asyncId, {
...cached,
start: process.hrtime(),
+ parentCtx: context.active(),
});
}
@@ -47,13 +50,17 @@ function after(asyncId: number) {
if (diffNs > THRESHOLD_NS) {
const time = diffNs / 1e6; // in ms
- const newSpan = tracer.startSpan("event-loop-blocked", {
- startTime: new Date(new Date().getTime() - time),
- attributes: {
- asyncType: cached.type,
- label: "EventLoopMonitor",
+ const newSpan = tracer.startSpan(
+ "event-loop-blocked",
+ {
+ startTime: new Date(new Date().getTime() - time),
+ attributes: {
+ asyncType: cached.type,
+ label: "EventLoopMonitor",
+ },
},
- });
+ cached.parentCtx
+ );
newSpan.end();
}
diff --git a/apps/webapp/app/models/runtimeEnvironment.server.ts b/apps/webapp/app/models/runtimeEnvironment.server.ts
index adde2db5ca..67119acd08 100644
--- a/apps/webapp/app/models/runtimeEnvironment.server.ts
+++ b/apps/webapp/app/models/runtimeEnvironment.server.ts
@@ -37,7 +37,7 @@ export async function findEnvironmentByApiKey(
if (environment.type === "PREVIEW") {
if (!branchName) {
- logger.error("findEnvironmentByApiKey(): Preview env with no branch name provided", {
+ logger.warn("findEnvironmentByApiKey(): Preview env with no branch name provided", {
environmentId: environment.id,
});
return null;
diff --git a/apps/webapp/app/root.tsx b/apps/webapp/app/root.tsx
index f46d3a65ff..d481a69ab4 100644
--- a/apps/webapp/app/root.tsx
+++ b/apps/webapp/app/root.tsx
@@ -23,7 +23,7 @@ export const links: LinksFunction = () => {
export const meta: MetaFunction = ({ data }) => {
const typedData = data as UseDataFunctionReturn;
return [
- { title: `Trigger.dev${appEnvTitleTag(typedData.appEnv)}` },
+ { title: typedData?.appEnv ? `Trigger.dev${appEnvTitleTag(typedData.appEnv)}` : "Trigger.dev" },
{
name: "viewport",
content: "width=1024, initial-scale=1",
@@ -84,11 +84,13 @@ export function ErrorBoundary() {
-
-
-
-
-
+
+
+
+
+
+
+