;
+}
+```
+
+
+
+### get
+
+
Promise<T | null>`} />
+
+
+### set
+
+ Promise<void>`} />
+
+
+### delete
+
+ Promise<void>`} />
+
+
+### incr
+
+ Promise<FixedWindowConsumeResult>`} />
+
+
+### ttl
+
+ Promise<number | null>`} />
+
+
+### expire
+
+ Promise<void>`} />
+
+
+### zAdd
+
+ Promise<void>`} />
+
+
+### zRemRangeByScore
+
+ Promise<void>`} />
+
+
+### zCard
+
+ Promise<number>`} />
+
+
+### zRangeByScore
+
+ Promise<string[]>`} />
+
+
+### consumeFixedWindow
+
+ Promise<FixedWindowConsumeResult>`} />
+
+
+### consumeSlidingWindowLog
+
+ Promise<SlidingWindowConsumeResult>`} />
+
+
+### deleteByPrefix
+
+ Promise<void>`} />
+
+
+### deleteByPattern
+
+ Promise<void>`} />
+
+
+### keysByPrefix
+
+ Promise<string[]>`} />
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/rate-limit-store-value.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/rate-limit-store-value.mdx
new file mode 100644
index 00000000..35b31b00
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/rate-limit-store-value.mdx
@@ -0,0 +1,59 @@
+---
+title: "RateLimitStoreValue"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitStoreValue
+
+
+
+Aggregate results stored on the environment store for downstream handlers.
+
+```ts title="Signature"
+interface RateLimitStoreValue {
+ limited: boolean;
+ remaining: number;
+ resetAt: number;
+ retryAfter: number;
+ results: RateLimitResult[];
+}
+```
+
+
+
+### limited
+
+
+
+
+### remaining
+
+
+
+
+### resetAt
+
+
+
+
+### retryAfter
+
+
+
+
+### results
+
+RateLimitResult[]`} />
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/rate-limit-window-config.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/rate-limit-window-config.mdx
new file mode 100644
index 00000000..13f2228e
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/rate-limit-window-config.mdx
@@ -0,0 +1,77 @@
+---
+title: "RateLimitWindowConfig"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitWindowConfig
+
+
+
+Per-window overrides when a limiter defines multiple windows.
+
+```ts title="Signature"
+interface RateLimitWindowConfig {
+ id?: string;
+ maxRequests?: number;
+ interval?: DurationLike;
+ algorithm?: RateLimitAlgorithmType;
+ burst?: number;
+ refillRate?: number;
+ leakRate?: number;
+ violations?: ViolationOptions;
+}
+```
+
+
+
+### id
+
+
+
+
+### maxRequests
+
+
+
+
+### interval
+
+DurationLike`} />
+
+
+### algorithm
+
+RateLimitAlgorithmType`} />
+
+
+### burst
+
+
+
+
+### refillRate
+
+
+
+
+### leakRate
+
+
+
+
+### violations
+
+ViolationOptions`} />
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/reset-all-rate-limits-params.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/reset-all-rate-limits-params.mdx
new file mode 100644
index 00000000..4f8043a5
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/reset-all-rate-limits-params.mdx
@@ -0,0 +1,77 @@
+---
+title: "ResetAllRateLimitsParams"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## ResetAllRateLimitsParams
+
+
+
+Parameters for batch resets by scope, prefix, or pattern.
+
+```ts title="Signature"
+interface ResetAllRateLimitsParams {
+ scope?: RateLimitScope;
+ userId?: string;
+ guildId?: string;
+ channelId?: string;
+ commandName?: string;
+ keyPrefix?: string;
+ pattern?: string;
+ prefix?: string;
+}
+```
+
+
+
+### scope
+
+RateLimitScope`} />
+
+
+### userId
+
+
+
+
+### guildId
+
+
+
+
+### channelId
+
+
+
+
+### commandName
+
+
+
+
+### keyPrefix
+
+
+
+
+### pattern
+
+
+
+
+### prefix
+
+
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/reset-rate-limit-params.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/reset-rate-limit-params.mdx
new file mode 100644
index 00000000..de8af909
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/reset-rate-limit-params.mdx
@@ -0,0 +1,71 @@
+---
+title: "ResetRateLimitParams"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## ResetRateLimitParams
+
+
+
+Parameters for resetting a single key or scope-derived key.
+
+```ts title="Signature"
+interface ResetRateLimitParams {
+ key?: string;
+ scope?: RateLimitScope;
+ userId?: string;
+ guildId?: string;
+ channelId?: string;
+ commandName?: string;
+ keyPrefix?: string;
+}
+```
+
+
+
+### key
+
+
+
+
+### scope
+
+RateLimitScope`} />
+
+
+### userId
+
+
+
+
+### guildId
+
+
+
+
+### channelId
+
+
+
+
+### commandName
+
+
+
+
+### keyPrefix
+
+
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/resolve-scope-key-params.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/resolve-scope-key-params.mdx
new file mode 100644
index 00000000..874f7b89
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/resolve-scope-key-params.mdx
@@ -0,0 +1,65 @@
+---
+title: "ResolveScopeKeyParams"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## ResolveScopeKeyParams
+
+
+
+Inputs for resolving a scope-based key from a command/source.
+
+```ts title="Signature"
+interface ResolveScopeKeyParams {
+ ctx: Context;
+ source: Interaction | Message;
+ command: LoadedCommand;
+ scope: RateLimitScope;
+ keyPrefix?: string;
+ keyResolver?: RateLimitKeyResolver;
+}
+```
+
+
+
+### ctx
+
+Context`} />
+
+
+### source
+
+
+
+
+### command
+
+LoadedCommand`} />
+
+
+### scope
+
+RateLimitScope`} />
+
+
+### keyPrefix
+
+
+
+
+### keyResolver
+
+RateLimitKeyResolver`} />
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/resolved-limiter-config.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/resolved-limiter-config.mdx
new file mode 100644
index 00000000..e9886c49
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/resolved-limiter-config.mdx
@@ -0,0 +1,83 @@
+---
+title: "ResolvedLimiterConfig"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## ResolvedLimiterConfig
+
+
+
+Limiter configuration after defaults are applied.
+
+```ts title="Signature"
+interface ResolvedLimiterConfig {
+ maxRequests: number;
+ intervalMs: number;
+ algorithm: RateLimitAlgorithmType;
+ scope: RateLimitScope;
+ burst: number;
+ refillRate: number;
+ leakRate: number;
+ violations?: ViolationOptions;
+ windowId?: string;
+}
+```
+
+
+
+### maxRequests
+
+
+
+
+### intervalMs
+
+
+
+
+### algorithm
+
+RateLimitAlgorithmType`} />
+
+
+### scope
+
+RateLimitScope`} />
+
+
+### burst
+
+
+
+
+### refillRate
+
+
+
+
+### leakRate
+
+
+
+
+### violations
+
+ViolationOptions`} />
+
+
+### windowId
+
+
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/resolved-scope-key.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/resolved-scope-key.mdx
new file mode 100644
index 00000000..3a8ef55e
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/resolved-scope-key.mdx
@@ -0,0 +1,41 @@
+---
+title: "ResolvedScopeKey"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## ResolvedScopeKey
+
+
+
+Resolved key paired with its scope for aggregation.
+
+```ts title="Signature"
+interface ResolvedScopeKey {
+ scope: RateLimitScope;
+ key: string;
+}
+```
+
+
+
+### scope
+
+RateLimitScope`} />
+
+
+### key
+
+
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/sliding-window-consume-result.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/sliding-window-consume-result.mdx
new file mode 100644
index 00000000..3b43136d
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/sliding-window-consume-result.mdx
@@ -0,0 +1,47 @@
+---
+title: "SlidingWindowConsumeResult"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## SlidingWindowConsumeResult
+
+
+
+Storage result for sliding-window log consumes.
+
+```ts title="Signature"
+interface SlidingWindowConsumeResult {
+ allowed: boolean;
+ count: number;
+ resetAt: number;
+}
+```
+
+
+
+### allowed
+
+
+
+
+### count
+
+
+
+
+### resetAt
+
+
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/token-bucket-config.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/token-bucket-config.mdx
new file mode 100644
index 00000000..ec3dca28
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/token-bucket-config.mdx
@@ -0,0 +1,47 @@
+---
+title: "TokenBucketConfig"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## TokenBucketConfig
+
+
+
+
+
+```ts title="Signature"
+interface TokenBucketConfig {
+ capacity: number;
+ refillRate: number;
+ scope: RateLimitResult['scope'];
+}
+```
+
+
+
+### capacity
+
+
+
+Maximum tokens available when the bucket is full.
+### refillRate
+
+
+
+Tokens added per second during refill.
+### scope
+
+RateLimitResult['scope']`} />
+
+Scope reported in rate-limit results.
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/interfaces/violation-options.mdx b/apps/website/docs/api-reference/ratelimit/interfaces/violation-options.mdx
new file mode 100644
index 00000000..b5db461e
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/interfaces/violation-options.mdx
@@ -0,0 +1,53 @@
+---
+title: "ViolationOptions"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## ViolationOptions
+
+
+
+Escalation settings for repeated violations.
+
+```ts title="Signature"
+interface ViolationOptions {
+ escalate?: boolean;
+ maxViolations?: number;
+ escalationMultiplier?: number;
+ resetAfter?: DurationLike;
+}
+```
+
+
+
+### escalate
+
+
+
+
+### maxViolations
+
+
+
+
+### escalationMultiplier
+
+
+
+
+### resetAfter
+
+DurationLike`} />
+
+
+
+
+
diff --git a/apps/website/docs/api-reference/ratelimit/types/duration-like.mdx b/apps/website/docs/api-reference/ratelimit/types/duration-like.mdx
new file mode 100644
index 00000000..42047870
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/duration-like.mdx
@@ -0,0 +1,22 @@
+---
+title: "DurationLike"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## DurationLike
+
+
+
+Duration input accepted by configs: milliseconds or a duration string.
+
+```ts title="Signature"
+type DurationLike = number | string
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/index.mdx b/apps/website/docs/api-reference/ratelimit/types/index.mdx
new file mode 100644
index 00000000..224a2db8
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/index.mdx
@@ -0,0 +1,16 @@
+---
+title: "Type Aliases"
+isDefaultIndex: true
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+import DocCardList from '@theme/DocCardList';
+
+
\ No newline at end of file
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-algorithm-type.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-algorithm-type.mdx
new file mode 100644
index 00000000..27a29129
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-algorithm-type.mdx
@@ -0,0 +1,22 @@
+---
+title: "RateLimitAlgorithmType"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitAlgorithmType
+
+
+
+Literal union of algorithm identifiers.
+
+```ts title="Signature"
+type RateLimitAlgorithmType = (typeof RATE_LIMIT_ALGORITHMS)[number]
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-exemption-scope.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-exemption-scope.mdx
new file mode 100644
index 00000000..806dbddf
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-exemption-scope.mdx
@@ -0,0 +1,22 @@
+---
+title: "RateLimitExemptionScope"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitExemptionScope
+
+
+
+Literal union of exemption scopes.
+
+```ts title="Signature"
+type RateLimitExemptionScope = (typeof RATE_LIMIT_EXEMPTION_SCOPES)[number]
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-key-resolver.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-key-resolver.mdx
new file mode 100644
index 00000000..2308fd4c
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-key-resolver.mdx
@@ -0,0 +1,26 @@
+---
+title: "RateLimitKeyResolver"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitKeyResolver
+
+
+
+Custom key builder for the `custom` scope.
+
+```ts title="Signature"
+type RateLimitKeyResolver = (
+ ctx: Context,
+ command: LoadedCommand,
+ source: Interaction | Message,
+) => string
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-response-handler.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-response-handler.mdx
new file mode 100644
index 00000000..d8418d11
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-response-handler.mdx
@@ -0,0 +1,25 @@
+---
+title: "RateLimitResponseHandler"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitResponseHandler
+
+
+
+Override for responding when a command is rate-limited.
+
+```ts title="Signature"
+type RateLimitResponseHandler = (
+ ctx: Context,
+ info: RateLimitStoreValue,
+) => Promise<void> | void
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-role-limit-strategy.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-role-limit-strategy.mdx
new file mode 100644
index 00000000..5bd03bdc
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-role-limit-strategy.mdx
@@ -0,0 +1,22 @@
+---
+title: "RateLimitRoleLimitStrategy"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitRoleLimitStrategy
+
+
+
+Strategy for choosing among matching role-based overrides.
+
+```ts title="Signature"
+type RateLimitRoleLimitStrategy = 'highest' | 'lowest' | 'first'
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-scope.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-scope.mdx
new file mode 100644
index 00000000..e64bb379
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-scope.mdx
@@ -0,0 +1,22 @@
+---
+title: "RateLimitScope"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitScope
+
+
+
+Literal union of supported key scopes.
+
+```ts title="Signature"
+type RateLimitScope = (typeof RATE_LIMIT_SCOPES)[number]
+```
diff --git a/apps/website/docs/api-reference/ratelimit/types/rate-limit-storage-config.mdx b/apps/website/docs/api-reference/ratelimit/types/rate-limit-storage-config.mdx
new file mode 100644
index 00000000..46f3ffe0
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/types/rate-limit-storage-config.mdx
@@ -0,0 +1,25 @@
+---
+title: "RateLimitStorageConfig"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RateLimitStorageConfig
+
+
+
+Storage configuration: direct instance or `{ driver }` wrapper for parity.
+
+```ts title="Signature"
+type RateLimitStorageConfig = | RateLimitStorage
+ | {
+ driver: RateLimitStorage;
+ }
+```
diff --git a/apps/website/docs/api-reference/ratelimit/variables/-ckitirl.mdx b/apps/website/docs/api-reference/ratelimit/variables/-ckitirl.mdx
new file mode 100644
index 00000000..e914ae97
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/-ckitirl.mdx
@@ -0,0 +1,19 @@
+---
+title: "$ckitirl"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## $ckitirl
+
+
+
+Wrapper symbol injected by the compiler plugin.
+
diff --git a/apps/website/docs/api-reference/ratelimit/variables/default_key_prefix.mdx b/apps/website/docs/api-reference/ratelimit/variables/default_key_prefix.mdx
new file mode 100644
index 00000000..0ea96f6a
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/default_key_prefix.mdx
@@ -0,0 +1,19 @@
+---
+title: "DEFAULT_KEY_PREFIX"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## DEFAULT_KEY_PREFIX
+
+
+
+Default prefix for storage keys; can be overridden per config.
+
diff --git a/apps/website/docs/api-reference/ratelimit/variables/default_limiter.mdx b/apps/website/docs/api-reference/ratelimit/variables/default_limiter.mdx
new file mode 100644
index 00000000..f4bc1194
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/default_limiter.mdx
@@ -0,0 +1,19 @@
+---
+title: "DEFAULT_LIMITER"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## DEFAULT_LIMITER
+
+
+
+Default limiter used when no explicit configuration is provided.
+
diff --git a/apps/website/docs/api-reference/ratelimit/variables/index.mdx b/apps/website/docs/api-reference/ratelimit/variables/index.mdx
new file mode 100644
index 00000000..44c32427
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/index.mdx
@@ -0,0 +1,16 @@
+---
+title: "Variables"
+isDefaultIndex: true
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+import DocCardList from '@theme/DocCardList';
+
+
\ No newline at end of file
diff --git a/apps/website/docs/api-reference/ratelimit/variables/rate_limit_algorithms.mdx b/apps/website/docs/api-reference/ratelimit/variables/rate_limit_algorithms.mdx
new file mode 100644
index 00000000..f38c27c3
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/rate_limit_algorithms.mdx
@@ -0,0 +1,19 @@
+---
+title: "RATE_LIMIT_ALGORITHMS"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RATE_LIMIT_ALGORITHMS
+
+
+
+Algorithm identifiers used to select the limiter implementation.
+
diff --git a/apps/website/docs/api-reference/ratelimit/variables/rate_limit_exemption_scopes.mdx b/apps/website/docs/api-reference/ratelimit/variables/rate_limit_exemption_scopes.mdx
new file mode 100644
index 00000000..0a85b4bc
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/rate_limit_exemption_scopes.mdx
@@ -0,0 +1,19 @@
+---
+title: "RATE_LIMIT_EXEMPTION_SCOPES"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RATE_LIMIT_EXEMPTION_SCOPES
+
+
+
+Scopes eligible for temporary exemptions stored in rate limit storage.
+
diff --git a/apps/website/docs/api-reference/ratelimit/variables/rate_limit_scopes.mdx b/apps/website/docs/api-reference/ratelimit/variables/rate_limit_scopes.mdx
new file mode 100644
index 00000000..7c1f28d7
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/rate_limit_scopes.mdx
@@ -0,0 +1,19 @@
+---
+title: "RATE_LIMIT_SCOPES"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RATE_LIMIT_SCOPES
+
+
+
+Scopes used to build rate limit keys and apply per-scope limits.
+
diff --git a/apps/website/docs/api-reference/ratelimit/variables/ratelimit_store_key.mdx b/apps/website/docs/api-reference/ratelimit/variables/ratelimit_store_key.mdx
new file mode 100644
index 00000000..c9d713e9
--- /dev/null
+++ b/apps/website/docs/api-reference/ratelimit/variables/ratelimit_store_key.mdx
@@ -0,0 +1,19 @@
+---
+title: "RATELIMIT_STORE_KEY"
+isDefaultIndex: false
+generated: true
+---
+
+import MemberInfo from '@site/src/components/MemberInfo';
+import GenerationInfo from '@site/src/components/GenerationInfo';
+import MemberDescription from '@site/src/components/MemberDescription';
+
+
+
+
+## RATELIMIT_STORE_KEY
+
+
+
+Store key used to stash aggregated results in CommandKit envs.
+
diff --git a/apps/website/docs/guide/05-official-plugins/07-commandkit-ratelimit.mdx b/apps/website/docs/guide/05-official-plugins/07-commandkit-ratelimit.mdx
new file mode 100644
index 00000000..53d488c2
--- /dev/null
+++ b/apps/website/docs/guide/05-official-plugins/07-commandkit-ratelimit.mdx
@@ -0,0 +1,798 @@
+---
+title: '@commandkit/ratelimit'
+description: Official CommandKit rate limiting plugin with detailed runtime, storage, and behavior reference.
+---
+
+`@commandkit/ratelimit` is the official CommandKit plugin for advanced rate limiting. It provides multi-window policies, role overrides, queueing, exemptions, and multiple algorithms while keeping command handlers lean.
+
+The `ratelimit()` factory returns two plugins in order: the compiler plugin for the "use ratelimit" directive and the runtime plugin that enforces limits. Runtime options must be configured before the runtime plugin activates.
+
+## Installation
+
+Install the ratelimit plugin to get started:
+
+```bash npm2yarn
+npm install @commandkit/ratelimit
+```
+
+## Setup
+
+Add the ratelimit plugin to your CommandKit configuration and define a runtime config file.
+
+### Quick start
+
+Create an auto-loaded runtime config file (for example `src/ratelimit.ts`) and configure the default limiter:
+
+```ts title="src/ratelimit.ts"
+import { configureRatelimit } from '@commandkit/ratelimit';
+
+configureRatelimit({
+ defaultLimiter: {
+ maxRequests: 5,
+ interval: '1m',
+ scope: 'user',
+ algorithm: 'fixed-window',
+ },
+});
+```
+
+Register the plugin in your config:
+
+```ts title="commandkit.config.ts"
+import { defineConfig } from 'commandkit';
+import { ratelimit } from '@commandkit/ratelimit';
+
+export default defineConfig({
+ plugins: [ratelimit()],
+});
+```
+
+The runtime plugin auto-loads `ratelimit.ts` or `ratelimit.js` on startup before commands execute.
+
+## Runtime configuration lifecycle
+
+### Runtime lifecycle diagram
+
+```mermaid
+graph TD
+ A[App startup] --> B[Auto-load ratelimit.ts/js]
+ B --> C["configureRatelimit()"]
+ C --> D["Runtime plugin activate()"]
+ D --> E[Resolve storage]
+ E --> F[Resolve limiter config]
+ F --> G[Consume algorithm]
+ G --> H[Aggregate result]
+ H --> I[Default response / hooks / events]
+```
+
+### `configureRatelimit` is required
+
+`RateLimitPlugin.activate()` throws if `configureRatelimit()` was not called. This is enforced to avoid silently running without your intended defaults.
+
+:::warning Runtime configuration required
+
+Make sure `configureRatelimit()` runs at startup (for example in `ratelimit.ts` or `ratelimit.js`) before the runtime plugin activates. If it does not, the plugin will throw on startup.
+
+:::
+
+### How configuration is stored
+
+`configureRatelimit()` merges your config into an in-memory object and sets the configured flag. `getRateLimitConfig()` returns the current object, and `isRateLimitConfigured()` returns whether initialization has happened. If a runtime context is already active, `configureRatelimit()` updates it immediately.
+
+### Runtime storage selection
+
+Storage is resolved in this order:
+
+| Order | Source | Notes |
+| --- | --- | --- |
+| 1 | Limiter `storage` override | `RateLimitLimiterConfig.storage` for the command being executed. |
+| 2 | Plugin `storage` option | `RateLimitPluginOptions.storage`. |
+| 3 | Process default | Set via `setRateLimitStorage()` or `setDriver()`. |
+| 4 | Default memory storage | Used unless `initializeDefaultStorage` or `initializeDefaultDriver` is `false`. |
+
+If no storage is resolved and defaults are disabled, the plugin logs once and stores an empty result without limiting.
+
+### Runtime helpers
+
+These helpers are process-wide:
+
+| Helper | Purpose |
+| --- | --- |
+| `configureRatelimit` | Set runtime options and update active runtime state. |
+| `getRateLimitConfig` | Read the merged in-memory runtime config. |
+| `isRateLimitConfigured` | Check whether `configureRatelimit()` was called. |
+| `setRateLimitStorage` | Set the default storage for the process. |
+| `getRateLimitStorage` | Get the process default storage (or `null`). |
+| `setDriver` / `getDriver` | Aliases for `setRateLimitStorage` / `getRateLimitStorage`. |
+| `setRateLimitRuntime` | Set the active runtime context for APIs and directives. |
+| `getRateLimitRuntime` | Get the active runtime context (or `null`). |
+
+## Basic usage
+
+Use command metadata or the `use ratelimit` directive to enable rate limiting.
+This section focuses on command metadata; see the directive section for
+function-level usage.
+
+### Command metadata and enablement
+
+Enable rate limiting by setting `metadata.ratelimit`:
+
+```ts title="src/app/commands/ping.ts"
+export const metadata = {
+ ratelimit: {
+ maxRequests: 3,
+ interval: '10s',
+ scope: 'user',
+ algorithm: 'sliding-window',
+ },
+};
+```
+
+`metadata.ratelimit` can be one of:
+
+| Value | Meaning |
+| --- | --- |
+| `false` or `undefined` | Plugin does nothing for this command. |
+| `true` | Enable rate limiting using resolved defaults. |
+| `RateLimitCommandConfig` | Enable rate limiting with command-level overrides. |
+
+If `env.context` is missing in the execution environment, the plugin skips rate limiting.
+
+### Named limiter example
+
+```ts title="src/ratelimit.ts"
+configureRatelimit({
+ limiters: {
+ heavy: { maxRequests: 1, interval: '10s', algorithm: 'fixed-window' },
+ },
+});
+```
+
+```ts title="src/app/commands/report.ts"
+export const metadata = {
+ ratelimit: {
+ limiter: 'heavy',
+ scope: 'user',
+ },
+};
+```
+
+## Configuration reference
+
+### RateLimitPluginOptions
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `defaultLimiter` | `RateLimitLimiterConfig` | `DEFAULT_LIMITER` when unset | Base limiter for all commands and directives. |
+| `limiters` | `Record<string, RateLimitLimiterConfig>` | `undefined` | Named limiter presets. |
+| `storage` | `RateLimitStorageConfig` | `undefined` | Resolved before default storage. |
+| `keyPrefix` | `string` | `undefined` | Prepended before `rl:`. |
+| `keyResolver` | `RateLimitKeyResolver` | `undefined` | Used for `custom` scope when the limiter does not override it. |
+| `bypass` | `RateLimitBypassOptions` | `undefined` | Permanent allowlists and optional check. |
+| `hooks` | `RateLimitHooks` | `undefined` | Lifecycle callbacks. |
+| `onRateLimited` | `RateLimitResponseHandler` | `undefined` | Overrides default reply. |
+| `queue` | `RateLimitQueueOptions` | `undefined` | If any queue config exists, `enabled` defaults to `true`. |
+| `roleLimits` | `Record<string, RateLimitLimiterConfig>` | `undefined` | Base role limits, keyed by role id. |
+| `roleLimitStrategy` | `RateLimitRoleLimitStrategy` | `highest` when resolving | Used when multiple roles match. |
+| `initializeDefaultStorage` | `boolean` | `true` | Disable to prevent memory fallback. |
+| `initializeDefaultDriver` | `boolean` | `true` | Alias for `initializeDefaultStorage`. |
+
+### RateLimitLimiterConfig
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `maxRequests` | `number` | `10` when missing or `<= 0` | Used by fixed and sliding windows. |
+| `interval` | `DurationLike` | `60s` when missing or invalid | Parsed and clamped to `>= 1ms`. |
+| `scope` | `RateLimitScope` or `RateLimitScope[]` | `user` | Arrays are deduplicated. |
+| `algorithm` | `RateLimitAlgorithmType` | `fixed-window` | Unknown values fall back to fixed-window. |
+| `burst` | `number` | `maxRequests` when missing or `<= 0` | Capacity for token or leaky buckets. |
+| `refillRate` | `number` | `maxRequests / intervalSeconds` | Must be `> 0` for token bucket. |
+| `leakRate` | `number` | `maxRequests / intervalSeconds` | Must be `> 0` for leaky bucket. |
+| `keyResolver` | `RateLimitKeyResolver` | `undefined` | Used only for `custom` scope. |
+| `keyPrefix` | `string` | `undefined` | Overrides plugin prefix for this limiter. |
+| `storage` | `RateLimitStorageConfig` | `undefined` | Overrides storage for this limiter. |
+| `violations` | `ViolationOptions` | `undefined` | Enables escalation unless `escalate` is `false`. |
+| `queue` | `RateLimitQueueOptions` | `undefined` | Overrides queue settings at this layer. |
+| `windows` | `RateLimitWindowConfig[]` | `undefined` | Enables multi-window behavior. |
+| `roleLimits` | `Record<string, RateLimitLimiterConfig>` | `undefined` | Role overrides at this layer, keyed by role id. |
+| `roleLimitStrategy` | `RateLimitRoleLimitStrategy` | `highest` when resolving | Used when role limits match. |
+
+### RateLimitWindowConfig
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `id` | `string` | `w1`, `w2`, ... | Auto-generated if empty or missing. |
+| `maxRequests` | `number` | Inherits from base limiter | Applies only to this window. |
+| `interval` | `DurationLike` | Inherits from base limiter | Parsed like the base limiter. |
+| `algorithm` | `RateLimitAlgorithmType` | Inherits from base limiter | Usually keep consistent across windows. |
+| `burst` | `number` | Inherits from base limiter | Used for token or leaky buckets. |
+| `refillRate` | `number` | Inherits from base limiter | Must be `> 0` for token bucket. |
+| `leakRate` | `number` | Inherits from base limiter | Must be `> 0` for leaky bucket. |
+| `violations` | `ViolationOptions` | Inherits from base limiter | Overrides escalation for this window. |
+
+### RateLimitQueueOptions
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `enabled` | `boolean` | `true` when any queue config exists | Otherwise `false`. |
+| `maxSize` | `number` | `3` and clamped to `>= 1` | Queue size is pending plus running. |
+| `timeout` | `DurationLike` | `30s` and clamped to `>= 1ms` | Applies per queued task. |
+| `deferInteraction` | `boolean` | `true` unless explicitly `false` | Only used for interactions. |
+| `ephemeral` | `boolean` | `true` unless explicitly `false` | Applies to deferred replies. |
+| `concurrency` | `number` | `1` and clamped to `>= 1` | Controls per-key queue concurrency. |
+
+### ViolationOptions
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `escalate` | `boolean` | `true` when `violations` is set | Set `false` to disable escalation. |
+| `maxViolations` | `number` | `5` | Maximum escalation steps. |
+| `escalationMultiplier` | `number` | `2` | Multiplies cooldown per violation. |
+| `resetAfter` | `DurationLike` | `1h` | TTL for violation state. |
+
+### RateLimitCommandConfig
+
+`RateLimitCommandConfig` extends `RateLimitLimiterConfig` and adds:
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `limiter` | `string` | `undefined` | References a named limiter in `limiters`. |
+
+### Result shapes
+
+RateLimitStoreValue:
+
+| Field | Type | Meaning |
+| --- | --- | --- |
+| `limited` | `boolean` | `true` if any scope or window was limited. |
+| `remaining` | `number` | Minimum remaining across all results. |
+| `resetAt` | `number` | Latest reset timestamp across all results. |
+| `retryAfter` | `number` | Max retry delay across limited results. |
+| `results` | `RateLimitResult[]` | Individual results per scope and window. |
+
+RateLimitResult:
+
+| Field | Type | Meaning |
+| --- | --- | --- |
+| `key` | `string` | Storage key used for the limiter. |
+| `scope` | `RateLimitScope` | Scope applied for the limiter. |
+| `algorithm` | `RateLimitAlgorithmType` | Algorithm used for the limiter. |
+| `windowId` | `string` | Present for multi-window limits. |
+| `limited` | `boolean` | Whether this limiter hit its limit. |
+| `remaining` | `number` | Remaining requests or capacity. |
+| `resetAt` | `number` | Absolute reset timestamp in ms. |
+| `retryAfter` | `number` | Delay until retry is allowed, in ms. |
+| `limit` | `number` | `maxRequests` for fixed and sliding, `burst` for token and leaky buckets. |
+
+## Limiter resolution and role strategy
+
+Limiter configuration is layered in this exact order, with later layers overriding earlier ones:
+
+| Order | Source | Notes |
+| --- | --- | --- |
+| 1 | `DEFAULT_LIMITER` | Base defaults. |
+| 2 | `defaultLimiter` | Runtime defaults. |
+| 3 | Named limiter | When `metadata.ratelimit.limiter` is set. |
+| 4 | Command overrides | `metadata.ratelimit` config. |
+| 5 | Role override | Selected by role strategy. |
+
+### Limiter resolution diagram
+
+```mermaid
+graph TD
+ A[DEFAULT_LIMITER] --> B[defaultLimiter]
+ B --> C[Named limiter]
+ C --> D[Command overrides]
+ D --> E["Role override (strategy)"]
+```
+
+Role limits are merged in this order, with later maps overriding earlier ones for the same role id:
+
+| Order | Source |
+| --- | --- |
+| 1 | Plugin `roleLimits` |
+| 2 | `defaultLimiter.roleLimits` |
+| 3 | Named limiter `roleLimits` |
+| 4 | Command `roleLimits` |
+
+Role strategies:
+
+| Strategy | Selection rule |
+| --- | --- |
+| `highest` | Picks the role with the highest request rate (`maxRequests / intervalMs`). |
+| `lowest` | Picks the role with the lowest request rate. |
+| `first` | Uses insertion order of the merged role limits object. |
+
+For multi-window limiters, the score uses the minimum rate across windows.
+
+## Scopes and keying
+
+Supported scopes:
+
+| Scope | Required IDs | Key format (without `keyPrefix`) | Skip behavior |
+| --- | --- | --- | --- |
+| `user` | `userId` | `rl:user:{userId}:{commandName}` | Skips if `userId` is missing. |
+| `guild` | `guildId` | `rl:guild:{guildId}:{commandName}` | Skips if `guildId` is missing. |
+| `channel` | `channelId` | `rl:channel:{channelId}:{commandName}` | Skips if `channelId` is missing. |
+| `global` | none | `rl:global:{commandName}` | Never skipped. |
+| `user-guild` | `userId`, `guildId` | `rl:user:{userId}:guild:{guildId}:{commandName}` | Skips if either id is missing. |
+| `custom` | `keyResolver` | `keyResolver(ctx, command, source)` | Skips if resolver is missing or returns falsy. |
+
+Keying notes:
+
+- `DEFAULT_KEY_PREFIX` is always included in the base format.
+- `keyPrefix` is concatenated before `rl:` as-is, so include a trailing separator if you want one.
+- Multi-window limits append `:w:{windowId}`.
+
+### Exemption keys
+
+Temporary exemptions are stored under `rl:exempt:{scope}:{id}` (plus optional `keyPrefix`).
+
+| Exemption scope | Key format | Notes |
+| --- | --- | --- |
+| `user` | `rl:exempt:user:{userId}` | Resolved from the source user id. |
+| `guild` | `rl:exempt:guild:{guildId}` | Resolved from the guild id. |
+| `role` | `rl:exempt:role:{roleId}` | Resolved from all member roles. |
+| `channel` | `rl:exempt:channel:{channelId}` | Resolved from the channel id. |
+| `category` | `rl:exempt:category:{categoryId}` | Resolved from the parent category id. |
+
+## Algorithms
+
+### Algorithm matrix
+
+| Algorithm | Required config | Storage requirements | `limit` value | Notes |
+| --- | --- | --- | --- | --- |
+| `fixed-window` | `maxRequests`, `interval` | `consumeFixedWindow` or `incr` or `get` and `set` | `maxRequests` | Fallback uses per-process lock and optimistic versioning. |
+| `sliding-window` | `maxRequests`, `interval` | `consumeSlidingWindowLog` or `zRemRangeByScore` + `zCard` + `zAdd` | `maxRequests` | Throws if sorted-set support is missing. |
+| `token-bucket` | `burst`, `refillRate` | `get` and `set` | `burst` | Throws if `refillRate <= 0`. |
+| `leaky-bucket` | `burst`, `leakRate` | `get` and `set` | `burst` | Throws if `leakRate <= 0`. |
+
+### Fixed window
+
+Execution path:
+
+1. If `consumeFixedWindow` exists, it is used.
+2. Else if `incr` exists, it is used.
+3. Else a fallback uses `get` and `set` with a per-process lock.
+
+The limiter is considered limited when `count > maxRequests`. The fallback path retries up to five times with optimistic versioning and is serialized only within the current process.
+
+#### Fixed window fallback diagram
+
+```mermaid
+graph TD
+ A[Consume fixed-window] --> B{consumeFixedWindow?}
+ B -- Yes --> C[Use consumeFixedWindow]
+ B -- No --> D{incr?}
+ D -- Yes --> E[Use incr]
+ D -- No --> F["get + set fallback (per-process lock)"]
+```
+
+### Sliding window log
+
+Execution path:
+
+1. If `consumeSlidingWindowLog` exists, it is used (atomic).
+2. Else a sorted-set fallback uses `zRemRangeByScore`, `zCard`, and `zAdd`.
+
+If sorted-set methods are missing, the algorithm throws. If `zRangeByScore` is available, it is used to compute an accurate oldest timestamp for `resetAt`; otherwise `resetAt` defaults to `now + window`. The fallback is serialized per process but is not atomic across processes.
+
+#### Sliding window fallback diagram
+
+```mermaid
+graph TD
+ A[Consume sliding-window] --> B{consumeSlidingWindowLog?}
+ B -- Yes --> C[Use consumeSlidingWindowLog]
+ B -- No --> D{zset methods?}
+ D -- No --> E[Throw error]
+ D -- Yes --> F[zRemRangeByScore + zCard + zAdd fallback]
+```
+
+### Token bucket
+
+Token bucket uses a stored `tokens` and `lastRefill` state. On each consume, tokens refill based on elapsed time and `refillRate`. If the bucket has fewer than one token, the request is limited and `retryAfter` is computed from the time required to refill one token.
+
+### Leaky bucket
+
+Leaky bucket uses a stored `level` and `lastLeak` state. Each request adds one token, and the bucket drains at `leakRate`. If adding would exceed `capacity`, the request is limited and `retryAfter` is computed from the time required to drain the overflow.
+
+### Multi-window limits
+
+Use `windows` to enforce multiple windows simultaneously:
+
+```ts
+configureRatelimit({
+ defaultLimiter: {
+ scope: 'user',
+ algorithm: 'sliding-window',
+ windows: [
+ { id: 'short', maxRequests: 10, interval: '1m' },
+ { id: 'long', maxRequests: 1000, interval: '1d' },
+ ],
+ },
+});
+```
+
+If a window `id` is omitted, the plugin generates `w1`, `w2`, and so on. Window ids are part of the storage key and appear in results.
+
+## Storage
+
+### Storage interface
+
+Required methods:
+
+| Method | Used by | Notes |
+| --- | --- | --- |
+| `get` | All algorithms | Returns stored value or `null`. |
+| `set` | All algorithms | Optional `ttlMs` controls expiry. |
+| `delete` | Resets and algorithm resets | Removes stored state. |
+
+Optional methods and features:
+
+| Method | Feature | Notes |
+| --- | --- | --- |
+| `consumeFixedWindow` | Fixed-window atomic consume | Used before `incr` and fallback. |
+| `incr` | Fixed-window efficiency | Returns count and TTL. |
+| `consumeSlidingWindowLog` | Sliding-window atomic consume | Preferred over sorted-set fallback. |
+| `zAdd` / `zRemRangeByScore` / `zCard` | Sliding-window fallback | Required when `consumeSlidingWindowLog` is absent. |
+| `zRangeByScore` | Sliding-window reset accuracy | Improves `resetAt` computation. |
+| `ttl` | Exemption listing | Used for `expiresInMs`. |
+| `expire` | Sliding-window fallback | Keeps sorted-set keys from growing indefinitely. |
+| `deleteByPrefix` / `deleteByPattern` | Resets | Required by `resetAllRateLimits` and HMR. |
+| `keysByPrefix` | Exemption listing | Required for listing without a specific id. |
+
+### Capability matrix
+
+| Feature | Requires | Memory | Redis | Fallback |
+| --- | --- | --- | --- | --- |
+| Fixed-window atomic consume | `consumeFixedWindow` | Yes | Yes | Conditional (both storages) |
+| Fixed-window `incr` | `incr` | Yes | Yes | Conditional (both storages) |
+| Sliding-window atomic consume | `consumeSlidingWindowLog` | Yes | Yes | Conditional (both storages) |
+| Sliding-window fallback | `zAdd` + `zRemRangeByScore` + `zCard` | Yes | Yes | Conditional (both storages) |
+| TTL visibility | `ttl` | Yes | Yes | Conditional (both storages) |
+| Prefix or pattern deletes | `deleteByPrefix` or `deleteByPattern` | Yes | Yes | Conditional (both storages) |
+| Exemption listing | `keysByPrefix` | Yes | Yes | Conditional (both storages) |
+
+### Capability overview diagram
+
+```mermaid
+graph TD
+ A[Storage API] --> B[Required: get / set / delete]
+ A --> C[Optional methods]
+ C --> D[Fixed window atomic: consumeFixedWindow / incr]
+ C --> E[Sliding window atomic: consumeSlidingWindowLog]
+ C --> F[Sliding window fallback: zAdd + zRemRangeByScore + zCard]
+ C --> G[Listing & TTL: keysByPrefix / ttl]
+ C --> H[Bulk reset: deleteByPrefix / deleteByPattern]
+ I[Fallback storage] --> J[Uses primary + secondary]
+ J --> K[Each optional method must exist on both]
+```
+
+### Memory storage
+
+```ts
+import { MemoryRateLimitStorage, setRateLimitStorage } from '@commandkit/ratelimit';
+
+setRateLimitStorage(new MemoryRateLimitStorage());
+```
+
+Notes:
+
+- In-memory only; not safe for multi-process deployments.
+- Implements TTL and sorted-set helpers.
+- `deleteByPattern` supports a simple `*` wildcard, not full glob syntax.
+
+:::warning Single-process only
+
+Memory storage is per process. For multiple bot shards or instances, use a shared storage like Redis.
+
+:::
+
+### Redis storage
+
+```ts
+import { RedisRateLimitStorage } from '@commandkit/ratelimit/redis';
+import { setRateLimitStorage } from '@commandkit/ratelimit';
+
+setRateLimitStorage(
+ new RedisRateLimitStorage({ host: 'localhost', port: 6379 }),
+);
+```
+
+Notes:
+
+- Stores values as JSON.
+- Uses Lua scripts for atomic fixed and sliding windows.
+- Uses `SCAN` for prefix and pattern deletes and listing.
+
+### Fallback storage
+
+```ts
+import { FallbackRateLimitStorage } from '@commandkit/ratelimit/fallback';
+import { MemoryRateLimitStorage } from '@commandkit/ratelimit/memory';
+import { RedisRateLimitStorage } from '@commandkit/ratelimit/redis';
+import { setRateLimitStorage } from '@commandkit/ratelimit';
+
+const primary = new RedisRateLimitStorage({ host: 'localhost', port: 6379 });
+const secondary = new MemoryRateLimitStorage();
+
+setRateLimitStorage(new FallbackRateLimitStorage(primary, secondary));
+```
+
+Notes:
+
+- Every optional method must exist on both storages or the fallback wrapper throws.
+- Primary errors are logged at most once per `cooldownMs` window (default 30s).
+
+## Queue mode
+
+Queue mode retries commands instead of rejecting immediately.
+
+:::tip Use queueing to smooth bursts
+
+Queueing is useful for smoothing short bursts, but it changes response timing. Disable it with `queue: { enabled: false }` if you want strict, immediate rate-limit responses.
+
+:::
+
+### Queue defaults and clamps
+
+| Field | Default | Clamp | Notes |
+| --- | --- | --- | --- |
+| `enabled` | `true` if any queue config exists | n/a | Otherwise `false`. |
+| `maxSize` | `3` | `>= 1` | Queue size is pending plus running. |
+| `timeout` | `30s` | `>= 1ms` | Per queued task. |
+| `deferInteraction` | `true` | n/a | Only applies to interactions. |
+| `ephemeral` | `true` | n/a | Applies to deferred replies. |
+| `concurrency` | `1` | `>= 1` | Per queue key. |
+
+### Queue flow
+
+1. Rate limit is evaluated and an aggregate result is computed.
+2. If limited and queueing is enabled, the plugin tries to enqueue.
+3. If the queue is full, it falls back to immediate rate-limit handling.
+4. When queued, the interaction is deferred if it is repliable and not already replied or deferred.
+5. The queued task waits `retryAfter`, then re-checks the limiter; if still limited it waits at least 250ms and retries until timeout.
+
+### Queue flow diagram
+
+```mermaid
+graph TD
+ A[Evaluate limiter] --> B{Limited?}
+ B -- No --> C[Allow command]
+ B -- Yes --> D{Queue enabled?}
+ D -- No --> E[Rate-limit response]
+ D -- Yes --> F{Queue has capacity?}
+ F -- No --> E
+ F -- Yes --> G[Enqueue + defer if repliable]
+ G --> H[Wait retryAfter]
+ H --> I{Still limited?}
+ I -- No --> C
+ I -- Yes --> J[Wait >= 250ms]
+ J --> K{Timed out?}
+ K -- No --> H
+ K -- Yes --> E
+```
+
+## Violations and escalation
+
+Violation escalation is stored under `violation:{key}` and uses these defaults:
+
+| Option | Default | Meaning |
+| --- | --- | --- |
+| `maxViolations` | `5` | Maximum escalation steps. |
+| `escalationMultiplier` | `2` | Multiplier per repeated violation. |
+| `resetAfter` | `1h` | TTL for violation state. |
+| `escalate` | `true` when `violations` is set | Set `false` to disable escalation. |
+
+Formula:
+
+`cooldown = baseRetryAfter * multiplier^(count - 1)`
+
+If escalation produces a later `resetAt` than the algorithm returned, the result is updated so `resetAt` and `retryAfter` stay accurate.
+
+## Bypass and exemptions
+
+Bypass order is always:
+
+1. `bypass.userIds`, `bypass.guildIds`, and `bypass.roleIds`.
+2. Temporary exemptions stored in storage.
+3. `bypass.check(source)`.
+
+Bypass example:
+
+```ts
+configureRatelimit({
+ bypass: {
+ userIds: ['USER_ID'],
+ guildIds: ['GUILD_ID'],
+ roleIds: ['ROLE_ID'],
+ check: (source) => source.channelId === 'ALLOWLIST_CHANNEL',
+ },
+});
+```
+
+Temporary exemptions:
+
+```ts
+import { grantRateLimitExemption } from '@commandkit/ratelimit';
+
+await grantRateLimitExemption({
+ scope: 'user',
+ id: 'USER_ID',
+ duration: '1h',
+});
+```
+
+Listing behavior:
+
+- `listRateLimitExemptions({ scope, id })` reads a single key directly.
+- `listRateLimitExemptions({ scope })` scans by prefix and requires `keysByPrefix`.
+- `expiresInMs` is `null` when `ttl` is not supported.
+
+## Responses, hooks, and events
+
+### Default response behavior
+
+| Source | Conditions | Action |
+| --- | --- | --- |
+| Message | Channel is sendable | `reply()` with cooldown embed. |
+| Interaction | Repliable and not replied/deferred | `reply()` with ephemeral cooldown embed. |
+| Interaction | Repliable and already replied/deferred | `followUp()` with ephemeral cooldown embed. |
+| Interaction | Not repliable | No response. |
+
+The default embed title is `:hourglass_flowing_sand: You are on cooldown` and the description uses a relative timestamp based on `resetAt`.
+
+### Hooks
+
+| Hook | Called when | Notes |
+| --- | --- | --- |
+| `onAllowed` | Command is allowed | Receives the first result. |
+| `onRateLimited` | Command is limited | Receives the first limited result. |
+| `onViolation` | A violation is recorded | Receives key and violation count. |
+| `onReset` | `resetRateLimit` succeeds | Not called by `resetAllRateLimits`. |
+| `onStorageError` | Storage operation fails | `fallbackUsed` is `false` in runtime plugin paths. |
+
+### Analytics events
+
+The runtime plugin calls `ctx.commandkit.analytics.track(...)` with:
+
+| Event name | When |
+| --- | --- |
+| `ratelimit_allowed` | After an allowed consume. |
+| `ratelimit_hit` | After a limited consume. |
+| `ratelimit_violation` | When escalation records a violation. |
+
+### Event bus
+
+A `ratelimited` event is emitted on the `ratelimits` channel:
+
+```ts
+commandkit.events
+ .to('ratelimits')
+ .on('ratelimited', ({ key, result, source, aggregate, commandName, queued }) => {
+ console.log(key, commandName, queued, aggregate.retryAfter);
+ });
+```
+
+Payload fields include `key`, `result`, `source`, `aggregate`, `commandName`, and `queued`.
+
+## Resets and HMR
+
+### `resetRateLimit`
+
+`resetRateLimit` clears the base key, its `violation:` key, and any window variants. It accepts either a raw `key` or a scope-derived key.
+
+| Mode | Required params | Notes |
+| --- | --- | --- |
+| Direct | `key` | Resets `key`, `violation:key`, and window variants. |
+| Scoped | `scope` + `commandName` + required ids | Throws if identifiers are missing. |
+
+### `resetAllRateLimits`
+
+`resetAllRateLimits` supports several modes and requires storage delete helpers:
+
+| Mode | Required params | Storage requirement |
+| --- | --- | --- |
+| Pattern | `pattern` | `deleteByPattern` |
+| Prefix | `prefix` | `deleteByPrefix` |
+| Command name | `commandName` | `deleteByPattern` |
+| Scope | `scope` + required ids | `deleteByPrefix` |
+
+### HMR reset behavior
+
+When a command file is hot-reloaded, the plugin deletes keys that match:
+
+- `*:{commandName}`
+- `violation:*:{commandName}`
+- `*:{commandName}:w:*`
+- `violation:*:{commandName}:w:*`
+
+HMR reset requires `deleteByPattern`. If the storage does not support pattern deletes, nothing is cleared.
+
+## Directive: `use ratelimit`
+
+The compiler plugin (`UseRateLimitDirectivePlugin`) uses `CommonDirectiveTransformer` with `directive = "use ratelimit"` and `importName = "$ckitirl"`. It transforms async functions only.
+
+The runtime wrapper:
+
+- Uses the runtime default limiter (merged with `DEFAULT_LIMITER`).
+- Generates a per-function key `rl:fn:{uuid}` and applies `keyPrefix` if present.
+- Aggregates results across windows and throws `RateLimitError` when limited.
+- Caches the wrapper per function and exposes it as `globalThis.$ckitirl`.
+
+Example:
+
+```ts
+import { RateLimitError } from '@commandkit/ratelimit';
+
+const heavy = async () => {
+ 'use ratelimit';
+ return 'ok';
+};
+
+try {
+ await heavy();
+} catch (error) {
+ if (error instanceof RateLimitError) {
+ console.log(error.result.retryAfter);
+ }
+}
+```
+
+## Defaults and edge cases
+
+### Defaults
+
+| Setting | Default |
+| --- | --- |
+| `maxRequests` | `10` |
+| `interval` | `60s` |
+| `algorithm` | `fixed-window` |
+| `scope` | `user` |
+| `DEFAULT_KEY_PREFIX` | `rl:` |
+| `RATELIMIT_STORE_KEY` | `ratelimit` |
+| `roleLimitStrategy` | `highest` |
+| `queue.maxSize` | `3` |
+| `queue.timeout` | `30s` |
+| `queue.deferInteraction` | `true` |
+| `queue.ephemeral` | `true` |
+| `queue.concurrency` | `1` |
+| `initializeDefaultStorage` | `true` |
+
+### Edge cases
+
+1. If no storage is configured and default storage is disabled, the plugin logs once and stores an empty result without limiting.
+2. If no scope key can be resolved, the plugin stores an empty result and skips limiting.
+3. If storage errors occur during consume, `onStorageError` is invoked and the plugin skips limiting for that execution.
+4. For token and leaky buckets, `limit` equals `burst`. For fixed and sliding windows, `limit` equals `maxRequests`.
+
+## Duration parsing
+
+`DurationLike` accepts numbers (milliseconds) or strings parsed by `ms`, plus custom units for weeks and months.
+
+| Unit | Meaning |
+| --- | --- |
+| `ms`, `s`, `m`, `h`, `d` | Standard `ms` units. |
+| `w`, `week`, `weeks` | 7 days. |
+| `mo`, `month`, `months` | 30 days. |
+
+## Exports
+
+| Export | Description |
+| --- | --- |
+| `ratelimit` | Plugin factory returning compiler + runtime plugins. |
+| `RateLimitPlugin` | Runtime plugin class. |
+| `UseRateLimitDirectivePlugin` | Compiler plugin for `use ratelimit`. |
+| `RateLimitEngine` | Algorithm coordinator with escalation handling. |
+| Algorithm classes | Fixed, sliding, token bucket, and leaky bucket implementations. |
+| Storage classes | Memory, Redis, and fallback storage. |
+| Runtime helpers | `configureRatelimit`, `setRateLimitStorage`, `getRateLimitRuntime`, and more. |
+| API helpers | `getRateLimitInfo`, resets, and exemption helpers. |
+| `RateLimitError` | Error thrown by the directive wrapper. |
+
+Subpath exports:
+
+- `@commandkit/ratelimit/redis`
+- `@commandkit/ratelimit/memory`
+- `@commandkit/ratelimit/fallback`
+
diff --git a/packages/ratelimit/LICENSE b/packages/ratelimit/LICENSE
new file mode 100644
index 00000000..b4151c1e
--- /dev/null
+++ b/packages/ratelimit/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Neplex
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/packages/ratelimit/README.md b/packages/ratelimit/README.md
new file mode 100644
index 00000000..1eb30eaf
--- /dev/null
+++ b/packages/ratelimit/README.md
@@ -0,0 +1,801 @@
+# @commandkit/ratelimit
+
+`@commandkit/ratelimit` is the official CommandKit plugin for advanced rate limiting. It provides multi-window policies, role overrides, queueing, exemptions, and multiple algorithms while keeping command handlers lean.
+
+The `ratelimit()` factory returns two plugins in order: the compiler plugin for the "use ratelimit" directive and the runtime plugin that enforces limits. Runtime options must be configured before the runtime plugin activates.
+
+## Table of contents
+
+1. [Installation](#installation)
+2. [Setup](#setup)
+3. [Runtime configuration lifecycle](#runtime-configuration-lifecycle)
+4. [Basic usage](#basic-usage)
+5. [Configuration reference](#configuration-reference)
+6. [Limiter resolution and role strategy](#limiter-resolution-and-role-strategy)
+7. [Scopes and keying](#scopes-and-keying)
+8. [Algorithms](#algorithms)
+9. [Storage](#storage)
+10. [Queue mode](#queue-mode)
+11. [Violations and escalation](#violations-and-escalation)
+12. [Bypass and exemptions](#bypass-and-exemptions)
+13. [Responses, hooks, and events](#responses-hooks-and-events)
+14. [Resets and HMR](#resets-and-hmr)
+15. [Directive: `use ratelimit`](#directive-use-ratelimit)
+16. [Defaults and edge cases](#defaults-and-edge-cases)
+17. [Duration parsing](#duration-parsing)
+18. [Exports](#exports)
+
+## Installation
+
+Install the ratelimit plugin to get started:
+
+```bash
+npm install @commandkit/ratelimit
+```
+
+## Setup
+
+Add the ratelimit plugin to your CommandKit configuration and define a runtime config file.
+
+### Quick start
+
+Create an auto-loaded runtime config file (for example `ratelimit.ts`) and configure the default limiter:
+
+```ts
+// ratelimit.ts
+import { configureRatelimit } from '@commandkit/ratelimit';
+
+configureRatelimit({
+ defaultLimiter: {
+ maxRequests: 5,
+ interval: '1m',
+ scope: 'user',
+ algorithm: 'fixed-window',
+ },
+});
+```
+
+Register the plugin in your config:
+
+```ts
+// commandkit.config.ts
+import { defineConfig } from 'commandkit';
+import { ratelimit } from '@commandkit/ratelimit';
+
+export default defineConfig({
+ plugins: [ratelimit()],
+});
+```
+
+The runtime plugin auto-loads `ratelimit.ts` or `ratelimit.js` on startup before commands execute.
+
+## Runtime configuration lifecycle
+
+### Runtime lifecycle diagram
+
+```mermaid
+graph TD
+ A[App startup] --> B[Auto-load ratelimit.ts/js]
+ B --> C["configureRatelimit()"]
+ C --> D["Runtime plugin activate()"]
+ D --> E[Resolve storage]
+ E --> F[Resolve limiter config]
+ F --> G[Consume algorithm]
+ G --> H[Aggregate result]
+ H --> I[Default response / hooks / events]
+```
+
+### `configureRatelimit` is required
+
+`RateLimitPlugin.activate()` throws if `configureRatelimit()` was not called. This is enforced to avoid silently running without your intended defaults.
+
+### How configuration is stored
+
+`configureRatelimit()` merges your config into an in-memory object and sets the configured flag. `getRateLimitConfig()` returns the current object, and `isRateLimitConfigured()` returns whether initialization has happened. If a runtime context is already active, `configureRatelimit()` updates it immediately.
+
+### Runtime storage selection
+
+Storage is resolved in this order:
+
+| Order | Source | Notes |
+| --- | --- | --- |
+| 1 | Limiter `storage` override | `RateLimitLimiterConfig.storage` for the command being executed. |
+| 2 | Plugin `storage` option | `RateLimitPluginOptions.storage`. |
+| 3 | Process default | Set via `setRateLimitStorage()` or `setDriver()`. |
+| 4 | Default memory storage | Used unless `initializeDefaultStorage` or `initializeDefaultDriver` is `false`. |
+
+If no storage is resolved and defaults are disabled, the plugin logs once and stores an empty result without limiting.
+
+### Runtime helpers
+
+These helpers are process-wide:
+
+| Helper | Purpose |
+| --- | --- |
+| `configureRatelimit` | Set runtime options and update active runtime state. |
+| `getRateLimitConfig` | Read the merged in-memory runtime config. |
+| `isRateLimitConfigured` | Check whether `configureRatelimit()` was called. |
+| `setRateLimitStorage` | Set the default storage for the process. |
+| `getRateLimitStorage` | Get the process default storage (or `null`). |
+| `setDriver` / `getDriver` | Aliases for `setRateLimitStorage` / `getRateLimitStorage`. |
+| `setRateLimitRuntime` | Set the active runtime context for APIs and directives. |
+| `getRateLimitRuntime` | Get the active runtime context (or `null`). |
+
+## Basic usage
+
+Use command metadata or the `use ratelimit` directive to enable rate limiting.
+This section focuses on command metadata; see the directive section for
+function-level usage.
+
+### Command metadata and enablement
+
+Enable rate limiting by setting `metadata.ratelimit`:
+
+```ts
+export const metadata = {
+ ratelimit: {
+ maxRequests: 3,
+ interval: '10s',
+ scope: 'user',
+ algorithm: 'sliding-window',
+ },
+};
+```
+
+`metadata.ratelimit` can be one of:
+
+| Value | Meaning |
+| --- | --- |
+| `false` or `undefined` | Plugin does nothing for this command. |
+| `true` | Enable rate limiting using resolved defaults. |
+| `RateLimitCommandConfig` | Enable rate limiting with command-level overrides. |
+
+If `env.context` is missing in the execution environment, the plugin skips rate limiting.
+
+### Named limiter example
+
+```ts
+configureRatelimit({
+ limiters: {
+ heavy: { maxRequests: 1, interval: '10s', algorithm: 'fixed-window' },
+ },
+});
+```
+
+```ts
+export const metadata = {
+ ratelimit: {
+ limiter: 'heavy',
+ scope: 'user',
+ },
+};
+```
+
+## Configuration reference
+
+### RateLimitPluginOptions
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `defaultLimiter` | `RateLimitLimiterConfig` | `DEFAULT_LIMITER` when unset | Base limiter for all commands and directives. |
+| `limiters` | `Record<string, RateLimitLimiterConfig>` | `undefined` | Named limiter presets. |
+| `storage` | `RateLimitStorageConfig` | `undefined` | Resolved before default storage. |
+| `keyPrefix` | `string` | `undefined` | Prepended before `rl:`. |
+| `keyResolver` | `RateLimitKeyResolver` | `undefined` | Used for `custom` scope when the limiter does not override it. |
+| `bypass` | `RateLimitBypassOptions` | `undefined` | Permanent allowlists and optional check. |
+| `hooks` | `RateLimitHooks` | `undefined` | Lifecycle callbacks. |
+| `onRateLimited` | `RateLimitResponseHandler` | `undefined` | Overrides default reply. |
+| `queue` | `RateLimitQueueOptions` | `undefined` | If any queue config exists, `enabled` defaults to `true`. |
+| `roleLimits` | `Record<string, RateLimitLimiterConfig>` | `undefined` | Base role limits. |
+| `roleLimitStrategy` | `RateLimitRoleLimitStrategy` | `highest` when resolving | Used when multiple roles match. |
+| `initializeDefaultStorage` | `boolean` | `true` | Disable to prevent memory fallback. |
+| `initializeDefaultDriver` | `boolean` | `true` | Alias for `initializeDefaultStorage`. |
+
+### RateLimitLimiterConfig
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `maxRequests` | `number` | `10` when missing or `<= 0` | Used by fixed and sliding windows. |
+| `interval` | `DurationLike` | `60s` when missing or invalid | Parsed and clamped to `>= 1ms`. |
+| `scope` | `RateLimitScope` or `RateLimitScope[]` | `user` | Arrays are deduplicated. |
+| `algorithm` | `RateLimitAlgorithmType` | `fixed-window` | Unknown values fall back to fixed-window. |
+| `burst` | `number` | `maxRequests` when missing or `<= 0` | Capacity for token or leaky buckets. |
+| `refillRate` | `number` | `maxRequests / intervalSeconds` | Must be `> 0` for token bucket. |
+| `leakRate` | `number` | `maxRequests / intervalSeconds` | Must be `> 0` for leaky bucket. |
+| `keyResolver` | `RateLimitKeyResolver` | `undefined` | Used only for `custom` scope. |
+| `keyPrefix` | `string` | `undefined` | Overrides plugin prefix for this limiter. |
+| `storage` | `RateLimitStorageConfig` | `undefined` | Overrides storage for this limiter. |
+| `violations` | `ViolationOptions` | `undefined` | Enables escalation unless `escalate` is `false`. |
+| `queue` | `RateLimitQueueOptions` | `undefined` | Overrides queue settings at this layer. |
+| `windows` | `RateLimitWindowConfig[]` | `undefined` | Enables multi-window behavior. |
+| `roleLimits` | `Record<string, RateLimitLimiterConfig>` | `undefined` | Role overrides at this layer. |
+| `roleLimitStrategy` | `RateLimitRoleLimitStrategy` | `highest` when resolving | Used when role limits match. |
+
+### RateLimitWindowConfig
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `id` | `string` | `w1`, `w2`, ... | Auto-generated if empty or missing. |
+| `maxRequests` | `number` | Inherits from base limiter | Applies only to this window. |
+| `interval` | `DurationLike` | Inherits from base limiter | Parsed like the base limiter. |
+| `algorithm` | `RateLimitAlgorithmType` | Inherits from base limiter | Usually keep consistent across windows. |
+| `burst` | `number` | Inherits from base limiter | Used for token or leaky buckets. |
+| `refillRate` | `number` | Inherits from base limiter | Must be `> 0` for token bucket. |
+| `leakRate` | `number` | Inherits from base limiter | Must be `> 0` for leaky bucket. |
+| `violations` | `ViolationOptions` | Inherits from base limiter | Overrides escalation for this window. |
+
+### RateLimitQueueOptions
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `enabled` | `boolean` | `true` when any queue config exists | Otherwise `false`. |
+| `maxSize` | `number` | `3` and clamped to `>= 1` | Queue size is pending plus running. |
+| `timeout` | `DurationLike` | `30s` and clamped to `>= 1ms` | Per queued task. |
+| `deferInteraction` | `boolean` | `true` unless explicitly `false` | Only used for interactions. |
+| `ephemeral` | `boolean` | `true` unless explicitly `false` | Applies to deferred replies. |
+| `concurrency` | `number` | `1` and clamped to `>= 1` | Per queue key. |
+
+### ViolationOptions
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `escalate` | `boolean` | `true` when `violations` is set | Set `false` to disable escalation. |
+| `maxViolations` | `number` | `5` | Maximum escalation steps. |
+| `escalationMultiplier` | `number` | `2` | Multiplies cooldown per repeated violation. |
+| `resetAfter` | `DurationLike` | `1h` | TTL for violation state. |
+
+### RateLimitCommandConfig
+
+`RateLimitCommandConfig` extends `RateLimitLimiterConfig` and adds:
+
+| Field | Type | Default or resolution | Notes |
+| --- | --- | --- | --- |
+| `limiter` | `string` | `undefined` | References a named limiter in `limiters`. |
+
+### Result shapes
+
+RateLimitStoreValue:
+
+| Field | Type | Meaning |
+| --- | --- | --- |
+| `limited` | `boolean` | `true` if any scope or window was limited. |
+| `remaining` | `number` | Minimum remaining across all results. |
+| `resetAt` | `number` | Latest reset timestamp across all results. |
+| `retryAfter` | `number` | Max retry delay across limited results. |
+| `results` | `RateLimitResult[]` | Individual results per scope and window. |
+
+RateLimitResult:
+
+| Field | Type | Meaning |
+| --- | --- | --- |
+| `key` | `string` | Storage key used for the limiter. |
+| `scope` | `RateLimitScope` | Scope applied for the limiter. |
+| `algorithm` | `RateLimitAlgorithmType` | Algorithm used for the limiter. |
+| `windowId` | `string` | Present for multi-window limits. |
+| `limited` | `boolean` | Whether this limiter hit its limit. |
+| `remaining` | `number` | Remaining requests or capacity. |
+| `resetAt` | `number` | Absolute reset timestamp in ms. |
+| `retryAfter` | `number` | Delay until retry is allowed, in ms. |
+| `limit` | `number` | `maxRequests` for fixed and sliding, `burst` for token and leaky buckets. |
+
+## Limiter resolution and role strategy
+
+Limiter configuration is layered in this exact order, with later layers overriding earlier ones:
+
+| Order | Source | Notes |
+| --- | --- | --- |
+| 1 | `DEFAULT_LIMITER` | Base defaults. |
+| 2 | `defaultLimiter` | Runtime defaults. |
+| 3 | Named limiter | When `metadata.ratelimit.limiter` is set. |
+| 4 | Command overrides | `metadata.ratelimit` config. |
+| 5 | Role override | Selected by role strategy. |
+
+### Limiter resolution diagram
+
+```mermaid
+graph TD
+ A[DEFAULT_LIMITER] --> B[defaultLimiter]
+ B --> C[Named limiter]
+ C --> D[Command overrides]
+ D --> E["Role override (strategy)"]
+```
+
+Role limits are merged in this order, with later maps overriding earlier ones for the same role id:
+
+| Order | Source |
+| --- | --- |
+| 1 | Plugin `roleLimits` |
+| 2 | `defaultLimiter.roleLimits` |
+| 3 | Named limiter `roleLimits` |
+| 4 | Command `roleLimits` |
+
+Role strategies:
+
+| Strategy | Selection rule |
+| --- | --- |
+| `highest` | Picks the role with the highest request rate (`maxRequests / intervalMs`). |
+| `lowest` | Picks the role with the lowest request rate. |
+| `first` | Uses insertion order of the merged role limits object. |
+
+For multi-window limiters, the score uses the minimum rate across windows.
+
+## Scopes and keying
+
+Supported scopes:
+
+| Scope | Required IDs | Key format (without `keyPrefix`) | Skip behavior |
+| --- | --- | --- | --- |
+| `user` | `userId` | `rl:user:{userId}:{commandName}` | Skips if `userId` is missing. |
+| `guild` | `guildId` | `rl:guild:{guildId}:{commandName}` | Skips if `guildId` is missing. |
+| `channel` | `channelId` | `rl:channel:{channelId}:{commandName}` | Skips if `channelId` is missing. |
+| `global` | none | `rl:global:{commandName}` | Never skipped. |
+| `user-guild` | `userId`, `guildId` | `rl:user:{userId}:guild:{guildId}:{commandName}` | Skips if either id is missing. |
+| `custom` | `keyResolver` | `keyResolver(ctx, command, source)` | Skips if resolver is missing or returns falsy. |
+
+Keying notes:
+
+- `DEFAULT_KEY_PREFIX` is always included in the base format.
+- `keyPrefix` is concatenated before `rl:` as-is, so include a trailing separator if you want one.
+- Multi-window limits append `:w:{windowId}`.
+
+### Exemption keys
+
+Temporary exemptions are stored under `rl:exempt:{scope}:{id}` (plus optional `keyPrefix`).
+
+| Exemption scope | Key format | Notes |
+| --- | --- | --- |
+| `user` | `rl:exempt:user:{userId}` | Resolved from the source user id. |
+| `guild` | `rl:exempt:guild:{guildId}` | Resolved from the guild id. |
+| `role` | `rl:exempt:role:{roleId}` | Resolved from all member roles. |
+| `channel` | `rl:exempt:channel:{channelId}` | Resolved from the channel id. |
+| `category` | `rl:exempt:category:{categoryId}` | Resolved from the parent category id. |
+
+## Algorithms
+
+### Algorithm matrix
+
+| Algorithm | Required config | Storage requirements | `limit` value | Notes |
+| --- | --- | --- | --- | --- |
+| `fixed-window` | `maxRequests`, `interval` | `consumeFixedWindow` or `incr` or `get` and `set` | `maxRequests` | Fallback uses per-process lock and optimistic versioning. |
+| `sliding-window` | `maxRequests`, `interval` | `consumeSlidingWindowLog` or `zRemRangeByScore` + `zCard` + `zAdd` | `maxRequests` | Throws if sorted-set support is missing. |
+| `token-bucket` | `burst`, `refillRate` | `get` and `set` | `burst` | Throws if `refillRate <= 0`. |
+| `leaky-bucket` | `burst`, `leakRate` | `get` and `set` | `burst` | Throws if `leakRate <= 0`. |
+
+### Fixed window
+
+Execution path:
+
+1. If `consumeFixedWindow` exists, it is used.
+2. Else if `incr` exists, it is used.
+3. Else a fallback uses `get` and `set` with a per-process lock.
+
+The limiter is considered limited when `count > maxRequests`. The fallback path retries up to five times with optimistic versioning and is serialized only within the current process.
+
+#### Fixed window fallback diagram
+
+```mermaid
+graph TD
+ A[Consume fixed-window] --> B{consumeFixedWindow?}
+ B -- Yes --> C[Use consumeFixedWindow]
+ B -- No --> D{incr?}
+ D -- Yes --> E[Use incr]
+ D -- No --> F["get + set fallback (per-process lock)"]
+```
+
+### Sliding window log
+
+Execution path:
+
+1. If `consumeSlidingWindowLog` exists, it is used (atomic).
+2. Else a sorted-set fallback uses `zRemRangeByScore`, `zCard`, and `zAdd`.
+
+If sorted-set methods are missing, the algorithm throws. If `zRangeByScore` is available, it is used to compute an accurate oldest timestamp for `resetAt`; otherwise `resetAt` defaults to `now + window`. The fallback is serialized per process but is not atomic across processes.
+
+#### Sliding window fallback diagram
+
+```mermaid
+graph TD
+ A[Consume sliding-window] --> B{consumeSlidingWindowLog?}
+ B -- Yes --> C[Use consumeSlidingWindowLog]
+ B -- No --> D{zset methods?}
+ D -- No --> E[Throw error]
+ D -- Yes --> F[zRemRangeByScore + zCard + zAdd fallback]
+```
+
+### Token bucket
+
+Token bucket uses a stored `tokens` and `lastRefill` state. On each consume, tokens refill based on elapsed time and `refillRate`. If the bucket has fewer than one token, the request is limited and `retryAfter` is computed from the time required to refill one token.
+
+### Leaky bucket
+
+Leaky bucket uses a stored `level` and `lastLeak` state. Each request adds one token, and the bucket drains at `leakRate`. If adding would exceed `capacity`, the request is limited and `retryAfter` is computed from the time required to drain the overflow.
+
+### Multi-window limits
+
+Use `windows` to enforce multiple windows simultaneously:
+
+```ts
+configureRatelimit({
+ defaultLimiter: {
+ scope: 'user',
+ algorithm: 'sliding-window',
+ windows: [
+ { id: 'short', maxRequests: 10, interval: '1m' },
+ { id: 'long', maxRequests: 1000, interval: '1d' },
+ ],
+ },
+});
+```
+
+If a window `id` is omitted, the plugin generates `w1`, `w2`, and so on. Window ids are part of the storage key and appear in results.
+
+## Storage
+
+### Storage interface
+
+Required methods:
+
+| Method | Used by | Notes |
+| --- | --- | --- |
+| `get` | All algorithms | Returns stored value or `null`. |
+| `set` | All algorithms | Optional `ttlMs` controls expiry. |
+| `delete` | Resets and algorithm resets | Removes stored state. |
+
+Optional methods and features:
+
+| Method | Feature | Notes |
+| --- | --- | --- |
+| `consumeFixedWindow` | Fixed-window atomic consume | Used before `incr` and fallback. |
+| `incr` | Fixed-window efficiency | Returns count and TTL. |
+| `consumeSlidingWindowLog` | Sliding-window atomic consume | Preferred over sorted-set fallback. |
+| `zAdd` / `zRemRangeByScore` / `zCard` | Sliding-window fallback | Required when `consumeSlidingWindowLog` is absent. |
+| `zRangeByScore` | Sliding-window reset accuracy | Improves `resetAt` computation. |
+| `ttl` | Exemption listing | Used for `expiresInMs`. |
+| `expire` | Sliding-window fallback | Keeps sorted-set keys from growing indefinitely. |
+| `deleteByPrefix` / `deleteByPattern` | Resets | Required by `resetAllRateLimits` and HMR. |
+| `keysByPrefix` | Exemption listing | Required for listing without a specific id. |
+
+### Capability matrix
+
+| Feature | Requires | Memory | Redis | Fallback |
+| --- | --- | --- | --- | --- |
+| Fixed-window atomic consume | `consumeFixedWindow` | Yes | Yes | Conditional (both storages) |
+| Fixed-window `incr` | `incr` | Yes | Yes | Conditional (both storages) |
+| Sliding-window atomic consume | `consumeSlidingWindowLog` | Yes | Yes | Conditional (both storages) |
+| Sliding-window fallback | `zAdd` + `zRemRangeByScore` + `zCard` | Yes | Yes | Conditional (both storages) |
+| TTL visibility | `ttl` | Yes | Yes | Conditional (both storages) |
+| Prefix or pattern deletes | `deleteByPrefix` or `deleteByPattern` | Yes | Yes | Conditional (both storages) |
+| Exemption listing | `keysByPrefix` | Yes | Yes | Conditional (both storages) |
+
+### Capability overview diagram
+
+```mermaid
+graph TD
+ A[Storage API] --> B[Required: get / set / delete]
+ A --> C[Optional methods]
+ C --> D[Fixed window atomic: consumeFixedWindow / incr]
+ C --> E[Sliding window atomic: consumeSlidingWindowLog]
+ C --> F[Sliding window fallback: zAdd + zRemRangeByScore + zCard]
+ C --> G[Listing & TTL: keysByPrefix / ttl]
+ C --> H[Bulk reset: deleteByPrefix / deleteByPattern]
+ I[Fallback storage] --> J[Uses primary + secondary]
+ J --> K[Each optional method must exist on both]
+```
+
+### Memory storage
+
+```ts
+import { MemoryRateLimitStorage, setRateLimitStorage } from '@commandkit/ratelimit';
+
+setRateLimitStorage(new MemoryRateLimitStorage());
+```
+
+Notes:
+
+- In-memory only; not safe for multi-process deployments.
+- Implements TTL and sorted-set helpers.
+- `deleteByPattern` supports a simple `*` wildcard, not full glob syntax.
+
+### Redis storage
+
+```ts
+import { RedisRateLimitStorage } from '@commandkit/ratelimit/redis';
+import { setRateLimitStorage } from '@commandkit/ratelimit';
+
+setRateLimitStorage(
+ new RedisRateLimitStorage({ host: 'localhost', port: 6379 }),
+);
+```
+
+Notes:
+
+- Stores values as JSON.
+- Uses Lua scripts for atomic fixed and sliding windows.
+- Uses `SCAN` for prefix and pattern deletes and listing.
+
+### Fallback storage
+
+```ts
+import { FallbackRateLimitStorage } from '@commandkit/ratelimit/fallback';
+import { MemoryRateLimitStorage } from '@commandkit/ratelimit/memory';
+import { RedisRateLimitStorage } from '@commandkit/ratelimit/redis';
+import { setRateLimitStorage } from '@commandkit/ratelimit';
+
+const primary = new RedisRateLimitStorage({ host: 'localhost', port: 6379 });
+const secondary = new MemoryRateLimitStorage();
+
+setRateLimitStorage(new FallbackRateLimitStorage(primary, secondary));
+```
+
+Notes:
+
+- Every optional method must exist on both storages or the fallback wrapper throws.
+- Primary errors are logged at most once per `cooldownMs` window (default 30s).
+
+## Queue mode
+
+Queue mode retries commands instead of rejecting immediately.
+
+### Queue defaults and clamps
+
+| Field | Default | Clamp | Notes |
+| --- | --- | --- | --- |
+| `enabled` | `true` if any queue config exists | n/a | Otherwise `false`. |
+| `maxSize` | `3` | `>= 1` | Queue size is pending plus running. |
+| `timeout` | `30s` | `>= 1ms` | Per queued task. |
+| `deferInteraction` | `true` | n/a | Only applies to interactions. |
+| `ephemeral` | `true` | n/a | Applies to deferred replies. |
+| `concurrency` | `1` | `>= 1` | Per queue key. |
+
+### Queue flow
+
+1. Rate limit is evaluated and an aggregate result is computed.
+2. If limited and queueing is enabled, the plugin tries to enqueue.
+3. If the queue is full, it falls back to immediate rate-limit handling.
+4. When queued, the interaction is deferred if it is repliable and not already replied or deferred.
+5. The queued task waits `retryAfter`, then re-checks the limiter; if still limited it waits at least 250ms and retries until timeout.
+
+### Queue flow diagram
+
+```mermaid
+graph TD
+ A[Evaluate limiter] --> B{Limited?}
+ B -- No --> C[Allow command]
+ B -- Yes --> D{Queue enabled?}
+ D -- No --> E[Rate-limit response]
+ D -- Yes --> F{Queue has capacity?}
+ F -- No --> E
+ F -- Yes --> G[Enqueue + defer if repliable]
+ G --> H[Wait retryAfter]
+ H --> I{Still limited?}
+ I -- No --> C
+ I -- Yes --> J[Wait >= 250ms]
+ J --> K{Timed out?}
+ K -- No --> H
+ K -- Yes --> E
+```
+
+## Violations and escalation
+
+Violation escalation is stored under `violation:{key}` and uses these defaults:
+
+| Option | Default | Meaning |
+| --- | --- | --- |
+| `maxViolations` | `5` | Maximum escalation steps. |
+| `escalationMultiplier` | `2` | Multiplier per repeated violation. |
+| `resetAfter` | `1h` | TTL for violation state. |
+| `escalate` | `true` when `violations` is set | Set `false` to disable escalation. |
+
+Formula:
+
+`cooldown = baseRetryAfter * multiplier^(count - 1)`
+
+If escalation produces a later `resetAt` than the algorithm returned, the result is updated so `resetAt` and `retryAfter` stay accurate.
+
+## Bypass and exemptions
+
+Bypass order is always:
+
+1. `bypass.userIds`, `bypass.guildIds`, and `bypass.roleIds`.
+2. Temporary exemptions stored in storage.
+3. `bypass.check(source)`.
+
+Bypass example:
+
+```ts
+configureRatelimit({
+ bypass: {
+ userIds: ['USER_ID'],
+ guildIds: ['GUILD_ID'],
+ roleIds: ['ROLE_ID'],
+ check: (source) => source.channelId === 'ALLOWLIST_CHANNEL',
+ },
+});
+```
+
+Temporary exemptions:
+
+```ts
+import { grantRateLimitExemption } from '@commandkit/ratelimit';
+
+await grantRateLimitExemption({
+ scope: 'user',
+ id: 'USER_ID',
+ duration: '1h',
+});
+```
+
+Listing behavior:
+
+- `listRateLimitExemptions({ scope, id })` reads a single key directly.
+- `listRateLimitExemptions({ scope })` scans by prefix and requires `keysByPrefix`.
+- `expiresInMs` is `null` when `ttl` is not supported.
+
+## Responses, hooks, and events
+
+### Default response behavior
+
+| Source | Conditions | Action |
+| --- | --- | --- |
+| Message | Channel is sendable | `reply()` with cooldown embed. |
+| Interaction | Repliable and not replied/deferred | `reply()` with ephemeral cooldown embed. |
+| Interaction | Repliable and already replied/deferred | `followUp()` with ephemeral cooldown embed. |
+| Interaction | Not repliable | No response. |
+
+The default embed title is `:hourglass_flowing_sand: You are on cooldown` and the description uses a relative timestamp based on `resetAt`.
+
+### Hooks
+
+| Hook | Called when | Notes |
+| --- | --- | --- |
+| `onAllowed` | Command is allowed | Receives the first result. |
+| `onRateLimited` | Command is limited | Receives the first limited result. |
+| `onViolation` | A violation is recorded | Receives key and violation count. |
+| `onReset` | `resetRateLimit` succeeds | Not called by `resetAllRateLimits`. |
+| `onStorageError` | Storage operation fails | `fallbackUsed` is `false` in runtime plugin paths. |
+
+### Analytics events
+
+The runtime plugin calls `ctx.commandkit.analytics.track(...)` with:
+
+| Event name | When |
+| --- | --- |
+| `ratelimit_allowed` | After an allowed consume. |
+| `ratelimit_hit` | After a limited consume. |
+| `ratelimit_violation` | When escalation records a violation. |
+
+### Event bus
+
+A `ratelimited` event is emitted on the `ratelimits` channel:
+
+```ts
+commandkit.events
+ .to('ratelimits')
+ .on('ratelimited', ({ key, result, source, aggregate, commandName, queued }) => {
+ console.log(key, commandName, queued, aggregate.retryAfter);
+ });
+```
+
+Payload fields include `key`, `result`, `source`, `aggregate`, `commandName`, and `queued`.
+
+## Resets and HMR
+
+### `resetRateLimit`
+
+`resetRateLimit` clears the base key, its `violation:` key, and any window variants. It accepts either a raw `key` or a scope-derived key.
+
+| Mode | Required params | Notes |
+| --- | --- | --- |
+| Direct | `key` | Resets `key`, `violation:key`, and window variants. |
+| Scoped | `scope` + `commandName` + required ids | Throws if identifiers are missing. |
+
+### `resetAllRateLimits`
+
+`resetAllRateLimits` supports several modes and requires storage delete helpers:
+
+| Mode | Required params | Storage requirement |
+| --- | --- | --- |
+| Pattern | `pattern` | `deleteByPattern` |
+| Prefix | `prefix` | `deleteByPrefix` |
+| Command name | `commandName` | `deleteByPattern` |
+| Scope | `scope` + required ids | `deleteByPrefix` |
+
+### HMR reset behavior
+
+When a command file is hot-reloaded, the plugin deletes keys that match:
+
+- `*:{commandName}`
+- `violation:*:{commandName}`
+- `*:{commandName}:w:*`
+- `violation:*:{commandName}:w:*`
+
+HMR reset requires `deleteByPattern`. If the storage does not support pattern deletes, nothing is cleared.
+
+## Directive: `use ratelimit`
+
+The compiler plugin (`UseRateLimitDirectivePlugin`) uses `CommonDirectiveTransformer` with `directive = "use ratelimit"` and `importName = "$ckitirl"`. It transforms async functions only.
+
+The runtime wrapper:
+
+- Uses the runtime default limiter (merged with `DEFAULT_LIMITER`).
+- Generates a per-function key `rl:fn:{uuid}` and applies `keyPrefix` if present.
+- Aggregates results across windows and throws `RateLimitError` when limited.
+- Caches the wrapper per function and exposes it as `globalThis.$ckitirl`.
+
+Example:
+
+```ts
+import { RateLimitError } from '@commandkit/ratelimit';
+
+const heavy = async () => {
+ 'use ratelimit';
+ return 'ok';
+};
+
+try {
+ await heavy();
+} catch (error) {
+ if (error instanceof RateLimitError) {
+ console.log(error.result.retryAfter);
+ }
+}
+```
+
+## Defaults and edge cases
+
+### Defaults
+
+| Setting | Default |
+| --- | --- |
+| `maxRequests` | `10` |
+| `interval` | `60s` |
+| `algorithm` | `fixed-window` |
+| `scope` | `user` |
+| `DEFAULT_KEY_PREFIX` | `rl:` |
+| `RATELIMIT_STORE_KEY` | `ratelimit` |
+| `roleLimitStrategy` | `highest` |
+| `queue.maxSize` | `3` |
+| `queue.timeout` | `30s` |
+| `queue.deferInteraction` | `true` |
+| `queue.ephemeral` | `true` |
+| `queue.concurrency` | `1` |
+| `initializeDefaultStorage` | `true` |
+
+### Edge cases
+
+1. If no storage is configured and default storage is disabled, the plugin logs once and stores an empty result without limiting.
+2. If no scope key can be resolved, the plugin stores an empty result and skips limiting.
+3. If storage errors occur during consume, `onStorageError` is invoked and the plugin skips limiting for that execution.
+4. For token and leaky buckets, `limit` equals `burst`. For fixed and sliding windows, `limit` equals `maxRequests`.
+
+## Duration parsing
+
+`DurationLike` accepts numbers (milliseconds) or strings parsed by `ms`, plus custom units for weeks and months.
+
+| Unit | Meaning |
+| --- | --- |
+| `ms`, `s`, `m`, `h`, `d` | Standard `ms` units. |
+| `w`, `week`, `weeks` | 7 days. |
+| `mo`, `month`, `months` | 30 days. |
+
+## Exports
+
+| Export | Description |
+| --- | --- |
+| `ratelimit` | Plugin factory returning compiler + runtime plugins. |
+| `RateLimitPlugin` | Runtime plugin class. |
+| `UseRateLimitDirectivePlugin` | Compiler plugin for `use ratelimit`. |
+| `RateLimitEngine` | Algorithm coordinator with escalation handling. |
+| Algorithm classes | Fixed, sliding, token bucket, and leaky bucket implementations. |
+| Storage classes | Memory, Redis, and fallback storage. |
+| Runtime helpers | `configureRatelimit`, `setRateLimitStorage`, `getRateLimitRuntime`, and more. |
+| API helpers | `getRateLimitInfo`, resets, and exemption helpers. |
+| `RateLimitError` | Error thrown by the directive wrapper. |
+
+Subpath exports:
+
+- `@commandkit/ratelimit/redis`
+- `@commandkit/ratelimit/memory`
+- `@commandkit/ratelimit/fallback`
+
+
diff --git a/packages/ratelimit/package.json b/packages/ratelimit/package.json
new file mode 100644
index 00000000..a23b508a
--- /dev/null
+++ b/packages/ratelimit/package.json
@@ -0,0 +1,65 @@
+{
+ "name": "@commandkit/ratelimit",
+ "version": "0.0.0",
+ "description": "CommandKit plugin that provides advanced rate limiting",
+ "main": "dist/index.js",
+ "types": "dist/index.d.ts",
+ "files": [
+ "dist"
+ ],
+ "exports": {
+ ".": {
+ "types": "./dist/index.d.ts",
+ "import": "./dist/index.js"
+ },
+ "./redis": {
+ "types": "./dist/providers/redis.d.ts",
+ "import": "./dist/providers/redis.js"
+ },
+ "./memory": {
+ "types": "./dist/providers/memory.d.ts",
+ "import": "./dist/providers/memory.js"
+ },
+ "./fallback": {
+ "types": "./dist/providers/fallback.d.ts",
+ "import": "./dist/providers/fallback.js"
+ }
+ },
+ "scripts": {
+ "check-types": "tsc --noEmit",
+ "build": "tsc",
+ "test": "vitest"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/neplextech/commandkit.git",
+ "directory": "packages/ratelimit"
+ },
+ "keywords": [
+ "commandkit",
+ "ratelimit",
+ "rate limiting"
+ ],
+ "contributors": [
+ "Twilight ",
+ "Avraj "
+ ],
+ "license": "MIT",
+ "bugs": {
+ "url": "https://github.com/neplextech/commandkit/issues"
+ },
+ "homepage": "https://commandkit.dev",
+ "dependencies": {
+ "ioredis": "^5.10.0",
+ "ms": "^2.1.3"
+ },
+ "devDependencies": {
+ "@types/ms": "^2.1.0",
+ "commandkit": "workspace:*",
+ "discord.js": "catalog:discordjs",
+ "directive-to-hof": "^0.0.3",
+ "tsconfig": "workspace:*",
+ "typescript": "catalog:build",
+ "vitest": "^4.0.18"
+ }
+}
diff --git a/packages/ratelimit/spec/algorithms.test.ts b/packages/ratelimit/spec/algorithms.test.ts
new file mode 100644
index 00000000..efe34920
--- /dev/null
+++ b/packages/ratelimit/spec/algorithms.test.ts
@@ -0,0 +1,226 @@
+/**
+ * Algorithm integration tests.
+ *
+ * Fake timers keep limiter math deterministic and avoid flakiness.
+ */
+
+import { afterEach, describe, expect, test, vi } from 'vitest';
+import { MemoryRateLimitStorage } from '../src/storage/memory';
+import { FixedWindowAlgorithm } from '../src/engine/algorithms/fixed-window';
+import { SlidingWindowLogAlgorithm } from '../src/engine/algorithms/sliding-window';
+import { TokenBucketAlgorithm } from '../src/engine/algorithms/token-bucket';
+import { LeakyBucketAlgorithm } from '../src/engine/algorithms/leaky-bucket';
+import type { RateLimitStorage } from '../src/types';
+
+const scope = 'user' as const;
+const delay = (ms = 0) => new Promise((resolve) => setTimeout(resolve, ms));
+
+/**
+ * Test storage that delays sorted-set calls to simulate contention.
+ *
+ * @implements RateLimitStorage
+ */
+class DelayedSlidingWindowStorage implements RateLimitStorage {
+ private readonly kv = new Map<string, unknown>();
+ private readonly zset = new MemoryRateLimitStorage();
+
+ async get<T>(key: string): Promise<T | null> {
+ return (this.kv.get(key) as T) ?? null;
+ }
+
+ async set<T>(key: string, value: T): Promise<void> {
+ this.kv.set(key, value);
+ }
+
+ async delete(key: string): Promise<void> {
+ this.kv.delete(key);
+ await this.zset.delete(key);
+ }
+
+ async zAdd(key: string, score: number, member: string): Promise<void> {
+ await delay();
+ await this.zset.zAdd!(key, score, member);
+ }
+
+ async zRemRangeByScore(key: string, min: number, max: number): Promise<void> {
+ await delay();
+ await this.zset.zRemRangeByScore!(key, min, max);
+ }
+
+ async zCard(key: string): Promise<number> {
+ await delay();
+ return this.zset.zCard!(key);
+ }
+
+ async zRangeByScore(
+ key: string,
+ min: number,
+ max: number,
+ ): Promise<string[]> {
+ await delay();
+ return this.zset.zRangeByScore!(key, min, max);
+ }
+
+ async expire(key: string, ttlMs: number): Promise<void> {
+ await delay();
+ await this.zset.expire!(key, ttlMs);
+ }
+}
+
+/**
+ * Test storage that delays key/value calls for fixed-window tests.
+ *
+ * @implements RateLimitStorage
+ */
+class DelayedFixedWindowStorage implements RateLimitStorage {
+ private readonly kv = new Map<string, unknown>();
+
+ async get<T>(key: string): Promise<T | null> {
+ await delay();
+ return (this.kv.get(key) as T) ?? null;
+ }
+
+ async set<T>(key: string, value: T): Promise<void> {
+ await delay();
+ this.kv.set(key, value);
+ }
+
+ async delete(key: string): Promise<void> {
+ this.kv.delete(key);
+ }
+}
+
+afterEach(() => {
+ vi.useRealTimers();
+});
+
+describe('FixedWindowAlgorithm', () => {
+ test('limits after max requests and resets after interval', async () => {
+ vi.useFakeTimers();
+ vi.setSystemTime(0);
+
+ const storage = new MemoryRateLimitStorage();
+ const algorithm = new FixedWindowAlgorithm(storage, {
+ maxRequests: 2,
+ intervalMs: 1000,
+ scope,
+ });
+
+ const r1 = await algorithm.consume('key');
+ const r2 = await algorithm.consume('key');
+ const r3 = await algorithm.consume('key');
+
+ expect(r1.limited).toBe(false);
+ expect(r2.limited).toBe(false);
+ expect(r3.limited).toBe(true);
+ expect(r3.retryAfter).toBeGreaterThan(0);
+
+ vi.advanceTimersByTime(1000);
+ const r4 = await algorithm.consume('key');
+ expect(r4.limited).toBe(false);
+ });
+
+ test('serializes fallback consumes per key', async () => {
+ const storage = new DelayedFixedWindowStorage();
+ const algorithm = new FixedWindowAlgorithm(storage, {
+ maxRequests: 1,
+ intervalMs: 1000,
+ scope,
+ });
+
+ const results = await Promise.all([
+ algorithm.consume('key'),
+ algorithm.consume('key'),
+ ]);
+
+ const limitedCount = results.filter((result) => result.limited).length;
+ expect(limitedCount).toBe(1);
+ });
+});
+
+describe('SlidingWindowLogAlgorithm', () => {
+ test('enforces window and allows after it passes', async () => {
+ vi.useFakeTimers();
+ vi.setSystemTime(0);
+
+ const storage = new MemoryRateLimitStorage();
+ const algorithm = new SlidingWindowLogAlgorithm(storage, {
+ maxRequests: 2,
+ intervalMs: 1000,
+ scope,
+ });
+
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ expect((await algorithm.consume('key')).limited).toBe(true);
+
+ await vi.advanceTimersByTimeAsync(1000);
+
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ });
+
+ test('serializes fallback log consumes per key', async () => {
+ const storage = new DelayedSlidingWindowStorage();
+ const algorithm = new SlidingWindowLogAlgorithm(storage, {
+ maxRequests: 1,
+ intervalMs: 1000,
+ scope,
+ });
+
+ const results = await Promise.all([
+ algorithm.consume('key'),
+ algorithm.consume('key'),
+ ]);
+
+ const limitedCount = results.filter((result) => result.limited).length;
+ expect(limitedCount).toBe(1);
+ });
+});
+
+describe('TokenBucketAlgorithm', () => {
+ test('refills over time', async () => {
+ vi.useFakeTimers();
+ vi.setSystemTime(0);
+
+ const storage = new MemoryRateLimitStorage();
+ const algorithm = new TokenBucketAlgorithm(storage, {
+ capacity: 2,
+ refillRate: 1,
+ scope,
+ });
+
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ const limited = await algorithm.consume('key');
+ expect(limited.limited).toBe(true);
+ expect(limited.retryAfter).toBeGreaterThan(0);
+
+ await vi.advanceTimersByTimeAsync(1000);
+
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ });
+});
+
+describe('LeakyBucketAlgorithm', () => {
+ test('drains over time', async () => {
+ vi.useFakeTimers();
+ vi.setSystemTime(0);
+
+ const storage = new MemoryRateLimitStorage();
+ const algorithm = new LeakyBucketAlgorithm(storage, {
+ capacity: 2,
+ leakRate: 1,
+ scope,
+ });
+
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ const limited = await algorithm.consume('key');
+ expect(limited.limited).toBe(true);
+ expect(limited.retryAfter).toBeGreaterThan(0);
+
+ await vi.advanceTimersByTimeAsync(1000);
+
+ expect((await algorithm.consume('key')).limited).toBe(false);
+ });
+});
diff --git a/packages/ratelimit/spec/api.test.ts b/packages/ratelimit/spec/api.test.ts
new file mode 100644
index 00000000..07a21198
--- /dev/null
+++ b/packages/ratelimit/spec/api.test.ts
@@ -0,0 +1,142 @@
+/**
+ * API helper tests.
+ *
+ * Uses in-memory storage to keep exemption/reset tests isolated.
+ */
+
+import { afterEach, describe, expect, test } from 'vitest';
+import { MemoryRateLimitStorage } from '../src/storage/memory';
+import {
+ grantRateLimitExemption,
+ listRateLimitExemptions,
+ resetAllRateLimits,
+ resetRateLimit,
+ revokeRateLimitExemption,
+} from '../src/api';
+import { setRateLimitRuntime, setRateLimitStorage } from '../src/runtime';
+import type { RateLimitRuntimeContext, RateLimitStorage } from '../src/types';
+import { buildExemptionKey } from '../src/utils/keys';
+
+/**
+ * Configure runtime + storage for API helpers under test.
+ */
+function setRuntime(storage: RateLimitStorage) {
+ setRateLimitStorage(storage);
+ const runtime: RateLimitRuntimeContext = {
+ storage,
+ defaultLimiter: {},
+ };
+ setRateLimitRuntime(runtime);
+}
+
+afterEach(() => {
+ setRateLimitRuntime(null);
+ setRateLimitStorage(null as unknown as RateLimitStorage);
+});
+
+describe('ratelimit API', () => {
+ test('grant/list/revoke exemptions', async () => {
+ const storage = new MemoryRateLimitStorage();
+ setRuntime(storage);
+
+ await grantRateLimitExemption({
+ scope: 'user',
+ id: 'user-1',
+ duration: '1h',
+ });
+
+ const list = await listRateLimitExemptions({ scope: 'user', id: 'user-1' });
+ expect(list).toHaveLength(1);
+ expect(list[0]?.id).toBe('user-1');
+
+ await revokeRateLimitExemption({ scope: 'user', id: 'user-1' });
+ const after = await listRateLimitExemptions({
+ scope: 'user',
+ id: 'user-1',
+ });
+ expect(after).toHaveLength(0);
+ });
+
+ test('resetRateLimit removes violations and window variants', async () => {
+ const storage = new MemoryRateLimitStorage();
+ setRuntime(storage);
+
+ const key = 'rl:user:user-1:ping';
+ await storage.set(key, { count: 1 }, 1000);
+ await storage.set(`violation:${key}`, { count: 1 }, 1000);
+ await storage.set(`${key}:w:short`, { count: 1 }, 1000);
+ await storage.set(`violation:${key}:w:short`, { count: 1 }, 1000);
+
+ await resetRateLimit({ key });
+
+ expect(await storage.get(key)).toBeNull();
+ expect(await storage.get(`violation:${key}`)).toBeNull();
+ expect(await storage.get(`${key}:w:short`)).toBeNull();
+ expect(await storage.get(`violation:${key}:w:short`)).toBeNull();
+ });
+
+ test('resetAllRateLimits supports commandName pattern deletes', async () => {
+ const storage = new MemoryRateLimitStorage();
+ setRuntime(storage);
+
+ const keys = [
+ 'rl:user:user-1:ping',
+ 'rl:user:user-2:ping',
+ 'rl:user:user-3:pong',
+ ];
+
+ for (const key of keys) {
+ await storage.set(key, { count: 1 }, 1000);
+ }
+
+ await resetAllRateLimits({ commandName: 'ping' });
+
+ expect(await storage.get('rl:user:user-1:ping')).toBeNull();
+ expect(await storage.get('rl:user:user-2:ping')).toBeNull();
+ expect(await storage.get('rl:user:user-3:pong')).not.toBeNull();
+ });
+
+ test('resetAllRateLimits throws when pattern deletes are unsupported', async () => {
+ const storage: RateLimitStorage = {
+ get: async () => null,
+ set: async () => undefined,
+ delete: async () => undefined,
+ };
+
+ setRuntime(storage);
+
+ await expect(resetAllRateLimits({ commandName: 'ping' })).rejects.toThrow(
+ 'Storage does not support pattern deletes',
+ );
+ });
+
+ test('throws when storage is missing', async () => {
+ setRateLimitRuntime(null);
+ setRateLimitStorage(null as unknown as RateLimitStorage);
+
+ await expect(
+ grantRateLimitExemption({
+ scope: 'user',
+ id: 'user-1',
+ duration: '1h',
+ }),
+ ).rejects.toThrow('Rate limit storage not configured');
+ });
+
+ test('listRateLimitExemptions uses prefix listing', async () => {
+ const storage = new MemoryRateLimitStorage();
+ setRuntime(storage);
+
+ const keyPrefix = 'custom:';
+ const userKey = buildExemptionKey('user', 'user-1', keyPrefix);
+ const guildKey = buildExemptionKey('guild', 'guild-1', keyPrefix);
+
+ await storage.set(userKey, true, 1000);
+ await storage.set(guildKey, true, 1000);
+
+ const list = await listRateLimitExemptions({ keyPrefix });
+ expect(list.map((entry) => entry.key).sort()).toEqual(
+ [guildKey, userKey].sort(),
+ );
+ });
+});
diff --git a/packages/ratelimit/spec/directive.test.ts b/packages/ratelimit/spec/directive.test.ts
new file mode 100644
index 00000000..0e54f7ce
--- /dev/null
+++ b/packages/ratelimit/spec/directive.test.ts
@@ -0,0 +1,85 @@
+import { afterEach, beforeEach, describe, expect, test } from 'vitest';
+import { RateLimitPlugin } from '../src/plugin';
+import { MemoryRateLimitStorage } from '../src/storage/memory';
+import { RateLimitError } from '../src/errors';
+import { configureRatelimit } from '../src/configure';
+import { createRuntimeContext } from './helpers';
+import { setRateLimitRuntime, setRateLimitStorage } from '../src/runtime';
+import type { RateLimitStorage } from '../src/types';
+
+describe('RateLimit directive', () => {
+ beforeEach(() => {
+ configureRatelimit({});
+ });
+
+ afterEach(() => {
+ setRateLimitRuntime(null);
+ setRateLimitStorage(null as unknown as RateLimitStorage);
+ });
+
+ test('enforces limits via runtime plugin', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 1, interval: 1000 },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const arrow = async () => {
+ 'use ratelimit';
+ return 'ok';
+ };
+
+ async function declared() {
+ 'use ratelimit';
+ return 'ok';
+ }
+
+ const expressed = async function () {
+ 'use ratelimit';
+ return 'ok';
+ };
+
+ const obj = {
+ async method() {
+ 'use ratelimit';
+ return 'ok';
+ },
+ };
+
+ const cases = [arrow, declared, expressed, obj.method];
+
+ for (const fn of cases) {
+ await fn();
+ let thrown: unknown;
+ try {
+ await fn();
+ } catch (error) {
+ thrown = error;
+ }
+
+ expect(thrown).toBeInstanceOf(RateLimitError);
+ if (!(thrown instanceof RateLimitError)) {
+ throw thrown;
+ }
+ expect(thrown.result.limited).toBe(true);
+ expect(thrown.result.retryAfter).toBeGreaterThan(0);
+ }
+ });
+
+ test('throws when runtime is not initialized', async () => {
+ setRateLimitRuntime(null);
+ setRateLimitStorage(null as unknown as RateLimitStorage);
+
+ const fn = async () => {
+ 'use ratelimit';
+ return 'ok';
+ };
+
+ await expect(fn()).rejects.toThrow(
+ 'RateLimit runtime is not initialized. Register the RateLimitPlugin first.',
+ );
+ });
+});
diff --git a/packages/ratelimit/spec/engine.test.ts b/packages/ratelimit/spec/engine.test.ts
new file mode 100644
index 00000000..e7717615
--- /dev/null
+++ b/packages/ratelimit/spec/engine.test.ts
@@ -0,0 +1,58 @@
+/**
+ * Engine escalation tests.
+ *
+ * Fake timers keep violation cooldowns deterministic.
+ */
+
+import { afterEach, describe, expect, test, vi } from 'vitest';
+import { RateLimitEngine } from '../src/engine/RateLimitEngine';
+import { MemoryRateLimitStorage } from '../src/storage/memory';
+import type { ResolvedLimiterConfig } from '../src/types';
+
+const scope = 'user' as const;
+
+afterEach(() => {
+ vi.useRealTimers();
+});
+
+describe('RateLimitEngine violations', () => {
+ test('escalates cooldown when violations repeat', async () => {
+ vi.useFakeTimers();
+ vi.setSystemTime(0);
+
+ const storage = new MemoryRateLimitStorage();
+ const engine = new RateLimitEngine(storage);
+
+ const config: ResolvedLimiterConfig = {
+ maxRequests: 1,
+ intervalMs: 1000,
+ algorithm: 'fixed-window',
+ scope,
+ burst: 1,
+ refillRate: 1,
+ leakRate: 1,
+ violations: {
+ maxViolations: 3,
+ escalationMultiplier: 2,
+ resetAfter: 60_000,
+ },
+ };
+
+ const first = await engine.consume('key', config);
+ expect(first.result.limited).toBe(false);
+
+ const second = await engine.consume('key', config);
+ expect(second.result.limited).toBe(true);
+ expect(second.violationCount).toBe(1);
+
+ await vi.advanceTimersByTimeAsync(1000);
+
+ const third = await engine.consume('key', config);
+ expect(third.result.limited).toBe(false);
+
+ const fourth = await engine.consume('key', config);
+ expect(fourth.result.limited).toBe(true);
+ expect(fourth.violationCount).toBe(2);
+ expect(fourth.result.retryAfter).toBeGreaterThanOrEqual(2000);
+ });
+});
diff --git a/packages/ratelimit/spec/helpers.ts b/packages/ratelimit/spec/helpers.ts
new file mode 100644
index 00000000..cefad775
--- /dev/null
+++ b/packages/ratelimit/spec/helpers.ts
@@ -0,0 +1,177 @@
+/**
+ * Test helpers for ratelimit specs.
+ *
+ * Provides lightweight stubs for Discord and CommandKit so tests stay focused
+ * on rate limit behavior without a live client.
+ */
+
+import { Collection, Message } from 'discord.js';
+import { vi } from 'vitest';
+import type { Interaction } from 'discord.js';
+
+export interface InteractionStubOptions {
+ userId?: string;
+ guildId?: string | null;
+ channelId?: string | null;
+ parentId?: string | null;
+ replied?: boolean;
+ deferred?: boolean;
+ roleIds?: string[];
+}
+
+/**
+ * Build an Interaction-like stub with only the fields the plugin reads.
+ *
+ * Keeps tests fast without a live Discord client.
+ *
+ * @param options - Overrides for interaction fields used in tests.
+ * @returns Interaction stub matching the minimal plugin contract.
+ */
+export function createInteractionStub(options: InteractionStubOptions = {}) {
+ const interaction = {
+ reply: vi.fn(async () => undefined),
+ followUp: vi.fn(async () => undefined),
+ deferReply: vi.fn(async () => undefined),
+ isRepliable: vi.fn(() => true),
+ replied: options.replied ?? false,
+ deferred: options.deferred ?? false,
+ user: { id: options.userId ?? 'user-1' },
+ guildId: options.guildId ?? 'guild-1',
+ channelId: options.channelId ?? 'channel-1',
+ channel: { parentId: options.parentId ?? 'category-1' },
+ member: options.roleIds ? { roles: options.roleIds } : null,
+ } as Interaction & {
+ reply: ReturnType<typeof vi.fn>;
+ followUp: ReturnType<typeof vi.fn>;
+ deferReply: ReturnType<typeof vi.fn>;
+ isRepliable: ReturnType<typeof vi.fn>;
+ replied: boolean;
+ deferred: boolean;
+ user: { id: string } | null;
+ guildId: string | null;
+ channelId: string | null;
+ channel: { parentId: string | null } | null;
+ member: { roles: string[] } | null;
+ };
+
+ return interaction;
+}
+
+export interface MessageStubOptions {
+ userId?: string;
+ guildId?: string | null;
+ channelId?: string | null;
+ parentId?: string | null;
+ roleIds?: string[];
+}
+
+/**
+ * Build a Message-like stub with minimal fields used by rate limit logic.
+ *
+ * @param options - Overrides for message fields used in tests.
+ * @returns Message stub matching the minimal plugin contract.
+ */
+export function createMessageStub(options: MessageStubOptions = {}) {
+ const message = Object.create(Message.prototype) as Message & {
+ reply: ReturnType<typeof vi.fn>;
+ author: { id: string } | null;
+ guildId: string | null;
+ channelId: string | null;
+ channel: { parentId: string | null; isSendable: () => boolean } | null;
+ member: { roles: string[] } | null;
+ };
+
+ message.reply = vi.fn(async () => undefined);
+ message.author = { id: options.userId ?? 'user-1' };
+ message.guildId = options.guildId ?? 'guild-1';
+ message.channelId = options.channelId ?? 'channel-1';
+ message.channel = {
+ parentId: options.parentId ?? 'category-1',
+ isSendable: () => true,
+ };
+ message.member = options.roleIds ? { roles: options.roleIds } : null;
+
+ return message;
+}
+
+/**
+ * Create a minimal CommandKit env with a store for plugin results.
+ *
+ * @param commandName - Command name to seed into the context.
+ * @returns Minimal CommandKit environment for plugin tests.
+ */
+export function createEnv(commandName = 'ping') {
+ return {
+ context: { commandName },
+ store: new Collection(),
+ } as const;
+}
+
+/**
+ * Create a runtime context with stubbed analytics and capture hooks.
+ *
+ * @param overrides - Optional overrides for command arrays.
+ * @returns Runtime context and stubbed helpers.
+ */
+export function createRuntimeContext(
+ overrides: {
+ commands?: any[];
+ } = {},
+) {
+ const analyticsTrack = vi.fn(async () => undefined);
+ const capture = vi.fn();
+ const eventsEmit = vi.fn();
+ const eventsTo = vi.fn(() => ({ emit: eventsEmit }));
+
+ const commandkit = {
+ analytics: { track: analyticsTrack },
+ commandHandler: {
+ getCommandsArray: () => overrides.commands ?? [],
+ },
+ events: {
+ to: eventsTo,
+ },
+ };
+
+ return {
+ ctx: { commandkit, capture },
+ analyticsTrack,
+ capture,
+ eventsEmit,
+ eventsTo,
+ };
+}
+
+/**
+ * Build a prepared command shape for plugin tests.
+ *
+ * @param options - Command metadata overrides.
+ * @returns Prepared command payload for plugin tests.
+ */
+export function createPreparedCommand(options: {
+ name?: string;
+ metadata?: any;
+ path?: string;
+}) {
+ const name = options.name ?? 'ping';
+ return {
+ command: {
+ discordId: null,
+ command: {
+ id: 'cmd-1',
+ name,
+ path: options.path ?? 'C:/commands/ping.ts',
+ relativePath: 'ping.ts',
+ parentPath: 'C:/commands',
+ middlewares: [],
+ category: null,
+ },
+ metadata: options.metadata ?? {},
+ data: {
+ command: { name },
+ metadata: options.metadata ?? {},
+ },
+ },
+ middlewares: [],
+ } as const;
+}
diff --git a/packages/ratelimit/spec/plugin.test.ts b/packages/ratelimit/spec/plugin.test.ts
new file mode 100644
index 00000000..d7e01545
--- /dev/null
+++ b/packages/ratelimit/spec/plugin.test.ts
@@ -0,0 +1,349 @@
+/**
+ * Plugin integration tests.
+ *
+ * Uses stubs to keep plugin tests fast and offline.
+ */
+
+import { afterEach, beforeEach, describe, expect, test, vi } from 'vitest';
+import { MessageFlags } from 'discord.js';
+import { RateLimitPlugin } from '../src/plugin';
+import { MemoryRateLimitStorage } from '../src/storage/memory';
+import { RATELIMIT_STORE_KEY } from '../src/constants';
+import { setRateLimitRuntime, setRateLimitStorage } from '../src/runtime';
+import { configureRatelimit } from '../src/configure';
+import {
+ createEnv,
+ createInteractionStub,
+ createPreparedCommand,
+ createRuntimeContext,
+} from './helpers';
+import type { RateLimitStorage } from '../src/types';
+
+afterEach(() => {
+ setRateLimitRuntime(null);
+ setRateLimitStorage(null as unknown as RateLimitStorage);
+ vi.useRealTimers();
+});
+
+describe('RateLimitPlugin', () => {
+ beforeEach(() => {
+ configureRatelimit({});
+ });
+
+ test('allows first request and stores result', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 2, interval: 1000 },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub();
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+ const execute = vi.fn(async () => undefined);
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ execute,
+ );
+
+ const stored = env.store.get(RATELIMIT_STORE_KEY);
+ expect(stored?.limited).toBe(false);
+ expect(interaction.reply).not.toHaveBeenCalled();
+ expect(execute).not.toHaveBeenCalled();
+ expect(runtime.capture).not.toHaveBeenCalled();
+ });
+
+ test('replies when limit is exceeded', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 1, interval: 1000 },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub();
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ const stored = env.store.get(RATELIMIT_STORE_KEY);
+ expect(stored?.limited).toBe(true);
+ expect(stored?.retryAfter).toBeGreaterThan(0);
+ expect(interaction.reply).toHaveBeenCalledTimes(1);
+
+ const [payload] = interaction.reply.mock.calls[0];
+ expect(payload.flags).toBe(MessageFlags.Ephemeral);
+ expect(runtime.capture).toHaveBeenCalled();
+ });
+
+ test('emits ratelimited event when blocked', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 1, interval: 1000 },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub();
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ expect(runtime.eventsTo).toHaveBeenCalledWith('ratelimits');
+ expect(runtime.eventsEmit).toHaveBeenCalledTimes(1);
+ const [eventName, payload] = runtime.eventsEmit.mock.calls[0];
+ expect(eventName).toBe('ratelimited');
+ expect(payload.commandName).toBe('ping');
+ expect(payload.queued).toBe(false);
+ expect(payload.aggregate.limited).toBe(true);
+ });
+
+ test('uses followUp when interaction already replied', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 1, interval: 1000 },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub({ replied: true });
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ expect(interaction.followUp).toHaveBeenCalledTimes(1);
+ });
+
+ test('queues execution when enabled', async () => {
+ vi.useFakeTimers();
+ vi.setSystemTime(0);
+
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 1, interval: 1000 },
+ queue: { enabled: true, timeout: '5s' },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub();
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+ const execute = vi.fn(async () => undefined);
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ execute,
+ );
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ execute,
+ );
+
+ expect(interaction.deferReply).toHaveBeenCalledTimes(1);
+ expect(execute).not.toHaveBeenCalled();
+ expect(runtime.capture).toHaveBeenCalled();
+
+ await vi.advanceTimersByTimeAsync(1100);
+
+ expect(execute).toHaveBeenCalledTimes(1);
+ });
+
+ test('applies role-specific limits', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: { maxRequests: 2, interval: 1000 },
+ roleLimits: {
+ 'role-1': { maxRequests: 1, interval: 1000 },
+ },
+ roleLimitStrategy: 'highest',
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub({ roleIds: ['role-1'] });
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ const stored = env.store.get(RATELIMIT_STORE_KEY);
+ expect(stored?.limited).toBe(true);
+ });
+
+ test('stores multi-window results', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({
+ storage,
+ defaultLimiter: {
+ algorithm: 'fixed-window',
+ scope: 'user',
+ windows: [
+ { id: 'short', maxRequests: 2, interval: '1s' },
+ { id: 'long', maxRequests: 5, interval: '1m' },
+ ],
+ },
+ });
+
+ const runtime = createRuntimeContext();
+ await plugin.activate(runtime.ctx as any);
+
+ const env = createEnv('ping');
+ const interaction = createInteractionStub();
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ metadata: { ratelimit: true },
+ });
+
+ await plugin.executeCommand(
+ runtime.ctx as any,
+ env as any,
+ interaction as any,
+ prepared as any,
+ vi.fn(async () => undefined),
+ );
+
+ const stored = env.store.get(RATELIMIT_STORE_KEY);
+ expect(stored?.results).toHaveLength(2);
+ expect(stored?.results?.map((r: any) => r.windowId)).toEqual([
+ 'short',
+ 'long',
+ ]);
+ expect(stored?.remaining).toBe(1);
+ });
+
+ test('performHMR resets matching command keys', async () => {
+ const storage = new MemoryRateLimitStorage();
+ const plugin = new RateLimitPlugin({ storage });
+
+ const commandPath = 'C:/commands/ping.ts';
+ const prepared = createPreparedCommand({
+ name: 'ping',
+ path: commandPath,
+ metadata: { ratelimit: true },
+ });
+
+ const runtime = createRuntimeContext({ commands: [prepared.command] });
+ await plugin.activate(runtime.ctx as any);
+
+ const key = 'rl:user:user-1:ping';
+ await storage.set(key, { count: 1 }, 1000);
+ await storage.set(`violation:${key}`, { count: 1 }, 1000);
+ await storage.set(`${key}:w:short`, { count: 1 }, 1000);
+
+ const event = {
+ path: commandPath,
+ accept: vi.fn(),
+ preventDefault: vi.fn(),
+ };
+
+ await plugin.performHMR(runtime.ctx as any, event as any);
+
+ expect(await storage.get(key)).toBeNull();
+ expect(await storage.get(`violation:${key}`)).toBeNull();
+ expect(await storage.get(`${key}:w:short`)).toBeNull();
+ expect(event.accept).toHaveBeenCalled();
+ expect(event.preventDefault).toHaveBeenCalled();
+ });
+});
diff --git a/packages/ratelimit/spec/setup.ts b/packages/ratelimit/spec/setup.ts
new file mode 100644
index 00000000..d360f0bc
--- /dev/null
+++ b/packages/ratelimit/spec/setup.ts
@@ -0,0 +1,12 @@
+/**
+ * Vitest setup for ratelimit specs.
+ *
+ * Restores the Console constructor so logging helpers behave consistently.
+ */
+
+import { Console } from 'node:console';
+
+const consoleAny = console as Console & { Console?: typeof Console };
+if (typeof consoleAny.Console !== 'function') {
+ consoleAny.Console = Console;
+}
diff --git a/packages/ratelimit/src/api.ts b/packages/ratelimit/src/api.ts
new file mode 100644
index 00000000..7110a4b2
--- /dev/null
+++ b/packages/ratelimit/src/api.ts
@@ -0,0 +1,348 @@
+/**
+ * Public rate limit helpers.
+ *
+ * Used by handlers and admin tools to inspect, reset, and manage exemptions.
+ */
+
+import type { CommandKitEnvironment, Context } from 'commandkit';
+import { RATELIMIT_STORE_KEY } from './constants';
+import { getRateLimitRuntime, getRateLimitStorage } from './runtime';
+import type {
+ RateLimitExemptionGrantParams,
+ RateLimitExemptionInfo,
+ RateLimitExemptionListParams,
+ RateLimitExemptionRevokeParams,
+ RateLimitScope,
+ RateLimitStorage,
+ RateLimitStoreValue,
+} from './types';
+import {
+ buildExemptionKey,
+ buildExemptionPrefix,
+ buildScopePrefix,
+ parseExemptionKey,
+} from './utils/keys';
+import { resolveDuration } from './utils/time';
+
+/**
+ * Parameters for resetting a single key or scope-derived key.
+ */
+export interface ResetRateLimitParams {
+ key?: string;
+ scope?: RateLimitScope;
+ userId?: string;
+ guildId?: string;
+ channelId?: string;
+ commandName?: string;
+ keyPrefix?: string;
+}
+
+/**
+ * Parameters for batch resets by scope, prefix, or pattern.
+ */
+export interface ResetAllRateLimitsParams {
+ scope?: RateLimitScope;
+ userId?: string;
+ guildId?: string;
+ channelId?: string;
+ commandName?: string;
+ keyPrefix?: string;
+ pattern?: string;
+ prefix?: string;
+}
+
+/**
+ * Read aggregated rate limit info stored on a CommandKit env or context.
+ *
+ * @param envOrCtx - CommandKit environment or context holding the rate-limit store.
+ * @returns Aggregated rate-limit info or null when no store is present.
+ */
+export function getRateLimitInfo(
+ envOrCtx: CommandKitEnvironment | Context | null | undefined,
+): RateLimitStoreValue | null {
+ if (!envOrCtx) return null;
+ const store = 'store' in envOrCtx ? envOrCtx.store : null;
+ if (!store) return null;
+ return (store.get(RATELIMIT_STORE_KEY) as RateLimitStoreValue) ?? null;
+}
+
+/**
+ * Resolve the active storage or throw when none is configured.
+ *
+ * @returns Configured rate-limit storage.
+ * @throws Error when storage is not configured.
+ */
+function getRequiredStorage(): RateLimitStorage {
+ return getRuntimeStorage().storage;
+}
+
+/**
+ * Resolve runtime context plus the effective storage to use.
+ *
+ * @returns Runtime context (if any) and the resolved storage.
+ * @throws Error when storage is not configured.
+ */
+function getRuntimeStorage(): {
+  runtime: ReturnType<typeof getRateLimitRuntime>;
+ storage: RateLimitStorage;
+} {
+ const runtime = getRateLimitRuntime();
+ const storage = runtime?.storage ?? getRateLimitStorage();
+ if (!storage) {
+ throw new Error('Rate limit storage not configured');
+ }
+ return { runtime, storage };
+}
+
+/**
+ * Normalize a prefix to include the window suffix marker.
+ *
+ * @param prefix - Base key prefix.
+ * @returns Prefix guaranteed to end with `w:`.
+ */
+function toWindowPrefix(prefix: string): string {
+ return prefix.endsWith(':') ? `${prefix}w:` : `${prefix}:w:`;
+}
+
+/**
+ * Reset a single key and its violation/window variants to keep state consistent.
+ *
+ * @param params - Reset parameters for a single key or scope-derived key.
+ * @returns Resolves when deletes and reset hooks (if any) complete.
+ * @throws Error when required scope identifiers are missing.
+ */
+export async function resetRateLimit(
+ params: ResetRateLimitParams,
+): Promise<void> {
+ const storage = getRequiredStorage();
+ const hooks = getRateLimitRuntime()?.hooks;
+
+ if (params.key) {
+ await storage.delete(params.key);
+ await storage.delete(`violation:${params.key}`);
+ await deleteWindowVariants(storage, params.key);
+ if (hooks?.onReset) {
+ await hooks.onReset(params.key);
+ }
+ return;
+ }
+
+ if (!params.scope || !params.commandName) {
+ throw new Error(
+ 'scope and commandName are required when key is not provided',
+ );
+ }
+
+ const prefix = buildScopePrefix(params.scope, params.keyPrefix, {
+ userId: params.userId,
+ guildId: params.guildId,
+ channelId: params.channelId,
+ });
+
+ if (!prefix) {
+ throw new Error('Missing identifiers for scope');
+ }
+
+ const key = `${prefix}${params.commandName}`;
+ await storage.delete(key);
+ await storage.delete(`violation:${key}`);
+ await deleteWindowVariants(storage, key);
+ if (hooks?.onReset) {
+ await hooks.onReset(key);
+ }
+}
+
+/**
+ * Reset multiple keys by scope, command name, prefix, or pattern for bulk cleanup.
+ *
+ * @param params - Batch reset parameters, defaulting to an empty config.
+ * @returns Resolves when all matching keys are deleted.
+ * @throws Error when the storage backend lacks required delete helpers.
+ * @throws Error when scope identifiers are missing for scope-based resets.
+ */
+export async function resetAllRateLimits(
+ params: ResetAllRateLimitsParams = {},
+): Promise<void> {
+ const storage = getRequiredStorage();
+
+ if (params.pattern) {
+ if (!storage.deleteByPattern) {
+ throw new Error('Storage does not support pattern deletes');
+ }
+ await storage.deleteByPattern(params.pattern);
+ await storage.deleteByPattern(`violation:${params.pattern}`);
+ await storage.deleteByPattern(`${params.pattern}:w:*`);
+ await storage.deleteByPattern(`violation:${params.pattern}:w:*`);
+ return;
+ }
+
+ if (params.prefix) {
+ if (!storage.deleteByPrefix) {
+ throw new Error('Storage does not support prefix deletes');
+ }
+ const windowPrefix = toWindowPrefix(params.prefix);
+ await storage.deleteByPrefix(params.prefix);
+ await storage.deleteByPrefix(`violation:${params.prefix}`);
+ await storage.deleteByPrefix(windowPrefix);
+ await storage.deleteByPrefix(`violation:${windowPrefix}`);
+ return;
+ }
+
+ if (params.commandName) {
+ if (!storage.deleteByPattern) {
+ throw new Error('Storage does not support pattern deletes');
+ }
+ const prefix = params.keyPrefix ?? '';
+ const pattern = `${prefix}*:${params.commandName}`;
+ await storage.deleteByPattern(pattern);
+ await storage.deleteByPattern(`violation:${pattern}`);
+ await storage.deleteByPattern(`${pattern}:w:*`);
+ await storage.deleteByPattern(`violation:${pattern}:w:*`);
+ return;
+ }
+
+ if (!params.scope) {
+ throw new Error('scope is required when commandName is not provided');
+ }
+
+ const scopePrefix = buildScopePrefix(params.scope, params.keyPrefix, {
+ userId: params.userId,
+ guildId: params.guildId,
+ channelId: params.channelId,
+ });
+
+ if (!scopePrefix) {
+ throw new Error('Missing identifiers for scope');
+ }
+
+ if (!storage.deleteByPrefix) {
+ throw new Error('Storage does not support prefix deletes');
+ }
+
+ const windowPrefix = toWindowPrefix(scopePrefix);
+ await storage.deleteByPrefix(scopePrefix);
+ await storage.deleteByPrefix(`violation:${scopePrefix}`);
+ await storage.deleteByPrefix(windowPrefix);
+ await storage.deleteByPrefix(`violation:${windowPrefix}`);
+}
+
+/**
+ * Grant a temporary exemption for a scope/id pair.
+ *
+ * @param params - Exemption scope, id, and duration.
+ * @returns Resolves when the exemption key is written.
+ * @throws Error when duration is missing or non-positive.
+ */
+export async function grantRateLimitExemption(
+ params: RateLimitExemptionGrantParams,
+): Promise<void> {
+ const { runtime, storage } = getRuntimeStorage();
+ const keyPrefix = params.keyPrefix ?? runtime?.keyPrefix;
+ const ttlMs = resolveDuration(params.duration, 0);
+
+ if (!ttlMs || ttlMs <= 0) {
+ throw new Error('duration must be a positive value');
+ }
+
+ const key = buildExemptionKey(params.scope, params.id, keyPrefix);
+ await storage.set(key, true, ttlMs);
+}
+
+/**
+ * Revoke a temporary exemption for a scope/id pair.
+ *
+ * @param params - Exemption scope and id to revoke.
+ * @returns Resolves when the exemption key is removed.
+ */
+export async function revokeRateLimitExemption(
+ params: RateLimitExemptionRevokeParams,
+): Promise<void> {
+ const { runtime, storage } = getRuntimeStorage();
+ const keyPrefix = params.keyPrefix ?? runtime?.keyPrefix;
+ const key = buildExemptionKey(params.scope, params.id, keyPrefix);
+ await storage.delete(key);
+}
+
+/**
+ * List exemptions by scope and/or id for admin/reporting.
+ *
+ * @param params - Optional scope/id filters and limits.
+ * @returns Exemption info entries that match the requested filters.
+ * @throws Error when scope is required but missing or listing is unsupported.
+ */
+export async function listRateLimitExemptions(
+ params: RateLimitExemptionListParams = {},
+): Promise<RateLimitExemptionInfo[]> {
+ const { runtime, storage } = getRuntimeStorage();
+ const keyPrefix = params.keyPrefix ?? runtime?.keyPrefix;
+
+ if (params.id && !params.scope) {
+ throw new Error('scope is required when id is provided');
+ }
+
+ if (params.scope && params.id) {
+ const key = buildExemptionKey(params.scope, params.id, keyPrefix);
+ const exists = await storage.get(key);
+ if (!exists) return [];
+ const expiresInMs = storage.ttl ? await storage.ttl(key) : null;
+ return [
+ {
+ key,
+ scope: params.scope,
+ id: params.id,
+ expiresInMs,
+ },
+ ];
+ }
+
+ if (!storage.keysByPrefix) {
+ throw new Error('Storage does not support listing exemptions');
+ }
+
+ const prefix = buildExemptionPrefix(keyPrefix, params.scope);
+ const keys = await storage.keysByPrefix(prefix);
+ const results: RateLimitExemptionInfo[] = [];
+
+ for (const key of keys) {
+ const parsed = parseExemptionKey(key, keyPrefix);
+ if (!parsed) continue;
+ if (params.scope && parsed.scope !== params.scope) continue;
+
+ const expiresInMs = storage.ttl ? await storage.ttl(key) : null;
+ results.push({
+ key,
+ scope: parsed.scope,
+ id: parsed.id,
+ expiresInMs,
+ });
+
+ if (params.limit && results.length >= params.limit) {
+ break;
+ }
+ }
+
+ return results;
+}
+
+/**
+ * Delete windowed variants for a base key using available storage helpers.
+ *
+ * @param storage - Storage driver to delete from.
+ * @param key - Base key to delete window variants for.
+ * @returns Resolves after window variants are removed.
+ */
+async function deleteWindowVariants(
+ storage: RateLimitStorage,
+ key: string,
+): Promise<void> {
+ const prefix = `${key}:w:`;
+ if (storage.deleteByPrefix) {
+ await storage.deleteByPrefix(prefix);
+ await storage.deleteByPrefix(`violation:${prefix}`);
+ return;
+ }
+ if (storage.deleteByPattern) {
+ await storage.deleteByPattern(`${key}:w:*`);
+ await storage.deleteByPattern(`violation:${key}:w:*`);
+ }
+}
diff --git a/packages/ratelimit/src/augmentation.ts b/packages/ratelimit/src/augmentation.ts
new file mode 100644
index 00000000..94cdf913
--- /dev/null
+++ b/packages/ratelimit/src/augmentation.ts
@@ -0,0 +1,13 @@
+/**
+ * CommandKit metadata augmentation.
+ *
+ * Extends CommandKit metadata so commands can declare per-command limits.
+ */
+
+import type { RateLimitCommandConfig } from './types';
+
+declare module 'commandkit' {
+ interface CommandMetadata {
+ ratelimit?: RateLimitCommandConfig | boolean;
+ }
+}
diff --git a/packages/ratelimit/src/configure.ts b/packages/ratelimit/src/configure.ts
new file mode 100644
index 00000000..09149ee1
--- /dev/null
+++ b/packages/ratelimit/src/configure.ts
@@ -0,0 +1,104 @@
+/**
+ * Runtime configuration for the rate limit plugin.
+ *
+ * Mirrors configureAI so runtime options can be set outside commandkit.config
+ * before the plugin evaluates commands.
+ */
+
+import { DEFAULT_LIMITER } from './utils/config';
+import {
+ getRateLimitRuntime,
+ setRateLimitRuntime,
+ setRateLimitStorage,
+} from './runtime';
+import type {
+ RateLimitPluginOptions,
+ RateLimitRuntimeContext,
+ RateLimitStorage,
+ RateLimitStorageConfig,
+} from './types';
+
+const rateLimitConfig: RateLimitPluginOptions = {};
+let configured = false;
+
+/**
+ * Normalize a storage config into a storage driver instance.
+ *
+ * @param config - Storage config or driver.
+ * @returns Storage driver instance or null when not configured.
+ */
+function resolveStorage(
+ config: RateLimitStorageConfig,
+): RateLimitStorage | null {
+ if (!config) return null;
+ if (typeof config === 'object' && 'driver' in config) {
+ return config.driver;
+ }
+ return config;
+}
+
+/**
+ * Apply updated config to the active runtime context.
+ *
+ * @param config - Runtime configuration updates.
+ * @returns Nothing; mutates the active runtime context when present.
+ */
+function updateRuntime(config: RateLimitPluginOptions): void {
+ const runtime = getRateLimitRuntime();
+ const storageOverride = config.storage
+ ? resolveStorage(config.storage)
+ : null;
+
+ if (storageOverride) {
+ setRateLimitStorage(storageOverride);
+ }
+
+ if (!runtime) {
+ return;
+ }
+
+ const nextRuntime: RateLimitRuntimeContext = {
+ storage: storageOverride ?? runtime.storage,
+ keyPrefix: config.keyPrefix ?? runtime.keyPrefix,
+ defaultLimiter:
+ config.defaultLimiter ?? runtime.defaultLimiter ?? DEFAULT_LIMITER,
+ limiters: config.limiters ?? runtime.limiters,
+ hooks: config.hooks ?? runtime.hooks,
+ };
+
+ setRateLimitRuntime(nextRuntime);
+}
+
+/**
+ * Returns true once configureRatelimit has been called.
+ *
+ * @returns True when runtime configuration has been initialized.
+ */
+export function isRateLimitConfigured(): boolean {
+ return configured;
+}
+
+/**
+ * Retrieves the current rate limit configuration.
+ *
+ * @returns The current in-memory rate limit config object.
+ */
+export function getRateLimitConfig(): RateLimitPluginOptions {
+ return rateLimitConfig;
+}
+
+/**
+ * Configures the rate limit plugin runtime options.
+ *
+ * Call this once during startup (for example in src/ratelimit.ts).
+ *
+ * @param config - Runtime options to merge into the active configuration.
+ * @returns Nothing; updates runtime state in place.
+ */
+export function configureRatelimit(
+ config: RateLimitPluginOptions = {},
+): void {
+ configured = true;
+ Object.assign(rateLimitConfig, config);
+ updateRuntime(config);
+}
diff --git a/packages/ratelimit/src/constants.ts b/packages/ratelimit/src/constants.ts
new file mode 100644
index 00000000..daa97c55
--- /dev/null
+++ b/packages/ratelimit/src/constants.ts
@@ -0,0 +1,19 @@
+/**
+ * Rate limit constants shared across runtime and tests.
+ *
+ * Keeps key names consistent across storage, runtime, and docs.
+ */
+
+/**
+ * Store key used to stash aggregated results in CommandKit envs.
+ *
+ * @default 'ratelimit'
+ */
+export const RATELIMIT_STORE_KEY = 'ratelimit';
+
+/**
+ * Default prefix for storage keys; can be overridden per config.
+ *
+ * @default 'rl:'
+ */
+export const DEFAULT_KEY_PREFIX = 'rl:';
diff --git a/packages/ratelimit/src/directive/use-ratelimit-directive.ts b/packages/ratelimit/src/directive/use-ratelimit-directive.ts
new file mode 100644
index 00000000..e2111f21
--- /dev/null
+++ b/packages/ratelimit/src/directive/use-ratelimit-directive.ts
@@ -0,0 +1,40 @@
+import {
+ CommonDirectiveTransformer,
+ type CommonDirectiveTransformerOptions,
+ type CompilerPluginRuntime,
+} from 'commandkit';
+
+/**
+ * Compiler plugin for the "use ratelimit" directive.
+ *
+ * @extends CommonDirectiveTransformer
+ */
+export class UseRateLimitDirectivePlugin extends CommonDirectiveTransformer {
+ public readonly name = 'UseRateLimitDirectivePlugin';
+
+ /**
+ * Create the directive compiler plugin with optional overrides.
+ *
+ * @param options - Common directive transformer overrides.
+ */
+  public constructor(options?: Partial<CommonDirectiveTransformerOptions>) {
+ super({
+ enabled: true,
+ ...options,
+ directive: 'use ratelimit',
+ importPath: '@commandkit/ratelimit',
+ importName: '$ckitirl',
+ asyncOnly: true,
+ });
+ }
+
+ /**
+ * Activate the compiler plugin in the current build runtime.
+ *
+ * @param ctx - Compiler plugin runtime.
+ * @returns Resolves after activation completes.
+ */
+  public async activate(ctx: CompilerPluginRuntime): Promise<void> {
+ await super.activate(ctx);
+ }
+}
diff --git a/packages/ratelimit/src/directive/use-ratelimit.ts b/packages/ratelimit/src/directive/use-ratelimit.ts
new file mode 100644
index 00000000..15e75f1b
--- /dev/null
+++ b/packages/ratelimit/src/directive/use-ratelimit.ts
@@ -0,0 +1,201 @@
+/**
+ * Runtime wrapper for the "use ratelimit" directive.
+ *
+ * Uses the runtime default limiter for arbitrary async functions.
+ * Throws RateLimitError when the call is limited.
+ */
+
+import { randomUUID } from 'node:crypto';
+import type { AsyncFunction, GenericFunction } from 'commandkit';
+import { RateLimitEngine } from '../engine/RateLimitEngine';
+import { RateLimitError } from '../errors';
+import type {
+ RateLimitLimiterConfig,
+ RateLimitResult,
+ RateLimitStorage,
+ RateLimitStoreValue,
+} from '../types';
+import {
+ DEFAULT_LIMITER,
+ mergeLimiterConfigs,
+ resolveLimiterConfigs,
+} from '../utils/config';
+import { getRateLimitRuntime } from '../runtime';
+import { DEFAULT_KEY_PREFIX } from '../constants';
+
+const RATELIMIT_FN_SYMBOL = Symbol('commandkit.ratelimit.directive');
+
+let cachedEngine: RateLimitEngine | null = null;
+let cachedStorage: RateLimitStorage | null = null;
+
+/**
+ * Resolve the cached engine instance for a storage backend.
+ *
+ * @param storage - Storage backend to associate with the engine.
+ * @returns Cached engine instance for the storage.
+ */
+function getEngine(storage: RateLimitStorage): RateLimitEngine {
+ /**
+ * Cache per storage instance so violation tracking stays consistent.
+ */
+ if (!cachedEngine || cachedStorage !== storage) {
+ cachedEngine = new RateLimitEngine(storage);
+ cachedStorage = storage;
+ }
+ return cachedEngine;
+}
+
+/**
+ * Apply an optional prefix to a storage key.
+ *
+ * @param prefix - Optional prefix to prepend.
+ * @param key - Base key to prefix.
+ * @returns Prefixed key.
+ */
+function withPrefix(prefix: string | undefined, key: string): string {
+ if (!prefix) return key;
+ return `${prefix}${key}`;
+}
+
+/**
+ * Append a window suffix to a key when a window id is present.
+ *
+ * @param key - Base storage key.
+ * @param windowId - Optional window identifier.
+ * @returns Key with window suffix when provided.
+ */
+function withWindowSuffix(key: string, windowId?: string): string {
+ if (!windowId) return key;
+ return `${key}:w:${windowId}`;
+}
+
+/**
+ * Merge a runtime default limiter with an override when provided.
+ *
+ * @param runtimeDefault - Runtime default limiter configuration.
+ * @param limiter - Optional override limiter.
+ * @returns Resolved limiter configuration.
+ */
+function resolveLimiter(
+ runtimeDefault: RateLimitLimiterConfig,
+ limiter?: RateLimitLimiterConfig,
+): RateLimitLimiterConfig {
+ if (!limiter) return runtimeDefault;
+ return mergeLimiterConfigs(runtimeDefault, limiter);
+}
+
+/**
+ * Wrap an async function with the runtime default limiter.
+ *
+ * Throws RateLimitError when the call exceeds limits.
+ *
+ * @template R - Argument tuple type for the wrapped async function.
+ * @template F - Async function type being wrapped.
+ * @param fn - Async function to wrap with rate limiting.
+ * @returns Wrapped async function that enforces the default limiter.
+ * @throws RateLimitError when the call exceeds limits.
+ */
+function useRateLimit<R extends unknown[], F extends AsyncFunction<R>>(fn: F): F {
+ if (Object.prototype.hasOwnProperty.call(fn, RATELIMIT_FN_SYMBOL)) {
+ return fn;
+ }
+
+ const fnId = randomUUID();
+
+ const wrapped = (async (...args: R) => {
+ const runtime = getRateLimitRuntime();
+ if (!runtime) {
+ throw new Error(
+ 'RateLimit runtime is not initialized. Register the RateLimitPlugin first.',
+ );
+ }
+
+ const limiterConfig = resolveLimiter(
+ mergeLimiterConfigs(DEFAULT_LIMITER, runtime.defaultLimiter),
+ );
+
+ const key = `${DEFAULT_KEY_PREFIX}fn:${fnId}`;
+ const finalKey = withPrefix(runtime.keyPrefix, key);
+
+ const engine = getEngine(runtime.storage);
+ const resolvedConfigs = resolveLimiterConfigs(limiterConfig, 'custom');
+ const results: RateLimitResult[] = [];
+ for (const resolved of resolvedConfigs) {
+ const resolvedKey = withWindowSuffix(finalKey, resolved.windowId);
+ const { result } = await engine.consume(resolvedKey, resolved);
+ results.push(result);
+ }
+
+ const aggregate = aggregateResults(results);
+ if (aggregate.limited) {
+ throw new RateLimitError(aggregate);
+ }
+
+ return fn(...args);
+ }) as F;
+
+ Object.defineProperty(wrapped, RATELIMIT_FN_SYMBOL, {
+ value: true,
+ configurable: false,
+ enumerable: false,
+ writable: false,
+ });
+
+ return wrapped;
+}
+
+/**
+ * Aggregate multiple rate-limit results into a single summary object.
+ *
+ * @param results - Individual limiter/window results.
+ * @returns Aggregated rate-limit store value.
+ */
+function aggregateResults(results: RateLimitResult[]): RateLimitStoreValue {
+ if (!results.length) {
+ return {
+ limited: false,
+ remaining: 0,
+ resetAt: 0,
+ retryAfter: 0,
+ results: [],
+ };
+ }
+
+ const limitedResults = results.filter((r) => r.limited);
+ const limited = limitedResults.length > 0;
+ const remaining = Math.min(...results.map((r) => r.remaining));
+ const resetAt = Math.max(...results.map((r) => r.resetAt));
+ const retryAfter = limited
+ ? Math.max(...limitedResults.map((r) => r.retryAfter))
+ : 0;
+
+ return {
+ limited,
+ remaining,
+ resetAt,
+ retryAfter,
+ results,
+ };
+}
+
+/**
+ * Wrapper symbol injected by the compiler plugin.
+ *
+ * @param fn - Generic function to wrap with runtime rate limiting.
+ * @returns Wrapped function that enforces the runtime default limiter.
+ */
+export const $ckitirl: GenericFunction = (fn: GenericFunction) => {
+ return useRateLimit(fn as AsyncFunction);
+};
+
+if (!('$ckitirl' in globalThis)) {
+ /**
+ * Expose the wrapper globally so directive transforms can call it.
+ */
+ Object.defineProperty(globalThis, '$ckitirl', {
+ value: $ckitirl,
+ configurable: false,
+ enumerable: false,
+ writable: false,
+ });
+}
diff --git a/packages/ratelimit/src/engine/RateLimitEngine.ts b/packages/ratelimit/src/engine/RateLimitEngine.ts
new file mode 100644
index 00000000..49b35512
--- /dev/null
+++ b/packages/ratelimit/src/engine/RateLimitEngine.ts
@@ -0,0 +1,167 @@
+/**
+ * Engine coordinator.
+ *
+ * Selects algorithms and applies violation escalation before returning results.
+ */
+
+import type {
+ RateLimitAlgorithm,
+ RateLimitAlgorithmType,
+ RateLimitResult,
+ RateLimitStorage,
+ ResolvedLimiterConfig,
+} from '../types';
+import { FixedWindowAlgorithm } from './algorithms/fixed-window';
+import { SlidingWindowLogAlgorithm } from './algorithms/sliding-window';
+import { TokenBucketAlgorithm } from './algorithms/token-bucket';
+import { LeakyBucketAlgorithm } from './algorithms/leaky-bucket';
+import { ViolationTracker } from './violations';
+
+/**
+ * Consume output including optional violation count for callers.
+ */
+export interface RateLimitConsumeOutput {
+ result: RateLimitResult;
+ violationCount?: number;
+}
+
+/**
+ * Coordinates algorithm selection and violation escalation per storage.
+ */
+export class RateLimitEngine {
+ private readonly violations: ViolationTracker;
+
+ /**
+ * Create a rate limit engine bound to a storage backend.
+ *
+ * @param storage - Storage backend for rate-limit state.
+ */
+ public constructor(private readonly storage: RateLimitStorage) {
+ this.violations = new ViolationTracker(storage);
+ }
+
+  /**
+   * Create an algorithm instance for a resolved config.
+   *
+   * @param config - Resolved limiter configuration.
+   * @returns Algorithm instance for the resolved config.
+   */
+ private createAlgorithm(config: ResolvedLimiterConfig): RateLimitAlgorithm {
+ switch (config.algorithm) {
+ case 'fixed-window':
+ return new FixedWindowAlgorithm(this.storage, {
+ maxRequests: config.maxRequests,
+ intervalMs: config.intervalMs,
+ scope: config.scope,
+ });
+ case 'sliding-window':
+ return new SlidingWindowLogAlgorithm(this.storage, {
+ maxRequests: config.maxRequests,
+ intervalMs: config.intervalMs,
+ scope: config.scope,
+ });
+ case 'token-bucket':
+ return new TokenBucketAlgorithm(this.storage, {
+ capacity: config.burst,
+ refillRate: config.refillRate,
+ scope: config.scope,
+ });
+ case 'leaky-bucket':
+ return new LeakyBucketAlgorithm(this.storage, {
+ capacity: config.burst,
+ leakRate: config.leakRate,
+ scope: config.scope,
+ });
+ default:
+ /**
+ * Fall back to fixed-window so unknown algorithms still enforce a limit.
+ */
+ return new FixedWindowAlgorithm(this.storage, {
+ maxRequests: config.maxRequests,
+ intervalMs: config.intervalMs,
+ scope: config.scope,
+ });
+ }
+ }
+
+ /**
+ * Consume a single key and apply escalation rules when enabled.
+ *
+ * @param key - Storage key for the limiter.
+ * @param config - Resolved limiter configuration.
+ * @returns Result plus optional violation count.
+ */
+ public async consume(
+ key: string,
+ config: ResolvedLimiterConfig,
+  ): Promise<RateLimitConsumeOutput> {
+ const now = Date.now();
+ const shouldEscalate =
+ config.violations != null && config.violations.escalate !== false;
+ if (shouldEscalate) {
+ const active = await this.violations.checkCooldown(key);
+ if (active) {
+ /**
+ * When an escalation cooldown is active, skip the algorithm to enforce the cooldown.
+ */
+ const limit =
+ config.algorithm === 'token-bucket' ||
+ config.algorithm === 'leaky-bucket'
+ ? config.burst
+ : config.maxRequests;
+ const result = {
+ key,
+ scope: config.scope,
+ algorithm: config.algorithm,
+ limited: true,
+ remaining: 0,
+ resetAt: active.cooldownUntil,
+ retryAfter: Math.max(0, active.cooldownUntil - now),
+ limit,
+ windowId: config.windowId,
+ };
+ return {
+ result,
+ violationCount: active.count,
+ };
+ }
+ }
+
+ const algorithm = this.createAlgorithm(config);
+ const result = await algorithm.consume(key);
+ if (config.windowId) {
+ result.windowId = config.windowId;
+ }
+
+ if (result.limited && shouldEscalate) {
+ const state = await this.violations.recordViolation(
+ key,
+ result.retryAfter,
+ config.violations,
+ );
+
+ /**
+ * If escalation extends the cooldown, update the result so retry info stays accurate.
+ */
+ if (state.cooldownUntil > result.resetAt) {
+ result.resetAt = state.cooldownUntil;
+ result.retryAfter = Math.max(0, state.cooldownUntil - now);
+ }
+
+ return { result, violationCount: state.count };
+ }
+
+ return { result };
+ }
+
+ /**
+ * Reset a key and its associated violation state.
+ *
+ * @param key - Storage key to reset.
+ * @returns Resolves after the key and violations are cleared.
+ */
+  public async reset(key: string): Promise<void> {
+ await this.storage.delete(key);
+ await this.violations.reset(key);
+ }
+}
diff --git a/packages/ratelimit/src/engine/algorithms/fixed-window.ts b/packages/ratelimit/src/engine/algorithms/fixed-window.ts
new file mode 100644
index 00000000..5b1cb6da
--- /dev/null
+++ b/packages/ratelimit/src/engine/algorithms/fixed-window.ts
@@ -0,0 +1,237 @@
+/**
+ * Fixed window rate limiting.
+ *
+ * Simple counters per window are fast and predictable, at the cost of allowing
+ * bursts within the window boundary. Prefer atomic storage for correctness.
+ */
+
+import type {
+ RateLimitAlgorithm,
+ RateLimitAlgorithmType,
+ RateLimitResult,
+ RateLimitStorage,
+} from '../../types';
+import { withStorageKeyLock } from '../../utils/locking';
+
+interface FixedWindowConfig {
+ maxRequests: number;
+ intervalMs: number;
+ scope: RateLimitResult['scope'];
+}
+
+interface FixedWindowState {
+ count: number;
+ resetAt: number;
+ version?: number;
+}
+
+/**
+ * Basic fixed-window counter for low-cost rate limits.
+ *
+ * @implements RateLimitAlgorithm
+ */
+export class FixedWindowAlgorithm implements RateLimitAlgorithm {
+ public readonly type: RateLimitAlgorithmType = 'fixed-window';
+
+ /**
+ * Create a fixed-window algorithm bound to a storage backend.
+ *
+ * @param storage - Storage backend for rate-limit state.
+ * @param config - Fixed-window configuration.
+ */
+ public constructor(
+ private readonly storage: RateLimitStorage,
+ private readonly config: FixedWindowConfig,
+ ) {}
+
+ /**
+ * Record one attempt and return the current window status for this key.
+ *
+ * @param key - Storage key for the limiter.
+ * @returns Rate limit result for the current window.
+ */
+  public async consume(key: string): Promise<RateLimitResult> {
+ const limit = this.config.maxRequests;
+ const interval = this.config.intervalMs;
+
+ if (this.storage.consumeFixedWindow) {
+ const now = Date.now();
+ const { count, ttlMs } = await this.storage.consumeFixedWindow(
+ key,
+ limit,
+ interval,
+ now,
+ );
+ const resetAt = now + ttlMs;
+ const limited = count > limit;
+ return {
+ key,
+ scope: this.config.scope,
+ algorithm: this.type,
+ limited,
+ remaining: Math.max(0, limit - count),
+ resetAt,
+ retryAfter: limited ? Math.max(0, resetAt - now) : 0,
+ limit,
+ };
+ }
+
+ if (this.storage.incr) {
+ const now = Date.now();
+ const { count, ttlMs } = await this.storage.incr(key, interval);
+ const resetAt = now + ttlMs;
+ const limited = count > limit;
+ return {
+ key,
+ scope: this.config.scope,
+ algorithm: this.type,
+ limited,
+ remaining: Math.max(0, limit - count),
+ resetAt,
+ retryAfter: limited ? Math.max(0, resetAt - now) : 0,
+ limit,
+ };
+ }
+
+ /**
+ * Fallback is serialized per process to avoid same-instance races.
+ * Multi-process strictness still requires atomic storage operations.
+ */
+ return withStorageKeyLock(this.storage, key, async () => {
+ const maxRetries = 5;
+ for (let attempt = 0; attempt < maxRetries; attempt++) {
+ const attemptNow = Date.now();
+ const existingRaw = await this.storage.get(key);
+ const existing = isFixedWindowState(existingRaw) ? existingRaw : null;
+
+ if (!existing || existing.resetAt <= attemptNow) {
+ const resetAt = attemptNow + interval;
+ const state: FixedWindowState = { count: 1, resetAt, version: 1 };
+ const currentRaw = await this.storage.get(key);
+ const current = isFixedWindowState(currentRaw) ? currentRaw : null;
+ if (current && current.resetAt > attemptNow) {
+ continue;
+ }
+ await this.storage.set(key, state, interval);
+ const verifyRaw = await this.storage.get(key);
+ const verify = isFixedWindowState(verifyRaw) ? verifyRaw : null;
+ if ((verify?.version ?? 0) !== 1) {
+ continue;
+ }
+ return {
+ key,
+ scope: this.config.scope,
+ algorithm: this.type,
+ limited: false,
+ remaining: Math.max(0, limit - 1),
+ resetAt,
+ retryAfter: 0,
+ limit,
+ };
+ }
+
+ if (existing.count >= limit) {
+ return {
+ key,
+ scope: this.config.scope,
+ algorithm: this.type,
+ limited: true,
+ remaining: 0,
+ resetAt: existing.resetAt,
+ retryAfter: Math.max(0, existing.resetAt - attemptNow),
+ limit,
+ };
+ }
+
+ let nextState: FixedWindowState = {
+ count: existing.count + 1,
+ resetAt: existing.resetAt,
+ version: (existing.version ?? 0) + 1,
+ };
+
+ const currentRaw = await this.storage.get(key);
+ const current = isFixedWindowState(currentRaw) ? currentRaw : null;
+ if (
+ !current ||
+ current.resetAt !== existing.resetAt ||
+ current.count !== existing.count ||
+ (current.version ?? 0) !== (existing.version ?? 0)
+ ) {
+ continue;
+ }
+
+ let ttlMs = existing.resetAt - attemptNow;
+ if (ttlMs <= 0) {
+ nextState = {
+ count: 1,
+ resetAt: attemptNow + interval,
+ version: 1,
+ };
+ ttlMs = interval;
+ }
+
+ await this.storage.set(key, nextState, ttlMs);
+ const verifyRaw = await this.storage.get(key);
+ const verify = isFixedWindowState(verifyRaw) ? verifyRaw : null;
+ if ((verify?.version ?? 0) !== (nextState.version ?? 0)) {
+ continue;
+ }
+
+ return {
+ key,
+ scope: this.config.scope,
+ algorithm: this.type,
+ limited: false,
+ remaining: Math.max(0, limit - nextState.count),
+ resetAt: nextState.resetAt,
+ retryAfter: 0,
+ limit,
+ };
+ }
+
+ const now = Date.now();
+ const resetAt = now + interval;
+ return {
+ key,
+ scope: this.config.scope,
+ algorithm: this.type,
+ limited: true,
+ remaining: 0,
+ resetAt,
+ retryAfter: Math.max(0, resetAt - now),
+ limit,
+ };
+ });
+ }
+
+ /**
+ * Reset the stored key state for this limiter.
+ *
+ * @param key - Storage key to reset.
+ * @returns Resolves after the key is deleted.
+ */
+  public async reset(key: string): Promise<void> {
+ await this.storage.delete(key);
+ }
+}
+
+/**
+ * Type guard for fixed-window state entries loaded from storage.
+ *
+ * @param value - Stored value to validate.
+ * @returns True when the value matches the FixedWindowState shape.
+ */
+function isFixedWindowState(value: unknown): value is FixedWindowState {
+ if (!value || typeof value !== 'object') return false;
+ const state = value as FixedWindowState;
+ const hasValidVersion =
+ state.version === undefined ||
+ (typeof state.version === 'number' && Number.isFinite(state.version));
+ return (
+ typeof state.count === 'number' &&
+ Number.isFinite(state.count) &&
+ typeof state.resetAt === 'number' &&
+ Number.isFinite(state.resetAt) &&
+ hasValidVersion
+ );
+}
diff --git a/packages/ratelimit/src/engine/algorithms/leaky-bucket.ts b/packages/ratelimit/src/engine/algorithms/leaky-bucket.ts
new file mode 100644
index 00000000..ac5d72f8
--- /dev/null
+++ b/packages/ratelimit/src/engine/algorithms/leaky-bucket.ts
@@ -0,0 +1,157 @@
+/**
+ * Leaky bucket rate limiting.
+ *
+ * Drains at a steady rate to smooth spikes in traffic.
+ * The stored level keeps limits consistent across commands.
+ */
+
+import type {
+ RateLimitAlgorithm,
+ RateLimitAlgorithmType,
+ RateLimitResult,
+ RateLimitStorage,
+} from '../../types';
+
+/** Configuration for the leaky-bucket limiter. */
+interface LeakyBucketConfig {
+  /** Maximum fill level before limiting. */
+  capacity: number;
+  /** Tokens drained per second during leak. */
+  leakRate: number;
+  /** Scope reported in rate-limit results. */
+  scope: RateLimitResult['scope'];
+}
+
+/** Persisted bucket snapshot stored per key. */
+interface LeakyBucketState {
+  /** Current fill level, in request units. */
+  level: number;
+  /** Epoch ms when the level was last drained. */
+  lastLeak: number;
+}
+
+/**
+ * Leaky bucket algorithm for smoothing output to a steady rate.
+ *
+ * @implements RateLimitAlgorithm
+ */
+export class LeakyBucketAlgorithm implements RateLimitAlgorithm {
+  public readonly type: RateLimitAlgorithmType = 'leaky-bucket';
+
+  /**
+   * Create a leaky-bucket algorithm bound to a storage backend.
+   *
+   * @param storage - Storage backend for rate-limit state.
+   * @param config - Leaky-bucket configuration.
+   */
+  public constructor(
+    private readonly storage: RateLimitStorage,
+    private readonly config: LeakyBucketConfig,
+  ) {}
+
+  /**
+   * Record one attempt and return the current bucket status for this key.
+   *
+   * @param key - Storage key for the limiter.
+   * @returns Rate limit result for the current bucket.
+   * @throws Error when leakRate is non-positive.
+   */
+  public async consume(key: string): Promise<RateLimitResult> {
+    const now = Date.now();
+    const { capacity, leakRate } = this.config;
+
+    if (leakRate <= 0) {
+      throw new Error('leakRate must be greater than 0');
+    }
+
+    const stored = await this.storage.get(key);
+    const state = isLeakyBucketState(stored)
+      ? stored
+      : ({ level: 0, lastLeak: now } satisfies LeakyBucketState);
+
+    // Drain the bucket for the time elapsed since the last observation.
+    const elapsedSeconds = Math.max(0, (now - state.lastLeak) / 1000);
+    const leaked = Math.max(0, state.level - elapsedSeconds * leakRate);
+
+    const nextState: LeakyBucketState = {
+      level: leaked,
+      lastLeak: now,
+    };
+
+    if (leaked + 1 > capacity) {
+      // Overflow: compute how long until enough drains for one request.
+      const overflow = leaked + 1 - capacity;
+      const retryAfter = Math.ceil((overflow / leakRate) * 1000);
+      const resetAt = now + retryAfter;
+      await this.storage.set(
+        key,
+        nextState,
+        estimateLeakyTtl(capacity, leakRate),
+      );
+      return {
+        key,
+        scope: this.config.scope,
+        algorithm: this.type,
+        limited: true,
+        remaining: 0,
+        resetAt,
+        retryAfter,
+        limit: capacity,
+      };
+    }
+
+    nextState.level = leaked + 1;
+    await this.storage.set(
+      key,
+      nextState,
+      estimateLeakyTtl(capacity, leakRate),
+    );
+
+    const remaining = Math.floor(Math.max(0, capacity - nextState.level));
+    // resetAt: when the bucket fully drains at the configured leak rate.
+    const resetAt = now + Math.ceil((nextState.level / leakRate) * 1000);
+
+    return {
+      key,
+      scope: this.config.scope,
+      algorithm: this.type,
+      limited: false,
+      remaining,
+      resetAt,
+      retryAfter: 0,
+      limit: capacity,
+    };
+  }
+
+  /**
+   * Reset the stored key state for this limiter.
+   *
+   * @param key - Storage key to reset.
+   * @returns Resolves after the key is deleted.
+   */
+  public async reset(key: string): Promise<void> {
+    await this.storage.delete(key);
+  }
+}
+
+/**
+ * Type guard for leaky-bucket state entries loaded from storage.
+ *
+ * @param value - Stored value to validate.
+ * @returns True when the value matches the LeakyBucketState shape.
+ */
+function isLeakyBucketState(value: unknown): value is LeakyBucketState {
+  if (typeof value !== 'object' || value === null) return false;
+  const candidate = value as LeakyBucketState;
+  const isFiniteNumber = (input: unknown): input is number =>
+    typeof input === 'number' && Number.isFinite(input);
+  return isFiniteNumber(candidate.level) && isFiniteNumber(candidate.lastLeak);
+}
+
+/**
+ * Estimate a TTL window large enough to cover full bucket drainage.
+ *
+ * Uses twice the full-drain duration; falls back to one minute when the
+ * leak rate is non-positive.
+ *
+ * @param capacity - Bucket capacity.
+ * @param leakRate - Tokens drained per second.
+ * @returns TTL in milliseconds.
+ */
+function estimateLeakyTtl(capacity: number, leakRate: number): number {
+  const fallbackMs = 60_000;
+  if (leakRate <= 0) return fallbackMs;
+  const fullDrainMs = (capacity / leakRate) * 1000;
+  return Math.ceil(fullDrainMs * 2);
+}
diff --git a/packages/ratelimit/src/engine/algorithms/sliding-window.ts b/packages/ratelimit/src/engine/algorithms/sliding-window.ts
new file mode 100644
index 00000000..7fda0ff9
--- /dev/null
+++ b/packages/ratelimit/src/engine/algorithms/sliding-window.ts
@@ -0,0 +1,162 @@
+/**
+ * Sliding window log rate limiting.
+ *
+ * Tracks individual request timestamps for smoother limits and accurate retry
+ * timing. Requires sorted-set support or an atomic storage helper.
+ */
+
+import type {
+ RateLimitAlgorithm,
+ RateLimitAlgorithmType,
+ RateLimitResult,
+ RateLimitStorage,
+} from '../../types';
+import { withStorageKeyLock } from '../../utils/locking';
+
+/** Configuration for the sliding-window log limiter. */
+interface SlidingWindowConfig {
+  /** Maximum requests allowed within one window. */
+  maxRequests: number;
+  /** Window length in milliseconds. */
+  intervalMs: number;
+  /** Scope reported in rate-limit results. */
+  scope: RateLimitResult['scope'];
+}
+
+/**
+ * Sliding-window log algorithm for smoother limits.
+ *
+ * @implements RateLimitAlgorithm
+ */
+export class SlidingWindowLogAlgorithm implements RateLimitAlgorithm {
+  public readonly type: RateLimitAlgorithmType = 'sliding-window';
+
+  /**
+   * Create a sliding-window algorithm bound to a storage backend.
+   *
+   * @param storage - Storage backend for rate-limit state.
+   * @param config - Sliding-window configuration.
+   */
+  public constructor(
+    private readonly storage: RateLimitStorage,
+    private readonly config: SlidingWindowConfig,
+  ) {}
+
+  /**
+   * Record one attempt and return the current window status for this key.
+   *
+   * @param key - Storage key for the limiter.
+   * @returns Rate limit result for the current window.
+   * @throws Error when the storage backend lacks sorted-set support.
+   */
+  public async consume(key: string): Promise<RateLimitResult> {
+    const limit = this.config.maxRequests;
+    const windowMs = this.config.intervalMs;
+
+    // Preferred path: a single atomic storage helper (e.g. a Redis script).
+    if (this.storage.consumeSlidingWindowLog) {
+      const now = Date.now();
+      /**
+       * Include the timestamp so reset time can be derived without extra reads.
+       */
+      const member = `${now}-${Math.random().toString(36).slice(2, 8)}`;
+      const res = await this.storage.consumeSlidingWindowLog(
+        key,
+        limit,
+        windowMs,
+        now,
+        member,
+      );
+      const limited = !res.allowed;
+      return {
+        key,
+        scope: this.config.scope,
+        algorithm: this.type,
+        limited,
+        remaining: Math.max(0, limit - res.count),
+        resetAt: res.resetAt,
+        retryAfter: limited ? Math.max(0, res.resetAt - now) : 0,
+        limit,
+      };
+    }
+
+    if (
+      !this.storage.zRemRangeByScore ||
+      !this.storage.zCard ||
+      !this.storage.zAdd
+    ) {
+      throw new Error('Sliding window requires sorted set support in storage');
+    }
+
+    return withStorageKeyLock(this.storage, key, async () => {
+      const now = Date.now();
+      /**
+       * Include the timestamp so reset time can be derived without extra reads.
+       */
+      const member = `${now}-${Math.random().toString(36).slice(2, 8)}`;
+      /**
+       * Fallback is serialized per process; multi-process strictness needs atomic storage.
+       */
+      await this.storage.zRemRangeByScore!(key, 0, now - windowMs);
+      const count = await this.storage.zCard!(key);
+
+      if (count >= limit) {
+        // Limited: the window resets when the oldest entry ages out.
+        const oldestMembers = this.storage.zRangeByScore
+          ? await this.storage.zRangeByScore(
+              key,
+              Number.NEGATIVE_INFINITY,
+              Number.POSITIVE_INFINITY,
+            )
+          : [];
+        const oldestMember = oldestMembers[0];
+        const oldestTs = oldestMember
+          ? Number(oldestMember.split('-')[0])
+          : now;
+        const resetAt = oldestTs + windowMs;
+        return {
+          key,
+          scope: this.config.scope,
+          algorithm: this.type,
+          limited: true,
+          remaining: 0,
+          resetAt,
+          retryAfter: Math.max(0, resetAt - now),
+          limit,
+        };
+      }
+
+      await this.storage.zAdd!(key, now, member);
+      if (this.storage.expire) {
+        await this.storage.expire(key, windowMs);
+      }
+
+      const newCount = count + 1;
+      const oldestMembers = this.storage.zRangeByScore
+        ? await this.storage.zRangeByScore(
+            key,
+            Number.NEGATIVE_INFINITY,
+            Number.POSITIVE_INFINITY,
+          )
+        : [];
+      const oldestMember = oldestMembers[0];
+      const oldestTs = oldestMember ? Number(oldestMember.split('-')[0]) : now;
+      const resetAt = oldestTs + windowMs;
+
+      return {
+        key,
+        scope: this.config.scope,
+        algorithm: this.type,
+        limited: false,
+        remaining: Math.max(0, limit - newCount),
+        resetAt,
+        retryAfter: 0,
+        limit,
+      };
+    });
+  }
+
+  /**
+   * Reset the stored key state for this limiter.
+   *
+   * @param key - Storage key to reset.
+   * @returns Resolves after the key is deleted.
+   */
+  public async reset(key: string): Promise<void> {
+    await this.storage.delete(key);
+  }
+}
diff --git a/packages/ratelimit/src/engine/algorithms/token-bucket.ts b/packages/ratelimit/src/engine/algorithms/token-bucket.ts
new file mode 100644
index 00000000..e6feadb6
--- /dev/null
+++ b/packages/ratelimit/src/engine/algorithms/token-bucket.ts
@@ -0,0 +1,159 @@
+/**
+ * Token bucket rate limiting.
+ *
+ * Allows short bursts while refilling steadily up to a cap.
+ * Bucket state is stored so limits stay consistent across commands.
+ */
+
+import type {
+ RateLimitAlgorithm,
+ RateLimitAlgorithmType,
+ RateLimitResult,
+ RateLimitStorage,
+} from '../../types';
+
+/** Configuration for the token-bucket limiter. */
+export interface TokenBucketConfig {
+  /** Maximum tokens available when the bucket is full. */
+  capacity: number;
+  /** Tokens added per second during refill. */
+  refillRate: number;
+  /** Scope reported in rate-limit results. */
+  scope: RateLimitResult['scope'];
+}
+
+/** Persisted bucket snapshot stored per key. */
+interface TokenBucketState {
+  /** Tokens currently available. */
+  tokens: number;
+  /** Epoch ms when tokens were last refilled. */
+  lastRefill: number;
+}
+
+/**
+ * Token bucket algorithm for bursty traffic with steady refill.
+ *
+ * @implements RateLimitAlgorithm
+ */
+export class TokenBucketAlgorithm implements RateLimitAlgorithm {
+  public readonly type: RateLimitAlgorithmType = 'token-bucket';
+
+  /**
+   * Create a token-bucket algorithm bound to a storage backend.
+   *
+   * @param storage - Storage backend for rate-limit state.
+   * @param config - Token-bucket configuration.
+   */
+  public constructor(
+    private readonly storage: RateLimitStorage,
+    private readonly config: TokenBucketConfig,
+  ) {}
+
+  /**
+   * Record one attempt and return the current bucket status for this key.
+   *
+   * @param key - Storage key for the limiter.
+   * @returns Rate limit result for the current bucket.
+   * @throws Error when refillRate is non-positive.
+   */
+  public async consume(key: string): Promise<RateLimitResult> {
+    const now = Date.now();
+    const { capacity, refillRate } = this.config;
+
+    if (refillRate <= 0) {
+      throw new Error('refillRate must be greater than 0');
+    }
+
+    const stored = await this.storage.get(key);
+    // Unknown or corrupt state starts a fresh, full bucket.
+    const state = isTokenBucketState(stored)
+      ? stored
+      : ({ tokens: capacity, lastRefill: now } satisfies TokenBucketState);
+
+    // Refill for the elapsed time, capped at capacity.
+    const elapsedSeconds = Math.max(0, (now - state.lastRefill) / 1000);
+    const refilled = Math.min(
+      capacity,
+      state.tokens + elapsedSeconds * refillRate,
+    );
+    const nextState: TokenBucketState = {
+      tokens: refilled,
+      lastRefill: now,
+    };
+
+    if (refilled < 1) {
+      // Not enough for one request: report when the next whole token arrives.
+      const retryAfter = Math.ceil(((1 - refilled) / refillRate) * 1000);
+      const resetAt = now + retryAfter;
+      await this.storage.set(
+        key,
+        nextState,
+        estimateBucketTtl(capacity, refillRate),
+      );
+      return {
+        key,
+        scope: this.config.scope,
+        algorithm: this.type,
+        limited: true,
+        remaining: 0,
+        resetAt,
+        retryAfter,
+        limit: capacity,
+      };
+    }
+
+    nextState.tokens = refilled - 1;
+    await this.storage.set(
+      key,
+      nextState,
+      estimateBucketTtl(capacity, refillRate),
+    );
+
+    const remaining = Math.floor(nextState.tokens);
+    // resetAt: when the bucket would refill back to full capacity.
+    const resetAt =
+      now + Math.ceil(((capacity - nextState.tokens) / refillRate) * 1000);
+
+    return {
+      key,
+      scope: this.config.scope,
+      algorithm: this.type,
+      limited: false,
+      remaining,
+      resetAt,
+      retryAfter: 0,
+      limit: capacity,
+    };
+  }
+
+  /**
+   * Reset the stored key state for this limiter.
+   *
+   * @param key - Storage key to reset.
+   * @returns Resolves after the key is deleted.
+   */
+  public async reset(key: string): Promise<void> {
+    await this.storage.delete(key);
+  }
+}
+
+/**
+ * Type guard for token-bucket state entries loaded from storage.
+ *
+ * @param value - Stored value to validate.
+ * @returns True when the value matches the TokenBucketState shape.
+ */
+function isTokenBucketState(value: unknown): value is TokenBucketState {
+  if (typeof value !== 'object' || value === null) return false;
+  const candidate = value as TokenBucketState;
+  const isFiniteNumber = (input: unknown): input is number =>
+    typeof input === 'number' && Number.isFinite(input);
+  return (
+    isFiniteNumber(candidate.tokens) && isFiniteNumber(candidate.lastRefill)
+  );
+}
+
+/**
+ * Estimate a TTL window large enough to cover full bucket refills.
+ *
+ * Uses twice the full-refill duration; falls back to one minute when the
+ * refill rate is non-positive.
+ *
+ * @param capacity - Bucket capacity.
+ * @param refillRate - Tokens refilled per second.
+ * @returns TTL in milliseconds.
+ */
+function estimateBucketTtl(capacity: number, refillRate: number): number {
+  const fallbackMs = 60_000;
+  if (refillRate <= 0) return fallbackMs;
+  const fullRefillMs = (capacity / refillRate) * 1000;
+  return Math.ceil(fullRefillMs * 2);
+}
diff --git a/packages/ratelimit/src/engine/violations.ts b/packages/ratelimit/src/engine/violations.ts
new file mode 100644
index 00000000..3d635251
--- /dev/null
+++ b/packages/ratelimit/src/engine/violations.ts
@@ -0,0 +1,125 @@
+/**
+ * Violation tracking.
+ *
+ * Persists repeat violations so cooldowns can escalate predictably.
+ */
+
+import type { RateLimitStorage, ViolationOptions } from '../types';
+import { resolveDuration } from '../utils/time';
+
+/** Persisted violation snapshot stored per key. */
+interface ViolationState {
+  /** Number of recorded violations (capped at maxViolations). */
+  count: number;
+  /** Epoch ms until which the cooldown applies. */
+  cooldownUntil: number;
+  /** Epoch ms of the most recent violation. */
+  lastViolationAt: number;
+}
+
+/** Default cap on the violation count used in cooldown escalation. */
+const DEFAULT_MAX_VIOLATIONS = 5;
+/** Default multiplier applied per additional violation. */
+const DEFAULT_ESCALATION_MULTIPLIER = 2;
+/** Default TTL for stored violation entries: one hour. */
+const DEFAULT_RESET_AFTER_MS = 60 * 60 * 1000;
+
+/**
+ * Tracks repeated violations and computes escalating cooldowns.
+ */
+export class ViolationTracker {
+  /**
+   * Create a violation tracker bound to a storage backend.
+   *
+   * @param storage - Storage backend for violation state.
+   */
+  public constructor(private readonly storage: RateLimitStorage) {}
+
+  /**
+   * Namespace the limiter key so violation entries never collide with
+   * limiter state stored under the raw key.
+   */
+  private key(key: string): string {
+    return `violation:${key}`;
+  }
+
+  /**
+   * Read stored violation state for a key, if present.
+   *
+   * @param key - Storage key for the limiter.
+   * @returns Stored violation state or null when none is present.
+   */
+  public async getState(key: string): Promise<ViolationState | null> {
+    const stored = await this.storage.get(this.key(key));
+    return isViolationState(stored) ? stored : null;
+  }
+
+  /**
+   * Check if a cooldown is currently active for this key.
+   *
+   * @param key - Storage key for the limiter.
+   * @returns Violation state when cooldown is active, otherwise null.
+   */
+  public async checkCooldown(key: string): Promise<ViolationState | null> {
+    const state = await this.getState(key);
+    if (!state) return null;
+    if (state.cooldownUntil > Date.now()) return state;
+    return null;
+  }
+
+  /**
+   * Record a violation and return the updated state for callers.
+   *
+   * @param key - Storage key for the limiter.
+   * @param baseRetryAfterMs - Base retry delay in milliseconds.
+   * @param options - Optional escalation settings.
+   * @returns Updated violation state.
+   */
+  public async recordViolation(
+    key: string,
+    baseRetryAfterMs: number,
+    options?: ViolationOptions,
+  ): Promise<ViolationState> {
+    const now = Date.now();
+    const prev = await this.getState(key);
+    const maxViolations = options?.maxViolations ?? DEFAULT_MAX_VIOLATIONS;
+    const multiplier =
+      options?.escalationMultiplier ?? DEFAULT_ESCALATION_MULTIPLIER;
+    const resetAfter = resolveDuration(
+      options?.resetAfter,
+      DEFAULT_RESET_AFTER_MS,
+    );
+
+    // Cap the count so the exponential cooldown cannot grow without bound.
+    const count = Math.min((prev?.count ?? 0) + 1, maxViolations);
+    const base = Math.max(0, baseRetryAfterMs);
+    const cooldownMs = base * Math.pow(multiplier, Math.max(0, count - 1));
+    const cooldownUntil = now + cooldownMs;
+
+    const state: ViolationState = {
+      count,
+      cooldownUntil,
+      lastViolationAt: now,
+    };
+
+    await this.storage.set(this.key(key), state, resetAfter);
+    return state;
+  }
+
+  /**
+   * Clear stored violation state for a key.
+   *
+   * @param key - Storage key to reset.
+   * @returns Resolves after the violation entry is deleted.
+   */
+  public async reset(key: string): Promise<void> {
+    await this.storage.delete(this.key(key));
+  }
+}
+
+/**
+ * Type guard for violation state entries loaded from storage.
+ *
+ * @param value - Stored value to validate.
+ * @returns True when the value matches the ViolationState shape.
+ */
+function isViolationState(value: unknown): value is ViolationState {
+ if (!value || typeof value !== 'object') return false;
+ const state = value as ViolationState;
+ return (
+ typeof state.count === 'number' &&
+ Number.isFinite(state.count) &&
+ typeof state.cooldownUntil === 'number' &&
+ Number.isFinite(state.cooldownUntil) &&
+ typeof state.lastViolationAt === 'number' &&
+ Number.isFinite(state.lastViolationAt)
+ );
+}
diff --git a/packages/ratelimit/src/errors.ts b/packages/ratelimit/src/errors.ts
new file mode 100644
index 00000000..aed4b805
--- /dev/null
+++ b/packages/ratelimit/src/errors.ts
@@ -0,0 +1,28 @@
+/**
+ * Rate limit error type.
+ *
+ * Lets callers distinguish rate-limit failures from other errors.
+ */
+
+import type { RateLimitStoreValue } from './types';
+
+/**
+ * Error thrown by the directive wrapper when a function is rate-limited.
+ *
+ * @extends Error
+ */
+export class RateLimitError extends Error {
+  /**
+   * Create a rate-limit error carrying the aggregated result payload.
+   *
+   * @param result - Aggregated rate-limit result.
+   * @param message - Optional error message override.
+   */
+  public constructor(
+    public readonly result: RateLimitStoreValue,
+    message?: string,
+  ) {
+    super(message ?? 'Rate limit exceeded');
+    this.name = 'RateLimitError';
+  }
+}
diff --git a/packages/ratelimit/src/index.ts b/packages/ratelimit/src/index.ts
new file mode 100644
index 00000000..e014d00e
--- /dev/null
+++ b/packages/ratelimit/src/index.ts
@@ -0,0 +1,42 @@
+import './augmentation';
+import { RateLimitPlugin } from './plugin';
+import { UseRateLimitDirectivePlugin } from './directive/use-ratelimit-directive';
+import type { CommandKitPlugin } from 'commandkit';
+import { getRateLimitConfig } from './configure';
+
+/**
+ * Create compiler + runtime plugins for rate limiting.
+ *
+ * Runtime options are provided via configureRatelimit().
+ *
+ * @param options - Optional compiler plugin configuration.
+ * @returns Ordered array of compiler and runtime plugins.
+ */
+export function ratelimit(
+  options?: Partial<{
+    compiler: import('commandkit').CommonDirectiveTransformerOptions;
+  }>,
+): CommandKitPlugin[] {
+  // Compiler plugin must precede the runtime plugin in the returned array.
+  return [
+    new UseRateLimitDirectivePlugin(options?.compiler),
+    new RateLimitPlugin(getRateLimitConfig()),
+  ];
+}
+
+export * from './types';
+export * from './constants';
+export * from './runtime';
+export * from './configure';
+export * from './errors';
+export * from './api';
+export * from './plugin';
+export * from './directive/use-ratelimit';
+export * from './directive/use-ratelimit-directive';
+export * from './engine/RateLimitEngine';
+export * from './engine/algorithms/fixed-window';
+export * from './engine/algorithms/sliding-window';
+export * from './engine/algorithms/token-bucket';
+export * from './engine/algorithms/leaky-bucket';
+export * from './engine/violations';
+export * from './storage/memory';
+export * from './storage/redis';
+export * from './storage/fallback';
diff --git a/packages/ratelimit/src/plugin.ts b/packages/ratelimit/src/plugin.ts
new file mode 100644
index 00000000..cd76dec0
--- /dev/null
+++ b/packages/ratelimit/src/plugin.ts
@@ -0,0 +1,1036 @@
+import { Logger, RuntimePlugin, defer } from 'commandkit';
+import type {
+ CommandKitEnvironment,
+ CommandKitPluginRuntime,
+ CommandKitHMREvent,
+ PreparedAppCommandExecution,
+} from 'commandkit';
+import { createAsyncQueue, type AsyncQueue } from 'commandkit/async-queue';
+import { EmbedBuilder, MessageFlags } from 'discord.js';
+import type { Interaction, Message } from 'discord.js';
+import { RateLimitEngine } from './engine/RateLimitEngine';
+import type {
+ RateLimitCommandConfig,
+ RateLimitLimiterConfig,
+ RateLimitPluginOptions,
+ RateLimitResult,
+ RateLimitScope,
+ RateLimitStorage,
+ RateLimitStorageConfig,
+ RateLimitQueueOptions,
+ RateLimitRoleLimitStrategy,
+ RateLimitStoreValue,
+} from './types';
+import {
+ DEFAULT_LIMITER,
+ mergeLimiterConfigs,
+ resolveLimiterConfigs,
+} from './utils/config';
+import {
+ getRoleIds,
+ resolveExemptionKeys,
+ resolveScopeKeys,
+} from './utils/keys';
+import type { ResolvedScopeKey } from './utils/keys';
+import { RATELIMIT_STORE_KEY } from './constants';
+import { MemoryRateLimitStorage } from './storage/memory';
+import {
+ getRateLimitStorage,
+ setRateLimitRuntime,
+ setRateLimitStorage,
+} from './runtime';
+import { isRateLimitConfigured } from './configure';
+import { clampAtLeast, resolveDuration } from './utils/time';
+
+/** Analytics event names emitted by the plugin. */
+const ANALYTICS_EVENTS = {
+  HIT: 'ratelimit_hit',
+  ALLOWED: 'ratelimit_allowed',
+  RESET: 'ratelimit_reset',
+  VIOLATION: 'ratelimit_violation',
+} as const;
+
+/** Payload emitted on the 'ratelimited' event for external listeners. */
+type RateLimitEventPayload = {
+  /** Resolved storage key that was limited. */
+  key: string;
+  /** The individual limiter result that triggered the event. */
+  result: RateLimitResult;
+  /** Interaction or message that invoked the command. */
+  source: Interaction | Message;
+  /** Aggregate across all scopes/windows for this invocation. */
+  aggregate: RateLimitStoreValue;
+  /** Name of the limited command. */
+  commandName: string;
+  /** True when the invocation was queued for retry instead of rejected. */
+  queued: boolean;
+};
+
+/**
+ * Runtime plugin that enforces rate limits for CommandKit commands so handlers stay lean.
+ *
+ * @extends RuntimePlugin
+ */
+export class RateLimitPlugin extends RuntimePlugin {
+  public readonly name = 'RateLimitPlugin';
+  /** Engine cache keyed by storage instance so each backend gets one engine. */
+  private readonly engines = new WeakMap<RateLimitStorage, RateLimitEngine>();
+  /** In-process fallback storage used when no driver is configured. */
+  private readonly memoryStorage = new MemoryRateLimitStorage();
+  // Retry queues keyed by rate-limit key — NOTE(review): value type assumed
+  // to be AsyncQueue (imported above); confirm against enqueueExecution.
+  private readonly queues = new Map<string, AsyncQueue>();
+  /** Ensures the missing-storage error is logged at most once per process. */
+  private hasLoggedMissingStorage = false;
+
+  /**
+   * Create the runtime plugin with the provided options.
+   *
+   * @param options - Rate limit plugin options from configureRatelimit().
+   */
+  public constructor(options: RateLimitPluginOptions) {
+    super(options);
+    // NOTE(review): presumably registers the ratelimit runtime chunk for
+    // preloading — confirm against RuntimePlugin.preload semantics.
+    this.preload.add('ratelimit.js');
+  }
+
+  /**
+   * Initialize runtime storage and defaults for this plugin instance.
+   *
+   * @param ctx - CommandKit runtime for the active application.
+   * @returns Resolves when runtime storage has been initialized.
+   * @throws Error when the plugin has not been configured.
+   */
+  public async activate(ctx: CommandKitPluginRuntime): Promise<void> {
+    if (!isRateLimitConfigured()) {
+      throw new Error(
+        'RateLimit is not configured. Call configureRatelimit() during startup (for example in src/ratelimit.ts).',
+      );
+    }
+
+    const runtimeStorage = this.resolveDefaultStorage();
+
+    if (!runtimeStorage) {
+      this.logMissingStorage();
+      setRateLimitRuntime(null);
+      return;
+    }
+
+    // Only claim the shared storage slot when nothing else has set it.
+    if (!getRateLimitStorage()) {
+      setRateLimitStorage(runtimeStorage);
+    }
+
+    setRateLimitRuntime({
+      storage: runtimeStorage,
+      keyPrefix: this.options.keyPrefix,
+      defaultLimiter: this.options.defaultLimiter ?? DEFAULT_LIMITER,
+      limiters: this.options.limiters,
+      hooks: this.options.hooks,
+    });
+  }
+
+ /**
+ * Dispose queues and clear shared runtime state.
+ *
+ * @returns Resolves after queues are aborted and runtime state is cleared.
+ */
+ public async deactivate(): Promise {
+ for (const queue of this.queues.values()) {
+ queue.abort();
+ }
+ this.queues.clear();
+ setRateLimitRuntime(null);
+ }
+
+  /**
+   * Evaluate rate limits and optionally queue execution to avoid dropping commands.
+   *
+   * @param ctx - CommandKit runtime for the active application.
+   * @param env - Command execution environment.
+   * @param source - Interaction or message triggering the command.
+   * @param prepared - Prepared command execution data.
+   * @param execute - Callback that executes the command handler.
+   * @returns True when execution is deferred or handled, otherwise false to continue.
+   */
+  public async executeCommand(
+    ctx: CommandKitPluginRuntime,
+    env: CommandKitEnvironment,
+    source: Interaction | Message,
+    prepared: PreparedAppCommandExecution,
+    execute: () => Promise<void>,
+  ): Promise<boolean> {
+    const metadata = prepared.command.metadata as {
+      ratelimit?: RateLimitCommandConfig | boolean;
+    };
+
+    // Commands opt in via metadata; absent or false means "no limiting".
+    const rateLimitSetting = metadata?.ratelimit;
+    if (rateLimitSetting == null || rateLimitSetting === false) {
+      return false;
+    }
+
+    if (!env.context) {
+      return false;
+    }
+
+    if (await this.shouldBypass(source)) {
+      return false;
+    }
+
+    const commandConfig =
+      typeof rateLimitSetting === 'object' ? rateLimitSetting : {};
+
+    // Layered config: built-in defaults < plugin defaults < named limiter
+    // < per-command overrides.
+    const { limiter: limiterName, ...commandOverrides } = commandConfig;
+    const namedLimiter = limiterName
+      ? this.options.limiters?.[limiterName]
+      : undefined;
+
+    const mergedLimiter = mergeLimiterConfigs(
+      DEFAULT_LIMITER,
+      this.options.defaultLimiter,
+      namedLimiter,
+      commandOverrides,
+    );
+
+    const roleLimits = mergeRoleLimits(
+      this.options.roleLimits,
+      this.options.defaultLimiter?.roleLimits,
+      namedLimiter?.roleLimits,
+      commandOverrides.roleLimits,
+    );
+    const roleStrategy =
+      commandOverrides.roleLimitStrategy ??
+      namedLimiter?.roleLimitStrategy ??
+      this.options.defaultLimiter?.roleLimitStrategy ??
+      this.options.roleLimitStrategy;
+    const roleOverride = resolveRoleLimit(roleLimits, roleStrategy, source);
+
+    const effectiveLimiter = roleOverride
+      ? mergeLimiterConfigs(mergedLimiter, roleOverride)
+      : mergedLimiter;
+
+    const queueConfig = resolveQueueOptions(
+      this.options.queue,
+      this.options.defaultLimiter?.queue,
+      namedLimiter?.queue,
+      commandOverrides.queue,
+      roleOverride?.queue,
+    );
+
+    const scopes = normalizeScopes(effectiveLimiter.scope);
+    const keyResolver =
+      effectiveLimiter.keyResolver ?? this.options.keyResolver;
+    const keyPrefix = effectiveLimiter.keyPrefix ?? this.options.keyPrefix;
+    const storage =
+      this.resolveStorage(effectiveLimiter.storage) ??
+      this.resolveDefaultStorage();
+
+    // Fail open when no storage is available: record an empty aggregate and
+    // let the command run rather than dropping it.
+    if (!storage) {
+      this.logMissingStorage();
+      env.store.set(RATELIMIT_STORE_KEY, createEmptyStoreValue());
+      return false;
+    }
+
+    const engine = this.getEngine(storage);
+
+    const resolvedKeys = resolveScopeKeys({
+      ctx: env.context,
+      source,
+      command: prepared.command,
+      scopes,
+      keyPrefix,
+      keyResolver,
+    });
+
+    if (!resolvedKeys.length) {
+      env.store.set(RATELIMIT_STORE_KEY, createEmptyStoreValue());
+      return false;
+    }
+
+    const results: RateLimitResult[] = [];
+    let violationCount: number | undefined;
+
+    for (const resolved of resolvedKeys) {
+      const resolvedConfigs = resolveLimiterConfigs(
+        effectiveLimiter,
+        resolved.scope,
+      );
+
+      for (const resolvedConfig of resolvedConfigs) {
+        const resolvedKey = withWindowSuffix(
+          resolved.key,
+          resolvedConfig.windowId,
+        );
+
+        let output: Awaited<ReturnType<typeof engine.consume>>;
+        try {
+          output = await engine.consume(resolvedKey, resolvedConfig);
+        } catch (error) {
+          // Storage failure also fails open: log, notify, and continue.
+          if (this.options.hooks?.onStorageError) {
+            await this.options.hooks.onStorageError(error, false);
+          }
+          Logger.error`[ratelimit] Storage error during consume: ${error}`;
+          env.store.set(RATELIMIT_STORE_KEY, createEmptyStoreValue());
+          return false;
+        }
+
+        const { result, violationCount: count } = output;
+        results.push(result);
+        if (typeof count === 'number') {
+          violationCount =
+            violationCount == null ? count : Math.max(violationCount, count);
+        }
+
+        if (result.limited) {
+          defer(() =>
+            ctx.commandkit.analytics.track({
+              name: ANALYTICS_EVENTS.HIT,
+              id: prepared.command.command.name,
+              data: {
+                key: result.key,
+                scope: result.scope,
+                algorithm: result.algorithm,
+                resetAt: result.resetAt,
+                remaining: result.remaining,
+              },
+            }),
+          );
+
+          if (violationCount != null) {
+            defer(() =>
+              ctx.commandkit.analytics.track({
+                name: ANALYTICS_EVENTS.VIOLATION,
+                id: prepared.command.command.name,
+                data: {
+                  key: result.key,
+                  count: violationCount,
+                },
+              }),
+            );
+          }
+        } else {
+          defer(() =>
+            ctx.commandkit.analytics.track({
+              name: ANALYTICS_EVENTS.ALLOWED,
+              id: prepared.command.command.name,
+              data: {
+                key: result.key,
+                scope: result.scope,
+                algorithm: result.algorithm,
+                remaining: result.remaining,
+              },
+            }),
+          );
+        }
+      }
+    }
+
+    /**
+     * Aggregate across all scopes/windows so callers see a single response.
+     */
+    const aggregate = aggregateResults(results);
+    env.store.set(RATELIMIT_STORE_KEY, aggregate);
+
+    if (aggregate.limited) {
+      const firstLimited = results.find((r) => r.limited) ?? results[0];
+      if (!firstLimited) {
+        return false;
+      }
+
+      if (
+        queueConfig.enabled &&
+        (await this.enqueueExecution({
+          queueKey: selectQueueKey(results),
+          queue: queueConfig,
+          initialDelayMs: aggregate.retryAfter,
+          source,
+          execute,
+          engine,
+          resolvedKeys,
+          limiter: effectiveLimiter,
+        }))
+      ) {
+        Logger.info(
+          `[ratelimit] Queued command /${prepared.command.command.name} for retry in ${Math.ceil(aggregate.retryAfter / 1000)}s`,
+        );
+        ctx.capture();
+        if (this.options.hooks?.onRateLimited) {
+          await this.options.hooks.onRateLimited({
+            key: firstLimited.key,
+            result: firstLimited,
+            source,
+          });
+        }
+
+        if (violationCount != null && this.options.hooks?.onViolation) {
+          await this.options.hooks.onViolation(
+            firstLimited.key,
+            violationCount,
+          );
+        }
+
+        this.emitRateLimited(ctx, {
+          key: firstLimited.key,
+          result: firstLimited,
+          source,
+          aggregate,
+          commandName: prepared.command.command.name,
+          queued: true,
+        });
+
+        return false;
+      }
+
+      Logger.warn(
+        `[ratelimit] User hit rate limit on /${prepared.command.command.name} - retry in ${Math.ceil(aggregate.retryAfter / 1000)}s`,
+      );
+
+      await this.respondRateLimited(env, source, aggregate);
+
+      if (this.options.hooks?.onRateLimited) {
+        await this.options.hooks.onRateLimited({
+          key: firstLimited.key,
+          result: firstLimited,
+          source,
+        });
+      }
+
+      if (violationCount != null && this.options.hooks?.onViolation) {
+        await this.options.hooks.onViolation(firstLimited.key, violationCount);
+      }
+
+      ctx.capture();
+
+      this.emitRateLimited(ctx, {
+        key: firstLimited.key,
+        result: firstLimited,
+        source,
+        aggregate,
+        commandName: prepared.command.command.name,
+        queued: false,
+      });
+    } else if (this.options.hooks?.onAllowed) {
+      const first = results[0];
+      if (first) {
+        await this.options.hooks.onAllowed({
+          key: first.key,
+          result: first,
+          source,
+        });
+      }
+    }
+
+    return false;
+  }
+
+ /**
+ * Clear matching keys when a command is hot-reloaded to avoid stale state.
+ *
+ * @param ctx - CommandKit runtime for the active application.
+ * @param event - HMR event describing the changed file.
+ * @returns Resolves after matching keys are cleared and the event is handled.
+ */
+ public async performHMR(
+ ctx: CommandKitPluginRuntime,
+ event: CommandKitHMREvent,
+ ): Promise {
+ if (!event.path) return;
+
+ const normalized = normalizePath(event.path);
+ const commands = ctx.commandkit.commandHandler.getCommandsArray();
+ const matched = commands.filter((cmd) =>
+ cmd.command.path ? normalizePath(cmd.command.path) === normalized : false,
+ );
+
+ if (!matched.length) return;
+
+ const storage = this.resolveDefaultStorage();
+
+ if (!storage) {
+ this.logMissingStorage();
+ return;
+ }
+
+ for (const cmd of matched) {
+ await resetByCommand(storage, this.options.keyPrefix, cmd.command.name);
+ }
+
+ event.accept();
+ event.preventDefault();
+ }
+
+  /**
+   * Resolve a cached engine instance for a storage backend.
+   *
+   * @param storage - Storage backend to associate with the engine.
+   * @returns Cached engine instance for the storage.
+   */
+  private getEngine(storage: RateLimitStorage): RateLimitEngine {
+    const cached = this.engines.get(storage);
+    if (cached) return cached;
+    const created = new RateLimitEngine(storage);
+    this.engines.set(storage, created);
+    return created;
+  }
+
+  /**
+   * Normalize a storage config into a storage driver instance.
+   *
+   * @param config - Storage config or driver.
+   * @returns Storage driver instance or null when not configured.
+   */
+  private resolveStorage(
+    config?: RateLimitStorageConfig,
+  ): RateLimitStorage | null {
+    if (!config) return null;
+    // A wrapped config exposes its driver; otherwise the config IS the driver.
+    return 'driver' in config ? config.driver : config;
+  }
+
+  /**
+   * Resolve the default storage, falling back to memory when enabled.
+   *
+   * @returns Resolved storage instance or null when disabled.
+   */
+  private resolveDefaultStorage(): RateLimitStorage | null {
+    const explicit =
+      this.resolveStorage(this.options.storage) ?? getRateLimitStorage();
+    if (explicit) return explicit;
+
+    // Either option flag disables the in-memory fallback.
+    const defaultsDisabled =
+      this.options.initializeDefaultStorage === false ||
+      this.options.initializeDefaultDriver === false;
+    return defaultsDisabled ? null : this.memoryStorage;
+  }
+
+  /**
+   * Log a one-time error when storage is missing.
+   *
+   * @returns Nothing; logs at most once per process.
+   */
+  private logMissingStorage(): void {
+    if (!this.hasLoggedMissingStorage) {
+      this.hasLoggedMissingStorage = true;
+      Logger.error(
+        '[ratelimit] No storage configured. Set storage via configureRatelimit({ storage }), setRateLimitStorage(), or enable initializeDefaultStorage.',
+      );
+    }
+  }
+
+ /**
+ * Emit a ratelimited event through CommandKit's event bus.
+ *
+ * @param ctx - CommandKit runtime for the active application.
+ * @param payload - Rate-limit event payload to emit.
+ * @returns Nothing; emits the event when available.
+ */
+ private emitRateLimited(
+ ctx: CommandKitPluginRuntime,
+ payload: RateLimitEventPayload,
+ ): void {
+ ctx.commandkit.events?.to('ratelimits').emit('ratelimited', payload);
+ }
+
+ /**
+ * Determine whether a source should bypass rate limits.
+ *
+ * @param source - Interaction or message to evaluate.
+ * @returns True when the source should bypass rate limiting.
+ */
+ private async shouldBypass(source: Interaction | Message): Promise {
+ const bypass = this.options.bypass;
+ if (bypass) {
+ /**
+ * Check permanent allowlists first to avoid storage lookups.
+ */
+ const userId =
+ source instanceof Message ? source.author.id : source.user?.id;
+ if (userId && bypass.userIds?.includes(userId)) return true;
+
+ const guildId = source.guildId ?? null;
+ if (guildId && bypass.guildIds?.includes(guildId)) return true;
+
+ const roleIds = getRoleIds(source);
+ if (roleIds.length && bypass.roleIds?.length) {
+ if (roleIds.some((roleId) => bypass.roleIds!.includes(roleId)))
+ return true;
+ }
+ }
+
+ /**
+ * Check temporary exemptions stored in the rate limit storage next.
+ */
+ if (await this.hasTemporaryBypass(source)) {
+ return true;
+ }
+
+ /**
+ * Run custom predicate last so it can override previous checks.
+ */
+ if (bypass?.check) {
+ return Boolean(await bypass.check(source));
+ }
+
+ return false;
+ }
+
+ /**
+ * Check for temporary exemptions in storage for the source.
+ *
+ * @param source - Interaction or message to evaluate.
+ * @returns True when a temporary exemption is found.
+ */
+ private async hasTemporaryBypass(
+ source: Interaction | Message,
+ ): Promise {
+ const storage = this.resolveDefaultStorage();
+ if (!storage) return false;
+
+ const keys = resolveExemptionKeys(source, this.options.keyPrefix);
+ if (!keys.length) return false;
+
+ try {
+ for (const key of keys) {
+ if (await storage.get(key)) return true;
+ }
+ } catch (error) {
+ if (this.options.hooks?.onStorageError) {
+ await this.options.hooks.onStorageError(error, false);
+ }
+ Logger.error`[ratelimit] Storage error during exemption check: ${error}`;
+ }
+
+ return false;
+ }
+
+ /**
+ * Send the default rate-limited response when no custom handler is set.
+ *
+ * @param env - Command execution environment.
+ * @param source - Interaction or message that was limited.
+ * @param info - Aggregated rate-limit info for the response.
+ * @returns Resolves after the response is sent.
+ */
+ private async respondRateLimited(
+ env: CommandKitEnvironment,
+ source: Interaction | Message,
+ info: RateLimitStoreValue,
+ ) {
+ const ctx = env.context;
+ if (this.options.onRateLimited && ctx) {
+ await this.options.onRateLimited(ctx, info);
+ return;
+ }
+
+ const retrySeconds = Math.ceil(info.retryAfter / 1000);
+ const embed = new EmbedBuilder()
+ .setTitle(':hourglass_flowing_sand: You are on cooldown')
+ .setDescription(
+ `Try again (in ${retrySeconds}s).`,
+ )
+ .setColor('Red');
+
+ if (source instanceof Message) {
+ if (source.channel?.isSendable()) {
+ try {
+ await source.reply({ embeds: [embed] });
+ } catch (error) {
+ Logger.error`[ratelimit] Failed to reply with rate limit embed: ${error}`;
+ }
+ }
+ return;
+ }
+
+ if (!source.isRepliable()) return;
+
+ if (source.replied || source.deferred) {
+ try {
+ await source.followUp({
+ embeds: [embed],
+ flags: MessageFlags.Ephemeral,
+ });
+ } catch (error) {
+ Logger.error`[ratelimit] Failed to follow up with rate limit embed: ${error}`;
+ }
+ return;
+ }
+
+ try {
+ await source.reply({
+ embeds: [embed],
+ flags: MessageFlags.Ephemeral,
+ });
+ } catch (error) {
+ Logger.error`[ratelimit] Failed to reply with rate limit embed: ${error}`;
+ }
+ }
+
+ /**
+ * Enqueue a command execution for later retry under queue rules.
+ *
+ * @param params - Queue execution parameters.
+ * @returns True when the execution was queued.
+ */
+ private async enqueueExecution(params: {
+ queueKey: string;
+ queue: NormalizedQueueOptions;
+ initialDelayMs: number;
+ source: Interaction | Message;
+ execute: () => Promise;
+ engine: RateLimitEngine;
+ resolvedKeys: ResolvedScopeKey[];
+ limiter: RateLimitLimiterConfig;
+ }): Promise {
+ if (!params.queue.enabled) return false;
+
+ const queue = this.getQueue(params.queueKey, params.queue);
+ const size = queue.getPending() + queue.getRunning();
+ if (size >= params.queue.maxSize) {
+ /**
+ * Queue full: fall back to immediate rate-limit handling to avoid unbounded growth.
+ */
+ return false;
+ }
+
+ await this.deferInteractionIfNeeded(params.source, params.queue);
+
+ const queuedAt = Date.now();
+ const timeoutAt = queuedAt + params.queue.timeoutMs;
+ const initialDelay = Math.max(0, params.initialDelayMs);
+
+ void queue
+ .add(async () => {
+ let delayMs = initialDelay;
+ while (true) {
+ if (delayMs > 0) {
+ await sleep(delayMs);
+ }
+
+ if (Date.now() > timeoutAt) {
+ Logger.warn(
+ `[ratelimit] Queue timeout exceeded for key ${params.queueKey}`,
+ );
+ return;
+ }
+
+ const aggregate = await this.consumeForQueue(
+ params.engine,
+ params.limiter,
+ params.resolvedKeys,
+ ).catch(async (error) => {
+ if (this.options.hooks?.onStorageError) {
+ await this.options.hooks.onStorageError(error, false);
+ }
+ Logger.error`[ratelimit] Storage error during queued consume: ${error}`;
+ return null;
+ });
+
+ if (!aggregate) {
+ return;
+ }
+
+ if (!aggregate.limited) {
+ await params.execute();
+ return;
+ }
+
+ delayMs = Math.max(aggregate.retryAfter, 250);
+ }
+ })
+ .catch((error) => {
+ Logger.error`[ratelimit] Queue task failed: ${error}`;
+ });
+
+ return true;
+ }
+
+ /**
+ * Get or create an async queue for the given key.
+ *
+ * @param key - Queue identifier.
+ * @param options - Normalized queue settings.
+ * @returns Async queue instance.
+ */
+ private getQueue(key: string, options: NormalizedQueueOptions): AsyncQueue {
+ const existing = this.queues.get(key);
+ if (existing) return existing;
+ const queue = createAsyncQueue({ concurrency: options.concurrency });
+ this.queues.set(key, queue);
+ return queue;
+ }
+
+ /**
+ * Consume limits for queued execution to decide whether to run now.
+ *
+ * @param engine - Rate limit engine.
+ * @param limiter - Resolved limiter configuration.
+ * @param resolvedKeys - Scope keys to consume.
+ * @returns Aggregated rate-limit info for the queue check.
+ */
+ private async consumeForQueue(
+ engine: RateLimitEngine,
+ limiter: RateLimitLimiterConfig,
+ resolvedKeys: ResolvedScopeKey[],
+ ): Promise {
+ const results: RateLimitResult[] = [];
+ for (const resolved of resolvedKeys) {
+ const resolvedConfigs = resolveLimiterConfigs(limiter, resolved.scope);
+ for (const resolvedConfig of resolvedConfigs) {
+ const resolvedKey = withWindowSuffix(
+ resolved.key,
+ resolvedConfig.windowId,
+ );
+ const output = await engine.consume(resolvedKey, resolvedConfig);
+ results.push(output.result);
+ }
+ }
+
+ return aggregateResults(results);
+ }
+
+ /**
+ * Defer interaction replies when queueing and the source is repliable.
+ *
+ * @param source - Interaction or message that may be deferred.
+ * @param queue - Normalized queue settings.
+ * @returns Resolves after attempting to defer the interaction.
+ */
+ private async deferInteractionIfNeeded(
+ source: Interaction | Message,
+ queue: NormalizedQueueOptions,
+ ): Promise {
+ if (!queue.deferInteraction) return;
+ if (source instanceof Message) return;
+ if (!source.isRepliable()) return;
+ if (source.deferred || source.replied) return;
+
+ try {
+ await source.deferReply({
+ flags: queue.ephemeral ? MessageFlags.Ephemeral : undefined,
+ });
+ } catch (error) {
+ Logger.debug(
+ `[ratelimit] Failed to defer interaction for queued command: ${error}`,
+ );
+ }
+ }
+}
+
/**
 * Fully-resolved queue settings used internally by the plugin.
 *
 * Produced by `resolveQueueOptions` after merging config layers; every
 * field is present and clamped to a sane minimum.
 */
interface NormalizedQueueOptions {
  // Whether limited executions are queued for retry instead of rejected.
  enabled: boolean;
  // Maximum pending + running tasks per queue before new work is refused.
  maxSize: number;
  // Maximum total time (ms) a queued execution may wait before being dropped.
  timeoutMs: number;
  // Whether repliable interactions are deferred while queued.
  deferInteraction: boolean;
  // Whether the deferred reply uses the ephemeral flag.
  ephemeral: boolean;
  // Number of queued tasks allowed to run concurrently.
  concurrency: number;
}
+
+/**
+ * Normalize scope input into a de-duplicated scope array.
+ *
+ * @param scope - Scope config value.
+ * @returns Array of scopes to enforce.
+ */
+function normalizeScopes(
+ scope: RateLimitLimiterConfig['scope'] | undefined,
+): RateLimitScope[] {
+ if (!scope) return ['user'];
+ if (Array.isArray(scope)) return Array.from(new Set(scope));
+ return [scope];
+}
+
+/**
+ * Aggregate multiple rate-limit results into a single summary object.
+ *
+ * @param results - Individual limiter/window results.
+ * @returns Aggregated rate-limit store value.
+ */
+function aggregateResults(results: RateLimitResult[]): RateLimitStoreValue {
+ if (!results.length) {
+ return createEmptyStoreValue();
+ }
+
+ const limitedResults = results.filter((r) => r.limited);
+ const limited = limitedResults.length > 0;
+ const remaining = Math.min(...results.map((r) => r.remaining));
+ const resetAt = Math.max(...results.map((r) => r.resetAt));
+ const retryAfter = limited
+ ? Math.max(...limitedResults.map((r) => r.retryAfter))
+ : 0;
+
+ return {
+ limited,
+ remaining,
+ resetAt,
+ retryAfter,
+ results,
+ };
+}
+
+/**
+ * Append a window suffix to a key when a window id is present.
+ *
+ * @param key - Base storage key.
+ * @param windowId - Optional window identifier.
+ * @returns Key with window suffix when provided.
+ */
+function withWindowSuffix(key: string, windowId?: string): string {
+ if (!windowId) return key;
+ return `${key}:w:${windowId}`;
+}
+
+/**
+ * Create an empty aggregate result for cases with no limiter results.
+ *
+ * @returns Empty rate-limit store value.
+ */
+function createEmptyStoreValue(): RateLimitStoreValue {
+ return {
+ limited: false,
+ remaining: 0,
+ resetAt: 0,
+ retryAfter: 0,
+ results: [],
+ };
+}
+
+/**
+ * Merge multiple role limit maps, with later maps overriding earlier ones.
+ *
+ * @param limits - Role limit maps ordered from lowest to highest priority.
+ * @returns Merged role limits or undefined when empty.
+ */
+function mergeRoleLimits(
+ ...limits: Array | undefined>
+): Record | undefined {
+ const merged: Record = {};
+ for (const limit of limits) {
+ if (!limit) continue;
+ Object.assign(merged, limit);
+ }
+ return Object.keys(merged).length ? merged : undefined;
+}
+
+/**
+ * Resolve a role-specific limiter for a source using a strategy.
+ *
+ * @param limits - Role limit map keyed by role id.
+ * @param strategy - Role limit strategy to apply.
+ * @param source - Interaction or message to resolve roles from.
+ * @returns Resolved role limiter or null when none match.
+ */
+function resolveRoleLimit(
+ limits: Record | undefined,
+ strategy: RateLimitRoleLimitStrategy | undefined,
+ source: Interaction | Message,
+): RateLimitLimiterConfig | null {
+ if (!limits) return null;
+ const roleIds = getRoleIds(source);
+ if (!roleIds.length) return null;
+
+ const entries = Object.entries(limits).filter(([roleId]) =>
+ roleIds.includes(roleId),
+ );
+ if (!entries.length) return null;
+
+ const resolvedStrategy = strategy ?? 'highest';
+ if (resolvedStrategy === 'first') {
+ return entries[0]?.[1] ?? null;
+ }
+
+ const scored = entries.map(([, limiter]) => ({
+ limiter,
+ score: computeLimiterScore(limiter),
+ }));
+
+ scored.sort((a, b) => {
+ if (resolvedStrategy === 'lowest') {
+ return a.score - b.score;
+ }
+ return b.score - a.score;
+ });
+
+ return scored[0]?.limiter ?? null;
+}
+
+/**
+ * Compute a comparable score for a limiter for role-strategy sorting.
+ *
+ * @param limiter - Limiter configuration to score.
+ * @returns Minimum request rate across windows.
+ */
+function computeLimiterScore(limiter: RateLimitLimiterConfig): number {
+ const resolvedConfigs = resolveLimiterConfigs(limiter, 'user');
+ if (!resolvedConfigs.length) return 0;
+ const scores = resolvedConfigs.map(
+ (resolved) => resolved.maxRequests / resolved.intervalMs,
+ );
+ return Math.min(...scores);
+}
+
+/**
+ * Merge and normalize queue options across config layers.
+ *
+ * @param options - Queue option layers ordered from lowest to highest priority.
+ * @returns Normalized queue options.
+ */
+function resolveQueueOptions(
+ ...options: Array
+): NormalizedQueueOptions {
+ const merged = options.reduce(
+ (acc, opt) => ({ ...acc, ...(opt ?? {}) }),
+ {},
+ );
+ const hasConfig = options.some((opt) => opt != null);
+ const enabled = merged.enabled ?? hasConfig;
+
+ return {
+ enabled,
+ maxSize: clampAtLeast(merged.maxSize ?? 3, 1),
+ timeoutMs: clampAtLeast(resolveDuration(merged.timeout, 30_000), 1),
+ deferInteraction: merged.deferInteraction !== false,
+ ephemeral: merged.ephemeral !== false,
+ concurrency: clampAtLeast(merged.concurrency ?? 1, 1),
+ };
+}
+
+/**
+ * Select the queue key from the result with the longest retry delay.
+ *
+ * @param results - Rate limit results for the command.
+ * @returns Queue key to use for serialization.
+ */
+function selectQueueKey(results: RateLimitResult[]): string {
+ let target: RateLimitResult | undefined;
+ for (const result of results) {
+ if (!result.limited) continue;
+ if (!target || result.retryAfter > target.retryAfter) {
+ target = result;
+ }
+ }
+ return (target ?? results[0])?.key ?? 'ratelimit:queue';
+}
+
+/**
+ * Delay execution for a given duration.
+ *
+ * @param ms - Delay duration in milliseconds.
+ * @returns Promise that resolves after the delay.
+ */
+function sleep(ms: number): Promise {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+/**
+ * Reset all rate-limit keys for a specific command name.
+ *
+ * @param storage - Storage backend to delete from.
+ * @param keyPrefix - Optional prefix to prepend to the key.
+ * @param commandName - Command name to reset.
+ * @returns Resolves after matching keys are deleted.
+ */
+async function resetByCommand(
+ storage: RateLimitStorage,
+ keyPrefix: string | undefined,
+ commandName: string,
+) {
+ if (!storage.deleteByPattern) return;
+ const prefix = keyPrefix ?? '';
+ const pattern = `${prefix}*:${commandName}`;
+ await storage.deleteByPattern(pattern);
+ await storage.deleteByPattern(`violation:${pattern}`);
+ await storage.deleteByPattern(`${pattern}:w:*`);
+ await storage.deleteByPattern(`violation:${pattern}:w:*`);
+}
+
+/**
+ * Normalize path separators to forward slashes for comparisons.
+ *
+ * @param path - Path to normalize.
+ * @returns Normalized path string.
+ */
+function normalizePath(path: string): string {
+ return path.replace(/\\/g, '/');
+}
diff --git a/packages/ratelimit/src/providers/fallback.ts b/packages/ratelimit/src/providers/fallback.ts
new file mode 100644
index 00000000..5111db35
--- /dev/null
+++ b/packages/ratelimit/src/providers/fallback.ts
@@ -0,0 +1,8 @@
+/**
+ * Provider re-export for fallback storage.
+ *
+ * Exposes the wrapper and its options for consumers.
+ */
+
+export { FallbackRateLimitStorage } from '../storage/fallback';
+export type { FallbackRateLimitStorageOptions } from '../storage/fallback';
diff --git a/packages/ratelimit/src/providers/memory.ts b/packages/ratelimit/src/providers/memory.ts
new file mode 100644
index 00000000..4c9656e0
--- /dev/null
+++ b/packages/ratelimit/src/providers/memory.ts
@@ -0,0 +1,7 @@
+/**
+ * Provider re-export for memory storage.
+ *
+ * Keeps public imports stable across plugin packages.
+ */
+
+export { MemoryRateLimitStorage } from '../storage/memory';
diff --git a/packages/ratelimit/src/providers/redis.ts b/packages/ratelimit/src/providers/redis.ts
new file mode 100644
index 00000000..44cbb314
--- /dev/null
+++ b/packages/ratelimit/src/providers/redis.ts
@@ -0,0 +1,8 @@
+/**
+ * Provider re-export for Redis storage.
+ *
+ * Exposes the storage class and RedisOptions type for consumers.
+ */
+
+export { RedisRateLimitStorage } from '../storage/redis';
+export type { RedisOptions } from 'ioredis';
diff --git a/packages/ratelimit/src/runtime.ts b/packages/ratelimit/src/runtime.ts
new file mode 100644
index 00000000..b5c3e1fe
--- /dev/null
+++ b/packages/ratelimit/src/runtime.ts
@@ -0,0 +1,69 @@
+/**
+ * Runtime globals for rate limiting.
+ *
+ * Stores the active storage and plugin context for directives and helpers.
+ */
+
+import type { RateLimitRuntimeContext, RateLimitStorage } from './types';
+
+let defaultStorage: RateLimitStorage | null = null;
+let activeRuntime: RateLimitRuntimeContext | null = null;
+
/**
 * Set the default rate limit storage instance for the process.
 *
 * This is module-level state: it affects every consumer in the current
 * process, not a single client instance.
 *
 * @param storage - Storage driver to use for rate-limit state.
 * @returns Nothing; updates the process-wide default storage.
 */
export function setRateLimitStorage(storage: RateLimitStorage): void {
  defaultStorage = storage;
}
+
/**
 * Get the default rate limit storage instance for the process.
 *
 * @returns Default storage instance, or null if none was set via
 * `setRateLimitStorage`/`setDriver`.
 */
export function getRateLimitStorage(): RateLimitStorage | null {
  return defaultStorage;
}
+
+/**
+ * Alias for setRateLimitStorage to match other packages (tasks/queue).
+ *
+ * @param storage - Storage driver to use for rate-limit state.
+ * @returns Nothing; updates the process-wide default storage.
+ */
+export function setDriver(storage: RateLimitStorage): void {
+ setRateLimitStorage(storage);
+}
+
+/**
+ * Alias for getRateLimitStorage to match other packages (tasks/queue).
+ *
+ * @returns Default storage instance or null if unset.
+ */
+export function getDriver(): RateLimitStorage | null {
+ return getRateLimitStorage();
+}
+
/**
 * Set the active runtime context used by directives and APIs.
 *
 * Passing null clears the context (e.g. on plugin teardown).
 *
 * @param runtime - Active runtime context or null to clear.
 * @returns Nothing; updates the active runtime context.
 */
export function setRateLimitRuntime(
  runtime: RateLimitRuntimeContext | null,
): void {
  activeRuntime = runtime;
}
+
/**
 * Get the active runtime context for directives and APIs.
 *
 * @returns Active runtime context or null if not initialized.
 */
export function getRateLimitRuntime(): RateLimitRuntimeContext | null {
  return activeRuntime;
}
diff --git a/packages/ratelimit/src/storage/fallback.ts b/packages/ratelimit/src/storage/fallback.ts
new file mode 100644
index 00000000..f4a89472
--- /dev/null
+++ b/packages/ratelimit/src/storage/fallback.ts
@@ -0,0 +1,365 @@
+/**
+ * Fallback storage wrapper.
+ *
+ * Routes storage calls to a secondary backend when the primary fails.
+ */
+
+import { Logger } from 'commandkit';
+import type { RateLimitStorage } from '../types';
+
/**
 * Options that control fallback logging/cooldown behavior.
 *
 * Only logging is configurable; the fallback routing itself is not.
 */
export interface FallbackRateLimitStorageOptions {
  /**
   * Minimum time between fallback log entries (to avoid log spam).
   * Expressed in milliseconds.
   *
   * @default 30000
   */
  cooldownMs?: number;
}
+
+/**
+ * Storage wrapper that falls back to a secondary implementation on failure.
+ *
+ * @implements RateLimitStorage
+ */
+export class FallbackRateLimitStorage implements RateLimitStorage {
+ private lastErrorAt = 0;
+
+ /**
+ * Create a fallback wrapper with primary/secondary storages.
+ *
+ * @param primary - Primary storage backend.
+ * @param secondary - Secondary storage backend used on failure.
+ * @param options - Fallback logging and cooldown options.
+ */
+ public constructor(
+ private readonly primary: RateLimitStorage,
+ private readonly secondary: RateLimitStorage,
+ private readonly options: FallbackRateLimitStorageOptions = {},
+ ) {}
+
+ /**
+ * Check whether a fallback error should be logged.
+ *
+ * @returns True when the log cooldown has elapsed.
+ */
+ private shouldLog(): boolean {
+ const now = Date.now();
+ const cooldown = this.options.cooldownMs ?? 30_000;
+ if (now - this.lastErrorAt > cooldown) {
+ this.lastErrorAt = now;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Execute a storage operation with a fallback on failure.
+ *
+ * @param op - Primary operation.
+ * @param fallback - Secondary operation when primary fails.
+ * @returns Result from the primary or fallback operation.
+ */
+ private async withFallback(
+ op: () => Promise,
+ fallback: () => Promise,
+ ): Promise {
+ try {
+ return await op();
+ } catch (error) {
+ if (this.shouldLog()) {
+ Logger.error`[ratelimit] Storage error, falling back to secondary: ${error}`;
+ }
+ return fallback();
+ }
+ }
+
+ /**
+ * Read a value using primary storage with fallback.
+ *
+ * @param key - Storage key to read.
+ * @returns Stored value or null when absent.
+ */
+ async get(key: string): Promise {
+ return this.withFallback(
+ () => this.primary.get(key),
+ () => this.secondary.get(key),
+ );
+ }
+
+ /**
+ * Store a value using primary storage with fallback.
+ *
+ * @param key - Storage key to write.
+ * @param value - Value to store.
+ * @param ttlMs - Optional TTL in milliseconds.
+ * @returns Resolves when the value is stored.
+ */
+ async set(key: string, value: T, ttlMs?: number): Promise {
+ return this.withFallback(
+ () => this.primary.set(key, value, ttlMs),
+ () => this.secondary.set(key, value, ttlMs),
+ );
+ }
+
+ /**
+ * Delete a key using primary storage with fallback.
+ *
+ * @param key - Storage key to delete.
+ * @returns Resolves when the key is removed.
+ */
+ async delete(key: string): Promise {
+ return this.withFallback(
+ () => this.primary.delete(key),
+ () => this.secondary.delete(key),
+ );
+ }
+
+ /**
+ * Increment a fixed-window counter using primary storage with fallback.
+ *
+ * @param key - Storage key to increment.
+ * @param ttlMs - TTL window in milliseconds.
+ * @returns Fixed-window consume result.
+ * @throws Error when either storage lacks incr support.
+ */
+ async incr(key: string, ttlMs: number) {
+ if (!this.primary.incr || !this.secondary.incr) {
+ throw new Error('incr not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.incr!(key, ttlMs),
+ () => this.secondary.incr!(key, ttlMs),
+ );
+ }
+
+ /**
+ * Read TTL using primary storage with fallback.
+ *
+ * @param key - Storage key to inspect.
+ * @returns Remaining TTL in ms or null when no TTL is set.
+ * @throws Error when either storage lacks ttl support.
+ */
+ async ttl(key: string) {
+ if (!this.primary.ttl || !this.secondary.ttl) {
+ throw new Error('ttl not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.ttl!(key),
+ () => this.secondary.ttl!(key),
+ );
+ }
+
+ /**
+ * Update TTL using primary storage with fallback.
+ *
+ * @param key - Storage key to update.
+ * @param ttlMs - TTL in milliseconds.
+ * @returns Resolves after the TTL is updated.
+ * @throws Error when either storage lacks expire support.
+ */
+ async expire(key: string, ttlMs: number) {
+ if (!this.primary.expire || !this.secondary.expire) {
+ throw new Error('expire not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.expire!(key, ttlMs),
+ () => this.secondary.expire!(key, ttlMs),
+ );
+ }
+
+ /**
+ * Add a member to a sorted set using primary storage with fallback.
+ *
+ * @param key - Sorted-set key.
+ * @param score - Score to associate with the member.
+ * @param member - Member identifier.
+ * @returns Resolves when the member is added.
+ */
+ async zAdd(key: string, score: number, member: string) {
+ if (!this.primary.zAdd || !this.secondary.zAdd) {
+ throw new Error('zAdd not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.zAdd!(key, score, member),
+ () => this.secondary.zAdd!(key, score, member),
+ );
+ }
+
+ /**
+ * Remove sorted-set members in a score range with fallback.
+ *
+ * @param key - Sorted-set key.
+ * @param min - Minimum score (inclusive).
+ * @param max - Maximum score (inclusive).
+ * @returns Resolves when the range is removed.
+ */
+ async zRemRangeByScore(key: string, min: number, max: number) {
+ if (!this.primary.zRemRangeByScore || !this.secondary.zRemRangeByScore) {
+ throw new Error('zRemRangeByScore not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.zRemRangeByScore!(key, min, max),
+ () => this.secondary.zRemRangeByScore!(key, min, max),
+ );
+ }
+
+ /**
+ * Count sorted-set members with fallback.
+ *
+ * @param key - Sorted-set key.
+ * @returns Number of members in the set.
+ */
+ async zCard(key: string) {
+ if (!this.primary.zCard || !this.secondary.zCard) {
+ throw new Error('zCard not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.zCard!(key),
+ () => this.secondary.zCard!(key),
+ );
+ }
+
+ /**
+ * Read sorted-set members in a score range with fallback.
+ *
+ * @param key - Sorted-set key.
+ * @param min - Minimum score (inclusive).
+ * @param max - Maximum score (inclusive).
+ * @returns Ordered members in the score range.
+ */
+ async zRangeByScore(key: string, min: number, max: number) {
+ if (!this.primary.zRangeByScore || !this.secondary.zRangeByScore) {
+ throw new Error('zRangeByScore not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.zRangeByScore!(key, min, max),
+ () => this.secondary.zRangeByScore!(key, min, max),
+ );
+ }
+
+ /**
+ * Atomically consume a fixed-window counter with fallback.
+ *
+ * @param key - Storage key to consume.
+ * @param limit - Request limit for the window.
+ * @param windowMs - Window size in milliseconds.
+ * @param nowMs - Current timestamp in milliseconds.
+ * @returns Fixed-window consume result.
+ * @throws Error when either storage lacks consumeFixedWindow support.
+ */
+ async consumeFixedWindow(
+ key: string,
+ limit: number,
+ windowMs: number,
+ nowMs: number,
+ ) {
+ if (
+ !this.primary.consumeFixedWindow ||
+ !this.secondary.consumeFixedWindow
+ ) {
+ throw new Error('consumeFixedWindow not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.consumeFixedWindow!(key, limit, windowMs, nowMs),
+ () => this.secondary.consumeFixedWindow!(key, limit, windowMs, nowMs),
+ );
+ }
+
+ /**
+ * Atomically consume a sliding-window log with fallback.
+ *
+ * @param key - Storage key to consume.
+ * @param limit - Request limit for the window.
+ * @param windowMs - Window size in milliseconds.
+ * @param nowMs - Current timestamp in milliseconds.
+ * @param member - Member identifier for this request.
+ * @returns Sliding-window consume result.
+ * @throws Error when either storage lacks consumeSlidingWindowLog support.
+ */
+ async consumeSlidingWindowLog(
+ key: string,
+ limit: number,
+ windowMs: number,
+ nowMs: number,
+ member: string,
+ ) {
+ if (
+ !this.primary.consumeSlidingWindowLog ||
+ !this.secondary.consumeSlidingWindowLog
+ ) {
+ throw new Error('consumeSlidingWindowLog not supported by both storages');
+ }
+ return this.withFallback(
+ () =>
+ this.primary.consumeSlidingWindowLog!(
+ key,
+ limit,
+ windowMs,
+ nowMs,
+ member,
+ ),
+ () =>
+ this.secondary.consumeSlidingWindowLog!(
+ key,
+ limit,
+ windowMs,
+ nowMs,
+ member,
+ ),
+ );
+ }
+
+ /**
+ * Delete keys with a prefix using primary storage with fallback.
+ *
+ * @param prefix - Prefix to match.
+ * @returns Resolves after matching keys are deleted.
+ * @throws Error when either storage lacks deleteByPrefix support.
+ */
+ async deleteByPrefix(prefix: string) {
+ if (!this.primary.deleteByPrefix || !this.secondary.deleteByPrefix) {
+ throw new Error('deleteByPrefix not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.deleteByPrefix!(prefix),
+ () => this.secondary.deleteByPrefix!(prefix),
+ );
+ }
+
+ /**
+ * Delete keys matching a pattern using primary storage with fallback.
+ *
+ * @param pattern - Glob pattern to match.
+ * @returns Resolves after matching keys are deleted.
+ * @throws Error when either storage lacks deleteByPattern support.
+ */
+ async deleteByPattern(pattern: string) {
+ if (!this.primary.deleteByPattern || !this.secondary.deleteByPattern) {
+ throw new Error('deleteByPattern not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.deleteByPattern!(pattern),
+ () => this.secondary.deleteByPattern!(pattern),
+ );
+ }
+
+ /**
+ * List keys matching a prefix using primary storage with fallback.
+ *
+ * @param prefix - Prefix to match.
+ * @returns Matching keys.
+ * @throws Error when either storage lacks keysByPrefix support.
+ */
+ async keysByPrefix(prefix: string) {
+ if (!this.primary.keysByPrefix || !this.secondary.keysByPrefix) {
+ throw new Error('keysByPrefix not supported by both storages');
+ }
+ return this.withFallback(
+ () => this.primary.keysByPrefix!(prefix),
+ () => this.secondary.keysByPrefix!(prefix),
+ );
+ }
+}
diff --git a/packages/ratelimit/src/storage/memory.ts b/packages/ratelimit/src/storage/memory.ts
new file mode 100644
index 00000000..0cea2ea8
--- /dev/null
+++ b/packages/ratelimit/src/storage/memory.ts
@@ -0,0 +1,365 @@
+/**
+ * In-memory storage.
+ *
+ * Used for tests and local development; implements TTL and sorted-set helpers.
+ * Not suitable for multi-process deployments.
+ */
+
+import type {
+ FixedWindowConsumeResult,
+ RateLimitStorage,
+ SlidingWindowConsumeResult,
+} from '../types';
+
+interface KvEntry {
+ value: unknown;
+ expiresAt: number | null;
+}
+
+interface ZSetItem {
+ score: number;
+ member: string;
+}
+
+interface ZSetEntry {
+ items: ZSetItem[];
+ expiresAt: number | null;
+}
+
+/**
+ * In-memory storage used for tests and local usage.
+ *
+ * @implements RateLimitStorage
+ */
+export class MemoryRateLimitStorage implements RateLimitStorage {
+ private readonly kv = new Map<string, KvEntry>();
+ private readonly zsets = new Map<string, ZSetEntry>();
+
+ private now(): number {
+ return Date.now();
+ }
+
+ private isExpired(expiresAt: number | null): boolean {
+ return expiresAt != null && expiresAt <= this.now();
+ }
+
+ /**
+ * Clear expired entries so reads reflect current state.
+ *
+ * @param key - Storage key to clean.
+ */
+ private cleanupKey(key: string) {
+ const kvEntry = this.kv.get(key);
+ if (kvEntry && this.isExpired(kvEntry.expiresAt)) {
+ this.kv.delete(key);
+ }
+
+ const zEntry = this.zsets.get(key);
+ if (zEntry && this.isExpired(zEntry.expiresAt)) {
+ this.zsets.delete(key);
+ }
+ }
+
+ /**
+ * Read a value from the in-memory key/value store.
+ *
+ * @param key - Storage key to read.
+ * @returns Stored value or null when absent/expired.
+ */
+ async get<T>(key: string): Promise<T | null> {
+ this.cleanupKey(key);
+ const entry = this.kv.get(key);
+ if (!entry) return null;
+ return entry.value as T;
+ }
+
+ /**
+ * Store a value in memory with optional TTL.
+ *
+ * @param key - Storage key to write.
+ * @param value - Value to store.
+ * @param ttlMs - Optional TTL in milliseconds.
+ * @returns Resolves when the value is stored.
+ */
+ async set<T>(key: string, value: T, ttlMs?: number): Promise<void> {
+ const expiresAt = typeof ttlMs === 'number' ? this.now() + ttlMs : null;
+ this.kv.set(key, { value, expiresAt });
+ }
+
+ /**
+ * Delete a key from the in-memory store.
+ *
+ * @param key - Storage key to delete.
+ * @returns Resolves when the key is removed.
+ */
+ async delete(key: string): Promise<void> {
+ this.kv.delete(key);
+ this.zsets.delete(key);
+ }
+
+ /**
+ * Increment a fixed-window counter with TTL handling.
+ *
+ * @param key - Storage key to increment.
+ * @param ttlMs - TTL window in milliseconds.
+ * @returns Updated counter value and remaining TTL.
+ */
+ async incr(key: string, ttlMs: number): Promise<FixedWindowConsumeResult> {
+ this.cleanupKey(key);
+ const entry = this.kv.get(key);
+
+ if (!entry || typeof entry.value !== 'number') {
+ const expiresAt = this.now() + ttlMs;
+ this.kv.set(key, { value: 1, expiresAt });
+ return { count: 1, ttlMs };
+ }
+
+ const count = entry.value + 1;
+ entry.value = count;
+ if (!entry.expiresAt) {
+ entry.expiresAt = this.now() + ttlMs;
+ }
+
+ const remainingTtl = Math.max(
+ 0,
+ (entry.expiresAt ?? this.now()) - this.now(),
+ );
+ return { count, ttlMs: remainingTtl };
+ }
+
+ /**
+ * Read the TTL for a key when present.
+ *
+ * @param key - Storage key to inspect.
+ * @returns Remaining TTL in ms or null when no TTL is set.
+ */
+ async ttl(key: string): Promise<number | null> {
+ this.cleanupKey(key);
+ const entry = this.kv.get(key) ?? this.zsets.get(key);
+ if (!entry) return null;
+ if (entry.expiresAt == null) return null;
+ return Math.max(0, entry.expiresAt - this.now());
+ }
+
+ /**
+ * Update the TTL for an existing key.
+ *
+ * @param key - Storage key to update.
+ * @param ttlMs - TTL in milliseconds.
+ * @returns Resolves after the TTL is updated.
+ */
+ async expire(key: string, ttlMs: number): Promise<void> {
+ const expiresAt = this.now() + ttlMs;
+ const kvEntry = this.kv.get(key);
+ if (kvEntry) kvEntry.expiresAt = expiresAt;
+ const zEntry = this.zsets.get(key);
+ if (zEntry) zEntry.expiresAt = expiresAt;
+ }
+
+ /**
+ * Add a member to a sorted set with the given score.
+ *
+ * @param key - Sorted-set key.
+ * @param score - Score to associate with the member.
+ * @param member - Member identifier.
+ * @returns Resolves when the member is added.
+ */
+ async zAdd(key: string, score: number, member: string): Promise<void> {
+ this.cleanupKey(key);
+ const entry = this.zsets.get(key) ?? { items: [], expiresAt: null };
+ const existingIndex = entry.items.findIndex(
+ (item) => item.member === member,
+ );
+ if (existingIndex >= 0) {
+ entry.items[existingIndex] = { score, member };
+ } else {
+ entry.items.push({ score, member });
+ }
+ entry.items.sort((a, b) => a.score - b.score);
+ this.zsets.set(key, entry);
+ }
+
+ /**
+ * Remove sorted-set members with scores in the given range.
+ *
+ * @param key - Sorted-set key.
+ * @param min - Minimum score (inclusive).
+ * @param max - Maximum score (inclusive).
+ * @returns Resolves when the range is removed.
+ */
+ async zRemRangeByScore(key: string, min: number, max: number): Promise<void> {
+ this.cleanupKey(key);
+ const entry = this.zsets.get(key);
+ if (!entry) return;
+ entry.items = entry.items.filter(
+ (item) => item.score < min || item.score > max,
+ );
+ }
+
+ /**
+ * Count members in a sorted set.
+ *
+ * @param key - Sorted-set key.
+ * @returns Number of members in the set.
+ */
+ async zCard(key: string): Promise<number> {
+ this.cleanupKey(key);
+ const entry = this.zsets.get(key);
+ return entry ? entry.items.length : 0;
+ }
+
+ /**
+ * Read sorted-set members in a score range.
+ *
+ * @param key - Sorted-set key.
+ * @param min - Minimum score (inclusive).
+ * @param max - Maximum score (inclusive).
+ * @returns Ordered members in the score range.
+ */
+ async zRangeByScore(
+ key: string,
+ min: number,
+ max: number,
+ ): Promise<string[]> {
+ this.cleanupKey(key);
+ const entry = this.zsets.get(key);
+ if (!entry) return [];
+ return entry.items
+ .filter((item) => item.score >= min && item.score <= max)
+ .map((item) => item.member);
+ }
+
+ /**
+ * Atomically consume a fixed-window counter for the key.
+ *
+ * @param key - Storage key to consume.
+ * @param _limit - Request limit (unused; this implementation only counts).
+ * @param windowMs - Window size in milliseconds.
+ * @param _nowMs - Current timestamp (unused; Date.now() is read internally).
+ * @returns Fixed-window consume result.
+ */
+ async consumeFixedWindow(
+ key: string,
+ _limit: number,
+ windowMs: number,
+ _nowMs: number,
+ ): Promise<FixedWindowConsumeResult> {
+ return this.incr(key, windowMs);
+ }
+
+ /**
+ * Atomically consume a sliding-window log for the key.
+ *
+ * @param key - Storage key to consume.
+ * @param limit - Request limit for the window.
+ * @param windowMs - Window size in milliseconds.
+ * @param nowMs - Current timestamp in milliseconds.
+ * @param member - Member identifier for this request.
+ * @returns Sliding-window consume result.
+ */
+ async consumeSlidingWindowLog(
+ key: string,
+ limit: number,
+ windowMs: number,
+ nowMs: number,
+ member: string,
+ ): Promise<SlidingWindowConsumeResult> {
+ await this.zRemRangeByScore(key, 0, nowMs - windowMs);
+ const count = await this.zCard(key);
+ if (count >= limit) {
+ const oldest = await this.zRangeByScore(
+ key,
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY,
+ );
+ const oldestMember = oldest[0];
+ const oldestTs = parseMemberTimestamp(oldestMember, nowMs);
+ return { allowed: false, count, resetAt: oldestTs + windowMs };
+ }
+
+ await this.zAdd(key, nowMs, member);
+ await this.expire(key, windowMs);
+ const newCount = count + 1;
+ const oldest = await this.zRangeByScore(
+ key,
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY,
+ );
+ const oldestMember = oldest[0];
+ const oldestTs = parseMemberTimestamp(oldestMember, nowMs);
+
+ return { allowed: true, count: newCount, resetAt: oldestTs + windowMs };
+ }
+
+ /**
+ * Delete keys with the given prefix.
+ *
+ * @param prefix - Prefix to match.
+ * @returns Resolves after matching keys are deleted.
+ */
+ async deleteByPrefix(prefix: string): Promise<void> {
+ for (const key of Array.from(this.kv.keys())) {
+ if (key.startsWith(prefix)) this.kv.delete(key);
+ }
+ for (const key of Array.from(this.zsets.keys())) {
+ if (key.startsWith(prefix)) this.zsets.delete(key);
+ }
+ }
+
+ /**
+ * Delete keys matching a glob pattern.
+ *
+ * @param pattern - Glob pattern to match.
+ * @returns Resolves after matching keys are deleted.
+ */
+ async deleteByPattern(pattern: string): Promise<void> {
+ const regex = globToRegex(pattern);
+ for (const key of Array.from(this.kv.keys())) {
+ if (regex.test(key)) this.kv.delete(key);
+ }
+ for (const key of Array.from(this.zsets.keys())) {
+ if (regex.test(key)) this.zsets.delete(key);
+ }
+ }
+
+ /**
+ * List keys that match a prefix.
+ *
+ * @param prefix - Prefix to match.
+ * @returns Matching keys.
+ */
+ async keysByPrefix(prefix: string): Promise<string[]> {
+ const keys = new Set<string>();
+ const kvKeys = Array.from(this.kv.keys());
+ for (const key of kvKeys) {
+ this.cleanupKey(key);
+ if (this.kv.has(key) && key.startsWith(prefix)) {
+ keys.add(key);
+ }
+ }
+ const zsetKeys = Array.from(this.zsets.keys());
+ for (const key of zsetKeys) {
+ this.cleanupKey(key);
+ if (this.zsets.has(key) && key.startsWith(prefix)) {
+ keys.add(key);
+ }
+ }
+ return Array.from(keys);
+ }
+}
+
+function globToRegex(glob: string): RegExp {
+ const escaped = glob.replace(/[.+?^${}()|[\]\\]/g, '\\$&');
+ const regex = `^${escaped.replace(/\*/g, '.*')}$`;
+ return new RegExp(regex);
+}
+
+function parseMemberTimestamp(
+ member: string | undefined,
+ fallback: number,
+): number {
+ if (!member) return fallback;
+ const prefix = member.split('-')[0];
+ const parsed = Number(prefix);
+ return Number.isFinite(parsed) ? parsed : fallback;
+}
diff --git a/packages/ratelimit/src/storage/redis.ts b/packages/ratelimit/src/storage/redis.ts
new file mode 100644
index 00000000..9fab4348
--- /dev/null
+++ b/packages/ratelimit/src/storage/redis.ts
@@ -0,0 +1,303 @@
+/**
+ * Redis storage.
+ *
+ * Uses Lua scripts for atomic fixed/sliding window operations.
+ */
+
+import Redis, { type RedisOptions } from 'ioredis';
+import type {
+ FixedWindowConsumeResult,
+ RateLimitStorage,
+ SlidingWindowConsumeResult,
+} from '../types';
+
+const FIXED_WINDOW_SCRIPT = /* lua */ `
+ local key = KEYS[1]
+ local window = tonumber(ARGV[1])
+ local count = redis.call('INCR', key)
+ local ttl = redis.call('PTTL', key)
+ if ttl < 0 then
+ redis.call('PEXPIRE', key, window)
+ ttl = window
+ end
+ return {count, ttl}
+`;
+
+const SLIDING_WINDOW_SCRIPT = /* lua */ `
+ local key = KEYS[1]
+ local limit = tonumber(ARGV[1])
+ local window = tonumber(ARGV[2])
+ local now = tonumber(ARGV[3])
+ local member = ARGV[4]
+
+ redis.call('ZREMRANGEBYSCORE', key, 0, now - window)
+ local count = redis.call('ZCARD', key)
+
+ if count >= limit then
+ local oldest = redis.call('ZRANGE', key, 0, 0, 'WITHSCORES')
+ local resetAt = now + window
+ if oldest[2] then
+ resetAt = tonumber(oldest[2]) + window
+ end
+ return {0, count, resetAt}
+ end
+
+ redis.call('ZADD', key, now, member)
+ redis.call('PEXPIRE', key, window)
+ count = count + 1
+ local oldest = redis.call('ZRANGE', key, 0, 0, 'WITHSCORES')
+ local resetAt = now + window
+ if oldest[2] then
+ resetAt = tonumber(oldest[2]) + window
+ end
+ return {1, count, resetAt}
+`;
+
+/**
+ * Redis-backed storage with Lua scripts for atomic window operations.
+ *
+ * @implements RateLimitStorage
+ */
+export class RedisRateLimitStorage implements RateLimitStorage {
+ public readonly redis: Redis;
+
+ public constructor(redis?: Redis | RedisOptions) {
+ this.redis = redis instanceof Redis ? redis : new Redis(redis ?? {});
+ }
+
+ /**
+ * Read a value from Redis and JSON-decode it.
+ *
+ * @param key - Storage key to read.
+ * @returns Parsed value or null when absent.
+ */
+ async get<T>(key: string): Promise<T | null> {
+ const value = await this.redis.get(key);
+ if (value == null) return null;
+ return JSON.parse(value) as T;
+ }
+
+ /**
+ * Store a value in Redis with optional TTL.
+ *
+ * @param key - Storage key to write.
+ * @param value - Value to serialize and store.
+ * @param ttlMs - Optional TTL in milliseconds.
+ * @returns Resolves when the value is stored.
+ */
+ async set<T>(key: string, value: T, ttlMs?: number): Promise<void> {
+ const payload = JSON.stringify(value);
+ if (typeof ttlMs === 'number') {
+ await this.redis.set(key, payload, 'PX', ttlMs);
+ return;
+ }
+ await this.redis.set(key, payload);
+ }
+
+ /**
+ * Delete a key from Redis.
+ *
+ * @param key - Storage key to delete.
+ * @returns Resolves when the key is removed.
+ */
+ async delete(key: string): Promise<void> {
+ await this.redis.del(key);
+ }
+
+ /**
+ * Read the TTL for a key when present.
+ *
+ * @param key - Storage key to inspect.
+ * @returns Remaining TTL in ms or null when no TTL is set.
+ */
+ async ttl(key: string): Promise<number | null> {
+ const ttl = await this.redis.pttl(key);
+ if (ttl < 0) return null;
+ return ttl;
+ }
+
+ /**
+ * Update the TTL for an existing key.
+ *
+ * @param key - Storage key to update.
+ * @param ttlMs - TTL in milliseconds.
+ * @returns Resolves after the TTL is updated.
+ */
+ async expire(key: string, ttlMs: number): Promise<void> {
+ await this.redis.pexpire(key, ttlMs);
+ }
+
+ /**
+ * Add a member to a sorted set with the given score.
+ *
+ * @param key - Sorted-set key.
+ * @param score - Score to associate with the member.
+ * @param member - Member identifier.
+ * @returns Resolves when the member is added.
+ */
+ async zAdd(key: string, score: number, member: string): Promise<void> {
+ await this.redis.zadd(key, score.toString(), member);
+ }
+
+ /**
+ * Remove sorted-set members with scores in the given range.
+ *
+ * @param key - Sorted-set key.
+ * @param min - Minimum score (inclusive).
+ * @param max - Maximum score (inclusive).
+ * @returns Resolves when the range is removed.
+ */
+ async zRemRangeByScore(key: string, min: number, max: number): Promise<void> {
+ await this.redis.zremrangebyscore(key, min.toString(), max.toString());
+ }
+
+ /**
+ * Count members in a sorted set.
+ *
+ * @param key - Sorted-set key.
+ * @returns Number of members in the set.
+ */
+ async zCard(key: string): Promise<number> {
+ return Number(await this.redis.zcard(key));
+ }
+
+ /**
+ * Read sorted-set members in a score range.
+ *
+ * @param key - Sorted-set key.
+ * @param min - Minimum score (inclusive).
+ * @param max - Maximum score (inclusive).
+ * @returns Ordered members in the score range.
+ */
+ async zRangeByScore(
+ key: string,
+ min: number,
+ max: number,
+ ): Promise<string[]> {
+ return this.redis.zrangebyscore(key, min.toString(), max.toString());
+ }
+
+ /**
+ * Atomically consume a fixed-window counter via Lua.
+ *
+ * @param key - Storage key to consume.
+ * @param _limit - Limit (unused by the script).
+ * @param windowMs - Window size in milliseconds.
+ * @param _nowMs - Current time (unused by the script).
+ * @returns Fixed-window consume result.
+ */
+ async consumeFixedWindow(
+ key: string,
+ _limit: number,
+ windowMs: number,
+ _nowMs: number,
+ ): Promise<FixedWindowConsumeResult> {
+ const result = (await this.redis.eval(
+ FIXED_WINDOW_SCRIPT,
+ 1,
+ key,
+ windowMs.toString(),
+ )) as [number, number];
+
+ return {
+ count: Number(result[0]),
+ ttlMs: Number(result[1]),
+ };
+ }
+
+ /**
+ * Atomically consume a sliding-window log via Lua.
+ *
+ * @param key - Storage key to consume.
+ * @param limit - Request limit for the window.
+ * @param windowMs - Window size in milliseconds.
+ * @param nowMs - Current timestamp in milliseconds.
+ * @param member - Member identifier for this request.
+ * @returns Sliding-window consume result.
+ */
+ async consumeSlidingWindowLog(
+ key: string,
+ limit: number,
+ windowMs: number,
+ nowMs: number,
+ member: string,
+ ): Promise<SlidingWindowConsumeResult> {
+ const result = (await this.redis.eval(
+ SLIDING_WINDOW_SCRIPT,
+ 1,
+ key,
+ limit.toString(),
+ windowMs.toString(),
+ nowMs.toString(),
+ member,
+ )) as [number, number, number];
+
+ return {
+ allowed: Number(result[0]) === 1,
+ count: Number(result[1]),
+ resetAt: Number(result[2]),
+ };
+ }
+
+ /**
+ * Delete keys with the given prefix.
+ *
+ * @param prefix - Prefix to match.
+ * @returns Resolves after matching keys are deleted.
+ */
+ async deleteByPrefix(prefix: string): Promise<void> {
+ await this.deleteByPattern(`${prefix}*`);
+ }
+
+ /**
+ * Delete keys matching a glob pattern using SCAN to avoid blocking Redis.
+ *
+ * @param pattern - Glob pattern to match keys against.
+ * @returns Resolves after matching keys are deleted.
+ */
+ async deleteByPattern(pattern: string): Promise<void> {
+ let cursor = '0';
+ do {
+ const [nextCursor, keys] = (await this.redis.scan(
+ cursor,
+ 'MATCH',
+ pattern,
+ 'COUNT',
+ '100',
+ )) as [string, string[]];
+
+ if (keys.length) {
+ await this.redis.del(...keys);
+ }
+ cursor = nextCursor;
+ } while (cursor !== '0');
+ }
+
+ /**
+ * List keys that match a prefix using SCAN.
+ *
+ * @param prefix - Prefix to match.
+ * @returns Matching keys.
+ */
+ async keysByPrefix(prefix: string): Promise<string[]> {
+ const pattern = `${prefix}*`;
+ const collected = new Set<string>();
+ let cursor = '0';
+ do {
+ const [nextCursor, keys] = (await this.redis.scan(
+ cursor,
+ 'MATCH',
+ pattern,
+ 'COUNT',
+ '100',
+ )) as [string, string[]];
+
+ for (const key of keys) {
+ collected.add(key);
+ }
+ cursor = nextCursor;
+ } while (cursor !== '0');
+
+ return Array.from(collected);
+ }
+}
diff --git a/packages/ratelimit/src/types.ts b/packages/ratelimit/src/types.ts
new file mode 100644
index 00000000..cf42971f
--- /dev/null
+++ b/packages/ratelimit/src/types.ts
@@ -0,0 +1,371 @@
+/**
+ * Rate limit type contracts.
+ *
+ * Shared config and result shapes for the plugin, engine, storage, and helpers.
+ * Keeping them in one place reduces drift between runtime behavior and docs.
+ */
+
+import type { Interaction, Message } from 'discord.js';
+import type { Context } from 'commandkit';
+import type { LoadedCommand } from 'commandkit';
+
+/**
+ * Scopes used to build rate limit keys and apply per-scope limits.
+ */
+export const RATE_LIMIT_SCOPES = [
+ 'user',
+ 'guild',
+ 'channel',
+ 'global',
+ 'user-guild',
+ 'custom',
+] as const;
+
+/**
+ * Literal union of supported key scopes.
+ */
+export type RateLimitScope = (typeof RATE_LIMIT_SCOPES)[number];
+
+/**
+ * Scopes eligible for temporary exemptions stored in rate limit storage.
+ */
+export const RATE_LIMIT_EXEMPTION_SCOPES = [
+ 'user',
+ 'guild',
+ 'role',
+ 'channel',
+ 'category',
+] as const;
+
+/**
+ * Literal union of exemption scopes.
+ */
+export type RateLimitExemptionScope =
+ (typeof RATE_LIMIT_EXEMPTION_SCOPES)[number];
+
+/**
+ * Algorithm identifiers used to select the limiter implementation.
+ */
+export const RATE_LIMIT_ALGORITHMS = [
+ 'fixed-window',
+ 'sliding-window',
+ 'token-bucket',
+ 'leaky-bucket',
+] as const;
+
+/**
+ * Literal union of algorithm identifiers.
+ */
+export type RateLimitAlgorithmType = (typeof RATE_LIMIT_ALGORITHMS)[number];
+
+/**
+ * Duration input accepted by configs: milliseconds or a duration string.
+ */
+export type DurationLike = number | string;
+
+/**
+ * Queue behavior for delayed retries after a limit is hit.
+ */
+export interface RateLimitQueueOptions {
+ enabled?: boolean;
+ maxSize?: number;
+ timeout?: DurationLike;
+ deferInteraction?: boolean;
+ ephemeral?: boolean;
+ concurrency?: number;
+}
+
+/**
+ * Strategy for choosing among matching role-based overrides.
+ */
+export type RateLimitRoleLimitStrategy = 'highest' | 'lowest' | 'first';
+
+/**
+ * Result for a single limiter/window evaluation used for aggregation.
+ */
+export interface RateLimitResult {
+ key: string;
+ scope: RateLimitScope;
+ algorithm: RateLimitAlgorithmType;
+ windowId?: string;
+ limited: boolean;
+ remaining: number;
+ resetAt: number;
+ retryAfter: number;
+ limit: number;
+}
+
+/**
+ * Contract for rate limit algorithms used by the engine.
+ */
+export interface RateLimitAlgorithm {
+ readonly type: RateLimitAlgorithmType;
+ consume(key: string): Promise<RateLimitResult>;
+ reset(key: string): Promise