diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index e19724a..8a502b3 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -25,6 +25,28 @@ jobs: - name: Lint run: pnpm lint + format: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Install pnpm + run: | + corepack enable + corepack prepare pnpm@latest --activate + + - name: Install dependencies + run: pnpm install + + - name: Format + run: pnpm format + typecheck: runs-on: ubuntu-latest diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..1c0197c --- /dev/null +++ b/.prettierignore @@ -0,0 +1,11 @@ +node_modules +build +dist +.git +.github +coverage +*.min.js +pnpm-lock.yaml +yarn.lock +package-lock.json +**/CHANGELOG.md diff --git a/README.md b/README.md index 5bf9f63..599c776 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,7 @@ The one-level mode is a standard caching method. Choose from a variety of driver In addition to this, you benefit from many features that allow you to efficiently manage your cache, such as **cache stampede protection**, **grace periods**, **timeouts**, **namespaces**, etc. ### Two-levels + For those looking to go further, you can use the two-levels caching system. Here's basically how it works: - **L1: Local Cache**: First level cache. Data is stored in memory with an LRU algorithm for quick access @@ -60,7 +61,6 @@ The major benefit of multi-tier caching, is that it allows for responses between In fact, it's a quite common pattern : to quote an example, it's [what Stackoverflow does](https://nickcraver.com/blog/2019/08/06/stack-overflow-how-we-do-app-caching/#layers-of-cache-at-stack-overflow). 
- To give some perspective, here's a simple benchmark that shows the difference between a simple distributed cache ( using Redis ) vs a multi-tier cache ( using Redis + In-memory cache ) : ![Redis vs Multi-tier caching](./assets/redis_vs_mtier.png) @@ -99,10 +99,10 @@ Allows associating a cache entry with one or more tags to simplify invalidation. await bento.getOrSet({ key: 'foo', factory: getFromDb(), - tags: ['tag-1', 'tag-2'] -}); + tags: ['tag-1', 'tag-2'], +}) -await bento.deleteByTag({ tags: ['tag-1'] }); +await bento.deleteByTag({ tags: ['tag-1'] }) ``` ### Namespaces @@ -157,7 +157,7 @@ You can pass a logger to Bentocache, and it will log everything that happens. Ca import { pino } from 'pino' const bento = new BentoCache({ - logger: pino() + logger: pino(), }) ``` diff --git a/benchmarks/README.md b/benchmarks/README.md index 35131c9..d2f87c8 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -3,7 +3,7 @@ > [!IMPORTANT] > The benchmarks are not meant to be a definitive proof of which library is the best. They are mainly here to see if we make any performance regressions. And also for fun. Do not take them too seriously. -At the time of writing, every librairies seems on pair with each other when using a single tier cache. The real differences come when using a two-tier cache, only CacheManager and Bentocache support this feature. +At the time of writing, every library seems on par with the others when using a single-tier cache. The real differences come when using a two-tier cache; only CacheManager and Bentocache support this feature. - `mtier_get_key` : Just get a key from the cache stack. 
diff --git a/compose.yml b/compose.yml index 308b087..9d4f3ba 100644 --- a/compose.yml +++ b/compose.yml @@ -2,12 +2,12 @@ services: redis: image: redis:6.2-alpine ports: - - "6379:6379" + - '6379:6379' valkey: image: valkey/valkey:8.1-alpine ports: - - "6380:6379" + - '6380:6379' valkey-cluster: profiles: @@ -17,7 +17,7 @@ services: - valkey-node-1 - valkey-node-2 - valkey-node-3 - entrypoint: ["/bin/sh", "-c"] + entrypoint: ['/bin/sh', '-c'] command: - | sleep 5 @@ -32,7 +32,7 @@ services: image: valkey/valkey:8.1-alpine command: valkey-server --cluster-enabled yes --cluster-config-file nodes.conf --cluster-node-timeout 5000 --appendonly yes --port 6379 --cluster-announce-ip valkey-node-1 ports: - - "7100:6379" + - '7100:6379' networks: - valkey-cluster @@ -42,7 +42,7 @@ services: image: valkey/valkey:8.1-alpine command: valkey-server --cluster-enabled yes --cluster-config-file nodes.conf --cluster-node-timeout 5000 --appendonly yes --port 6379 --cluster-announce-ip valkey-node-2 ports: - - "7101:6379" + - '7101:6379' networks: - valkey-cluster @@ -52,7 +52,7 @@ services: image: valkey/valkey:8.1-alpine command: valkey-server --cluster-enabled yes --cluster-config-file nodes.conf --cluster-node-timeout 5000 --appendonly yes --port 6379 --cluster-announce-ip valkey-node-3 ports: - - "7102:6379" + - '7102:6379' networks: - valkey-cluster @@ -66,19 +66,19 @@ services: - MASTERS=3 - SLAVES_PER_MASTER=0 ports: - - "7000:7000" - - "7001:7001" - - "7002:7002" + - '7000:7000' + - '7001:7001' + - '7002:7002' redis-insight: image: redis/redisinsight:latest ports: - - "5540:5540" + - '5540:5540' dynamodb: image: amazon/dynamodb-local ports: - - "8000:8000" + - '8000:8000' postgres: image: postgres:15-alpine @@ -87,7 +87,7 @@ services: POSTGRES_PASSWORD: postgres POSTGRES_DB: postgres ports: - - "5432:5432" + - '5432:5432' mysql: image: mysql:8.0 @@ -95,19 +95,19 @@ services: MYSQL_ROOT_PASSWORD: root MYSQL_DATABASE: mysql ports: - - "3306:3306" + - '3306:3306' lgtm: image: 
grafana/otel-lgtm:latest extra_hosts: - - "host.docker.internal:host-gateway" + - 'host.docker.internal:host-gateway' ports: - - "3001:3000" # Grafana - - "3100:3100" # Loki HTTP API - - "3200:3200" # Tempo HTTP API - - "4317:4317" # OTLP gRPC - - "4318:4318" # OTLP HTTP - - "9090:9090" # Prometheus + - '3001:3000' # Grafana + - '3100:3100' # Loki HTTP API + - '3200:3200' # Tempo HTTP API + - '4317:4317' # OTLP gRPC + - '4318:4318' # OTLP HTTP + - '9090:9090' # Prometheus environment: - GF_SECURITY_ADMIN_USER=admin - GF_SECURITY_ADMIN_PASSWORD=admin diff --git a/docker/prometheus.yml b/docker/prometheus.yml index 8cfc3ac..ce4c615 100644 --- a/docker/prometheus.yml +++ b/docker/prometheus.yml @@ -3,10 +3,10 @@ global: scrape_timeout: 10s evaluation_interval: 5s scrape_configs: -- job_name: playground-app - metrics_path: /metrics - scheme: https - static_configs: - - targets: ['employees-projectors-reason-finland.trycloudflare.com'] - labels: - service: 'app' + - job_name: playground-app + metrics_path: /metrics + scheme: https + static_configs: + - targets: ['employees-projectors-reason-finland.trycloudflare.com'] + labels: + service: 'app' diff --git a/docs/assets/app.css b/docs/assets/app.css index 3266c7d..02e974f 100644 --- a/docs/assets/app.css +++ b/docs/assets/app.css @@ -61,7 +61,8 @@ html.dark { margin: var(--prose-elements-margin) 0; } -.markdown .media_box figure, .markdown .media_box p { +.markdown .media_box figure, +.markdown .media_box p { margin: 0; } @@ -69,7 +70,6 @@ html.dark { margin: 0px; } - @media only screen and (min-width: 768px) { .header_container { background-color: var(--mauveA1); @@ -80,4 +80,3 @@ html.dark { .markdown .table_container { overflow-x: auto; } - diff --git a/docs/content/docs/adaptive_caching.md b/docs/content/docs/adaptive_caching.md index 435f335..836ed7b 100644 --- a/docs/content/docs/adaptive_caching.md +++ b/docs/content/docs/adaptive_caching.md @@ -15,7 +15,7 @@ const authToken = await bento.getOrSet({ const token = 
await fetchAccessToken() return token }, - ttl: '10m' + ttl: '10m', }) ``` @@ -30,35 +30,35 @@ This is where adaptive caching comes in. Instead of setting a fixed TTL, we can const authToken = await bento.getOrSet({ key: 'token', factory: async (options) => { - const token = await fetchAccessToken(); - options.setOptions({ ttl: token.expiresIn }); - return token; - } -}); + const token = await fetchAccessToken() + options.setOptions({ ttl: token.expiresIn }) + return token + }, +}) ``` And that's it! Now, the token will be removed from the cache when it expires, and a new one will be fetched. -There are other use cases for adaptive caching. For example, consider managing a news feed with BentoCache. You may want to cache the freshest articles for a short period of time and the older articles for a much longer period. +There are other use cases for adaptive caching. For example, consider managing a news feed with BentoCache. You may want to cache the freshest articles for a short period of time and the older articles for a much longer period. Because the freshest articles are more likely to change: they may have typos, require updates, etc., whereas the older articles are less likely to change and may not have been updated for years. 
Let's see how we can achieve this with BentoCache: ```ts -const namespace = bento.namespace('news'); +const namespace = bento.namespace('news') const news = await namespace.getOrSet({ key: newsId, factory: async (options) => { - const newsItem = await fetchNews(newsId); + const newsItem = await fetchNews(newsId) if (newsItem.hasBeenUpdatedRecently) { - options.setOptions({ ttl: '5m' }); + options.setOptions({ ttl: '5m' }) } else { - options.setOptions({ ttl: '2d' }); + options.setOptions({ ttl: '2d' }) } - return newsItem; - } -}); + return newsItem + }, +}) ``` diff --git a/docs/content/docs/cache_drivers.md b/docs/content/docs/cache_drivers.md index d51b45e..f1b8e71 100644 --- a/docs/content/docs/cache_drivers.md +++ b/docs/content/docs/cache_drivers.md @@ -11,6 +11,7 @@ Some options are common to all drivers. For more information about them, see the You will need to install `ioredis` to use this driver. The Redis driver can be used with many different providers: + - Upstash - Vercel KV - Valkey @@ -18,7 +19,6 @@ The Redis driver can be used with many different providers: - DragonFly - Redis Cluster - The driver uses the [ioredis](https://github.com/redis/ioredis) library under the hood. So all possible ioredis configurations are assignable when creating the bentocache driver. Feel free to look at their documentation for more details. 
```ts @@ -28,10 +28,12 @@ import { redisDriver } from 'bentocache/drivers/redis' const bento = new BentoCache({ default: 'redis', stores: { - redis: bentostore().useL2Layer(redisDriver({ - connection: { host: '127.0.0.1', port: 6379 } - })) - } + redis: bentostore().useL2Layer( + redisDriver({ + connection: { host: '127.0.0.1', port: 6379 }, + }), + ), + }, }) ``` @@ -45,15 +47,17 @@ const ioredis = new Redis() const bento = new BentoCache({ default: 'redis', stores: { - redis: bentostore().useL2Layer(redisDriver({ - connection: ioredis - })) - } + redis: bentostore().useL2Layer( + redisDriver({ + connection: ioredis, + }), + ), + }, }) ``` | Option | Description | Default | -|--------------|-------------------------------------------------------------------------------|---------| +| ------------ | ----------------------------------------------------------------------------- | ------- | | `connection` | The connection options to use to connect to Redis or an instance of `ioredis` | N/A | ## Filesystem @@ -62,7 +66,7 @@ The filesystem driver will store your cache in a distributed way in several file ```ts import { BentoCache, bentostore } from 'bentocache' -import { fileDriver } from "bentocache/drivers/file"; +import { fileDriver } from 'bentocache/drivers/file' const bento = new BentoCache({ default: 'file', @@ -70,14 +74,15 @@ const bento = new BentoCache({ redis: bentostore().useL2Layer( fileDriver({ directory: './cache', - pruneInterval: '1h' - })) - } + pruneInterval: '1h', + }), + ), + }, }) ``` | Option | Description | Default | -|-----------------|--------------------------------------------------------------------------|---------| +| --------------- | ------------------------------------------------------------------------ | ------- | | `directory` | The directory where the cache files will be stored. | N/A | | `pruneInterval` | The interval in milliseconds to prune expired entries. false to disable. 
| false | @@ -91,7 +96,6 @@ The memory driver will store your cache directly in memory. Use [node-lru-cache](https://github.com/isaacs/node-lru-cache) under the hood. - ```ts import { BentoCache, bentostore } from 'bentocache' import { memoryDriver } from 'bentocache/drivers/memory' @@ -99,17 +103,19 @@ import { memoryDriver } from 'bentocache/drivers/memory' const bento = new BentoCache({ default: 'memory', stores: { - memory: bentostore().useL1Layer(memoryDriver({ - maxSize: '10mb', - maxEntrySize: '1mb', - maxItems: 1000 - })) - } + memory: bentostore().useL1Layer( + memoryDriver({ + maxSize: '10mb', + maxEntrySize: '1mb', + maxItems: 1000, + }), + ), + }, }) ``` | Option | Description | Default | -|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------| +| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | | `maxSize` | The maximum size of the cache **in bytes**. | N/A | | `maxItems` | The maximum number of entries that the cache can contain. Note that fewer items may be stored if you are also using `maxSize` and the cache is full. | N/A | | `maxEntrySize` | The maximum size of a single entry in bytes. | N/A | @@ -121,6 +127,7 @@ const bento = new BentoCache({ By default, the data stored in the memory cache will always be serialized using `JSON.stringify` and `JSON.parse`. You can disable this feature by setting `serialize` to `false`. This allows for a much faster throughput but at the expense of: + - not being able to limit the size of the stored data, because we can't really know the size of an unserialized object. So if `maxSize` or `maxEntrySize` is set, it throws an error, but you still can use `maxItems` option. - **Having inconsistent return between the L1 and L2 cache**. 
The data stored in the L2 Cache will always be serialized because it passes over the network. Therefore, depending on whether the data is retrieved from the L1 and L2, we can have data that does not have the same form. For example, a Date instance will become a string if retrieved from the L2, but will remain a Date instance if retrieved from the L1. So, **you should put extra care when using this feature with an additional L2 cache**. @@ -137,35 +144,36 @@ import { dynamoDbDriver } from 'bentocache/drivers/dynamodb' const bento = new BentoCache({ default: 'dynamo', stores: { - dynamo: bentostore().useL2Layer(dynamoDbDriver({ - endpoint: '...', - region: 'eu-west-3', - table: { - name: 'cache' // Name of the table - }, - - // Credentials to use to connect to DynamoDB - credentials: { - accessKeyId: '...', - secretAccessKey: '...' - } - })) - } + dynamo: bentostore().useL2Layer( + dynamoDbDriver({ + endpoint: '...', + region: 'eu-west-3', + table: { + name: 'cache', // Name of the table + }, + + // Credentials to use to connect to DynamoDB + credentials: { + accessKeyId: '...', + secretAccessKey: '...', + }, + }), + ), + }, }) ``` -You will also need to create a DynamoDB table with a string partition key named `key`. You must create this table before starting to use the driver. +You will also need to create a DynamoDB table with a string partition key named `key`. You must create this table before starting to use the driver. **Make sure to also enable [Time To Live (TTL)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) on the table, on the `ttl` attribute. This will allow DynamoDB to automatically delete expired items.** | Option | Description | Default | -|---------------|-------------------------------------------------------------|---------| +| ------------- | ----------------------------------------------------------- | ------- | | `table.name` | The name of the table that will be used to store the cache. 
| `cache` | | `credentials` | The credentials to use to connect to DynamoDB. | N/A | | `endpoint` | The endpoint to use to connect to DynamoDB. | N/A | | `region` | The region to use to connect to DynamoDB. | N/A | - :::warning You should be careful with the `.clear()` function of the DynamoDB driver. We do not recommend using it. Dynamo does not offer a "native" `clear`, so we are forced to make several API calls to: retrieve the keys and delete them, 25 by 25 (max per `BatchWriteItemCommand`). @@ -185,7 +193,7 @@ Note that you can easily create your own adapter by implementing the `DatabaseAd All SQL drivers accept the following options: | Option | Description | Default | -|-------------------|------------------------------------------------------------------------------------|--------------| +| ----------------- | ---------------------------------------------------------------------------------- | ------------ | | `tableName` | The name of the table that will be used to store the cache. | `bentocache` | | `autoCreateTable` | If the cache table should be automatically created if it does not exist. | `true` | | `connection` | An instance of `knex` or `Kysely` based on the driver. 
| N/A | @@ -202,19 +210,19 @@ import { knexDriver } from 'bentocache/drivers/knex' const db = knex({ client: 'pg', - connection: { + connection: { port: 5432, - user: 'root', - password: 'root', - database: 'postgres', - } + user: 'root', + password: 'root', + database: 'postgres', + }, }) const bento = new BentoCache({ default: 'pg', stores: { - pg: bentostore().useL2Layer(knexDriver({ connection: db })) - } + pg: bentostore().useL2Layer(knexDriver({ connection: db })), + }, }) ``` @@ -234,8 +242,8 @@ const db = new Kysely({ dialect }) const bento = new BentoCache({ default: 'pg', stores: { - pg: bentostore().useL2Layer(kyselyStore({ connection: db })) - } + pg: bentostore().useL2Layer(kyselyStore({ connection: db })), + }, }) ``` @@ -250,12 +258,14 @@ import { createDb } from 'orchid-orm' import { BentoCache, bentostore } from 'bentocache' import { orchidDriver } from 'bentocache/drivers/orchid' -export const db = createDb({ databaseURL: `postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}` }) +export const db = createDb({ + databaseURL: `postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}`, +}) export const bento = new BentoCache({ default: 'cache', stores: { - cache: bentostore().useL2Layer(orchidDriver({ connection: db })) - } + cache: bentostore().useL2Layer(orchidDriver({ connection: db })), + }, }) ``` diff --git a/docs/content/docs/digging_deeper/logging.md b/docs/content/docs/digging_deeper/logging.md index c197bf2..a64b340 100644 --- a/docs/content/docs/digging_deeper/logging.md +++ b/docs/content/docs/digging_deeper/logging.md @@ -10,25 +10,25 @@ Your logger must comply with the following interface: ```ts export interface Logger { - trace(msg: string | LogObject): void; - trace(obj: LogObject, msg: string): void; + trace(msg: string | LogObject): void + trace(obj: LogObject, msg: string): void - debug(msg: string | LogObject): void; - debug(obj: LogObject, msg: string): void; + debug(msg: string | LogObject): void + 
debug(obj: LogObject, msg: string): void - info(msg: string | LogObject): void; - info(obj: LogObject, msg: string): void; + info(msg: string | LogObject): void + info(obj: LogObject, msg: string): void - warn(msg: string): void; - warn(obj: LogObject, msg: string): void; + warn(msg: string): void + warn(obj: LogObject, msg: string): void - error(msg: string): void; - error(obj: ErrorObject, msg: string): void; + error(msg: string): void + error(obj: ErrorObject, msg: string): void - fatal(msg: string): void; - fatal(obj: ErrorObject, msg: string): void; + fatal(msg: string): void + fatal(obj: ErrorObject, msg: string): void - child(childObj: LogObject): Logger; + child(childObj: LogObject): Logger } ``` @@ -41,7 +41,7 @@ import { pino } from 'pino' const logger = pino({ level: 'trace', - transport: { target: 'pino-pretty' } + transport: { target: 'pino-pretty' }, }) const bento = new BentoCache({ diff --git a/docs/content/docs/extend/create_plugin.md b/docs/content/docs/extend/create_plugin.md index 9e20d16..8dd278b 100644 --- a/docs/content/docs/extend/create_plugin.md +++ b/docs/content/docs/extend/create_plugin.md @@ -4,32 +4,32 @@ summary: Learn how to create a plugin for Bentocache # Create a plugin -BentoCache allows you to register plugins to extend its functionalities. Creating a plugin is as simple as creating a function that returns an object with the `register` method : +BentoCache allows you to register plugins to extend its functionalities. 
Creating a plugin is as simple as creating a function that returns an object with the `register` method : ```ts import type { BentoCachePlugin } from 'bentocache/types' export function myBentoCachePlugin(): BentoCachePlugin { return { - register(bentocache) { - // And here you can do whatever you - // want with the bentocache instance - bentocache.on('cache:miss', doSomething) - bentocache.on('cache:hit', doSomething) - } - } + register(bentocache) { + // And here you can do whatever you + // want with the bentocache instance + bentocache.on('cache:miss', doSomething) + bentocache.on('cache:hit', doSomething) + }, + } } ``` The `register` method will be called internally by BentoCache when starting the application. -Then, you can register your plugin when creating the BentoCache instance : +Then, you can register your plugin when creating the BentoCache instance : ```ts import { BentoCache } from 'bentocache' const bentocache = new BentoCache({ - plugins: [myBentoCachePlugin()] + plugins: [myBentoCachePlugin()], }) ``` diff --git a/docs/content/docs/extend/custom_bus_driver.md b/docs/content/docs/extend/custom_bus_driver.md index 0a91a1b..9457b81 100644 --- a/docs/content/docs/extend/custom_bus_driver.md +++ b/docs/content/docs/extend/custom_bus_driver.md @@ -20,12 +20,12 @@ export interface BusDriver { Feel free to take inspirations from the existing drivers to create your own driver. -## Methods +## Methods ### publish This method will be called internally by BentoCache when you call some of the core methods. -First argument is the channel name, second argument is the message to publish. +First argument is the channel name, second argument is the message to publish. 
### subscribe diff --git a/docs/content/docs/extend/custom_cache_driver.md b/docs/content/docs/extend/custom_cache_driver.md index 38c1143..1d6c2a5 100644 --- a/docs/content/docs/extend/custom_cache_driver.md +++ b/docs/content/docs/extend/custom_cache_driver.md @@ -32,7 +32,6 @@ interface L2CacheDriver { */ set(key: string, value: string, ttl?: number): Promise - /** * Remove all items from the cache */ @@ -57,9 +56,9 @@ interface L2CacheDriver { } } ``` -Similarly, the `L1CacheDriver` interface is the same, except that it is not async. +Similarly, the `L1CacheDriver` interface is the same, except that it is not async. -So this should be quite easy to implement. Feel free to take a lot at [the existing drivers](https://github.com/Julien-R44/bentocache/tree/main/packages/bentocache/src/drivers) implementations for inspiration. +So this should be quite easy to implement. Feel free to take a look at [the existing drivers](https://github.com/Julien-R44/bentocache/tree/main/packages/bentocache/src/drivers) implementations for inspiration. Also note that your driver will receive two additional parameters in the constructor : `ttl` and `prefix`. These parameters are common to every driver and their purpose is explained in the [options](../options.md) page. 
@@ -71,7 +70,7 @@ import type { CreateDriverResult } from 'bentocache/types' export function myDriver(options: MyDriverOptions): CreateDriverResult { return { options, - factory: (config: MyDriverOptions) => new MyDriver(config) + factory: (config: MyDriverOptions) => new MyDriver(config), } } ``` @@ -84,9 +83,12 @@ import { BentoCache, bentostore } from 'bentocache' const bento = new BentoCache({ default: 'myStore', stores: { - myStore: bentostore() - .useL2Layer(myDriver({ /* Your driver options */ })) - } + myStore: bentostore().useL2Layer( + myDriver({ + /* Your driver options */ + }), + ), + }, }) ``` @@ -178,6 +180,7 @@ export function prismaDriver(options: PrismaOptions): CreateDriverResult { registerCacheDriverTestSuite({ test, group, - createDriver: (options) => new MyDriver({ - myOption: 'myValue', - ...options - }), + createDriver: (options) => + new MyDriver({ + myOption: 'myValue', + ...options, + }), }) }) ``` diff --git a/docs/content/docs/grace_periods.md b/docs/content/docs/grace_periods.md index 2b73955..a7d565f 100644 --- a/docs/content/docs/grace_periods.md +++ b/docs/content/docs/grace_periods.md @@ -1,5 +1,5 @@ --- -summary: "Explore the concept of grace periods in BentoCache: extending the life of cached data beyond its TTL for enhanced resilience. Understand how it improves user experience during downtimes and ensures continuous data access." +summary: 'Explore the concept of grace periods in BentoCache: extending the life of cached data beyond its TTL for enhanced resilience. Understand how it improves user experience during downtimes and ensures continuous data access.' --- # Grace periods @@ -45,7 +45,7 @@ bento.getOrSet({ - First time this code is executed, user will be fetched from database, stored in cache for **10 minutes** with a grace period of **6 hours**. - **11 minutes later**, someone request the same user. The cache entry is logically expired, but the grace period is still valid. 
-- So, we try to call the factory again to refresh the cache entry. But oops, **the database is down** ( or factory is failing for any other reasons ). +- So, we try to call the factory again to refresh the cache entry. But oops, **the database is down** ( or factory is failing for any other reasons ). - Since we are still in the grace period of 6h, we will serve the stale data from the cache. As a result, instead of displaying an error page to the user, we are serving data that's a little out of date. Depending on your use case, this can result in a much better user experience. diff --git a/docs/content/docs/introduction.md b/docs/content/docs/introduction.md index cd97f3a..312521b 100644 --- a/docs/content/docs/introduction.md +++ b/docs/content/docs/introduction.md @@ -42,6 +42,7 @@ The one-level mode is a standard caching method. Choose from a variety of driver In addition to this, you benefit from many features that allow you to efficiently manage your cache, such as **cache stampede protection**, **grace periods**, **timeouts**, **namespaces**, etc. ### Two-levels + For those looking to go further, you can use the two-levels caching system. Here's basically how it works: - **L1: Local Cache**: First level cache. Data is stored in memory with an LRU algorithm for quick access @@ -58,7 +59,6 @@ The major benefit of multi-tier caching is that it allows for responses between In fact, it's a quite common pattern : to quote an example, it's [what Stackoverflow does](https://nickcraver.com/blog/2019/08/06/stack-overflow-how-we-do-app-caching/#layers-of-cache-at-stack-overflow). - To give some perspective, here's a simple benchmark that shows the difference between a simple distributed cache ( using Redis ) vs a multi-tier cache ( using Redis + In-memory cache ) : ```ts @@ -72,7 +72,6 @@ benchmark So a pretty huge difference. - ## Features Below is a list of the main features of BentoCache. If you want to know more, you can read each associated documentation page. 
@@ -91,7 +90,6 @@ See the [drivers documentation](./cache_drivers.md) for list of available driver Only a Redis driver for the bus is currently available. We probably have drivers for other backends like Zookeeper, Kafka, RabbitMQ... Let us know with an issue if you are interested in this. ::: --> - ### Resiliency - [Grace period](./grace_periods.md): Keep your application running smoothly with the ability to temporarily use expired cache entries when your database is down, or when a factory is failing. @@ -112,10 +110,10 @@ Allows associating a cache entry with one or more tags to simplify invalidation. await bento.getOrSet({ key: 'foo', factory: getFromDb(), - tags: ['tag-1', 'tag-2'] -}); + tags: ['tag-1', 'tag-2'], +}) -await bento.deleteByTag({ tags: ['tag-1'] }); +await bento.deleteByTag({ tags: ['tag-1'] }) ``` ### Namespaces @@ -176,7 +174,7 @@ You can pass a logger to Bentocache, and it will log everything that happens. Ca import { pino } from 'pino' const bento = new BentoCache({ - logger: pino() + logger: pino(), }) ``` diff --git a/docs/content/docs/methods.md b/docs/content/docs/methods.md index 5512292..1ccb6b4 100644 --- a/docs/content/docs/methods.md +++ b/docs/content/docs/methods.md @@ -1,5 +1,5 @@ --- -summary: "Comprehensive list of all methods available when using BentoCache" +summary: 'Comprehensive list of all methods available when using BentoCache' --- # Methods @@ -11,20 +11,20 @@ Below is a list of all the methods available when using BentoCache. Returns a new instance of the driver namespace. See [Namespaces](./namespaces.md) for more information. 
```ts -const usersNamespace = bento.namespace('users'); +const usersNamespace = bento.namespace('users') -usersNamespace.set('1', { name: 'John' }); -usersNamespace.set('2', { name: 'Jane' }); -usersNamespace.set('3', { name: 'Doe' }); +usersNamespace.set('1', { name: 'John' }) +usersNamespace.set('2', { name: 'Jane' }) +usersNamespace.set('3', { name: 'Doe' }) -usersNamespace.clear(); +usersNamespace.clear() ``` -## get +## get `get` allows you to retrieve a value from the cache. It returns `undefined` if the key does not exist. -#### get(options: GetPojoOptions) +#### get(options: GetPojoOptions) Returns the value of the key, or `undefined` if the key does not exist. @@ -32,7 +32,7 @@ Returns the value of the key, or `undefined` if the key does not exist. const products = await bento.get({ key: 'products', defaultValue: [], -}); +}) ``` ## set @@ -69,7 +69,7 @@ It will try to get the value in the cache. If it exists, it will return it. If i // basic usage const products = await bento.getOrSet({ key: 'products', - factory: () => fetchProducts() + factory: () => fetchProducts(), }) // with options @@ -98,7 +98,7 @@ cache.getOrSet({ } return item - } + }, }) ``` @@ -120,7 +120,7 @@ cache.getOrSet({ } return item - } + }, }) ``` @@ -128,7 +128,6 @@ cache.getOrSet({ `setOptions` allows you to update the options of the cache entry. This is useful when you want to update the TTL, grace period, or tags and when it depends on the value itself. - ```ts const products = await bento.getOrSet({ key: 'token', @@ -141,11 +140,10 @@ const products = await bento.getOrSet({ }) return token - } + }, }) ``` - Auth tokens are a perfect example of this use case. The cached token should expire when the token itself expires. And we know the expiration time only after fetching the token. See [Adaptive Caching docs](./adaptive_caching.md) for more information. 
### ctx.gracedEntry @@ -161,7 +159,7 @@ const products = await bento.getOrSet({ } return 'bar' - } + }, }) ``` @@ -218,10 +216,10 @@ When we delete a key, it is completely removed and forgotten. This means that ev ```ts // Set a value with a grace period of 6 minutes -await cache.set({ +await cache.set({ key: 'hello', value: 'world', - grace: '6m' + grace: '6m', }) // Expire the value. It is kept in the cache but marked as STALE for 6 minutes @@ -247,7 +245,7 @@ await bento.deleteMany({ keys: ['products', 'users'] }) Clear the cache. This will delete all the keys in the cache if called from the "root" instance. If called from a namespace, it will only delete the keys in that namespace. ```ts -await bento.clear(); +await bento.clear() ``` ## prune @@ -255,7 +253,7 @@ await bento.clear(); Prunes the cache by removing expired entries. This is useful for drivers that do not have native TTL support, such as File and Database drivers. On drivers with native TTL support, this is typically a noop. ```ts -await bento.prune(); +await bento.prune() ``` ## disconnect @@ -263,5 +261,5 @@ await bento.prune(); Disconnect from the cache. This will close the connection to the cache server, if applicable. ```ts -await bento.disconnect(); +await bento.disconnect() ``` diff --git a/docs/content/docs/multi_tier.md b/docs/content/docs/multi_tier.md index b91147a..3888da4 100644 --- a/docs/content/docs/multi_tier.md +++ b/docs/content/docs/multi_tier.md @@ -1,10 +1,10 @@ --- -summary: "Discover how to use multi-tier Caching with Bentocache: combines in-memory and distributed caches for optimal performance." +summary: 'Discover how to use multi-tier Caching with Bentocache: combines in-memory and distributed caches for optimal performance.' --- # Multi Tier -A multi-tier caching system can be very useful when you want to boost even more the performance of you caching strategy. 
+A multi-tier caching system can be very useful when you want to boost even more the performance of you caching strategy. To do that, we generally use a in-memory cache as the first level cache, and a distributed cache as the second level cache. In-memory cache is really fast, but it is limited by the amount of memory available on your server. Distributed cache is slower, but can store a lot more data, and is shared between your different instances. @@ -23,7 +23,7 @@ If your application is running on a single instance, you don't need to bother wi ```ts import { BentoCache, bentostore } from 'bentocache' import { memoryDriver } from 'bentocache/drivers/memory' -import { redisDriver,redisBusDriver } from 'bentocache/drivers/redis' +import { redisDriver, redisBusDriver } from 'bentocache/drivers/redis' const redisConnection = { host: 'localhost', port: 6379 } const bento = new BentoCache({ @@ -31,7 +31,7 @@ const bento = new BentoCache({ stores: { multitier: bentostore() - // Your L1 Cache. Here, an in-memory cache with + // Your L1 Cache. Here, an in-memory cache with // a maximum size of 10Mb .useL1Layer(memoryDriver({ maxSize: '10mb' })) // Your L2 Cache. Here, a Redis cache @@ -39,11 +39,12 @@ const bento = new BentoCache({ // Finally, the bus to synchronize the L1 caches between // the different instances of your application .useBus(redisBusDriver({ connection: redisConnection })), - } + }, }) ``` So here, We have defined a multi-tier cache with : + - L1: An in-memory cache with a maximum size of 10Mb. After that, the LRU algorithm will be used to remove the least recently used items. - L2: A distributed cache using Redis. - And a Redis bus to synchronize the in-memory caches between the different instances of your application. The redis bus leverage Redis Pub/Sub system to send messages between instances. @@ -71,9 +72,9 @@ See the problem ? That's why the bus is needed. ### How the bus works -The bus is, as the name suggests, just a bus and messaging system. 
With the `redisBusDriver` we are leveraging Redis Pub/Sub system to send messages between instances. +The bus is, as the name suggests, just a bus and messaging system. With the `redisBusDriver` we are leveraging Redis Pub/Sub system to send messages between instances. -Every time a key is invalidated or updated, the instance will notify other ones by sending a message saying "Hey, this key has been invalidated, you should delete it from your cache". Note that we are not sending the new value to other instances, for multiple reasons : +Every time a key is invalidated or updated, the instance will notify other ones by sending a message saying "Hey, this key has been invalidated, you should delete it from your cache". Note that we are not sending the new value to other instances, for multiple reasons : - Maybe the other instance will never need this key. So let's not waste memory space on this instance. It will fetch the value from the distributed cache if needed. - We also save network bandwidth and not overload the bus with serialized data of the value. @@ -82,7 +83,7 @@ Bus messages are also encoded using a custom binary format instead of plain JSON ### Retry queue strategy -The bus also has a retry queue strategy. If an instance fails to publish a message through the bus, it will be added to a retry queue. As soon as we can publish messages again, we will try to process that queue and send the messages. +The bus also has a retry queue strategy. If an instance fails to publish a message through the bus, it will be added to a retry queue. As soon as we can publish messages again, we will try to process that queue and send the messages. 
This can be configured through the `redisBusDriver` options as follow : @@ -90,8 +91,8 @@ This can be configured through the `redisBusDriver` options as follow : redisBusDriver({ retryQueue: { enabled: true, - maxSize: undefined - } + maxSize: undefined, + }, }) ``` diff --git a/docs/content/docs/named_caches.md b/docs/content/docs/named_caches.md index 926ffd5..f8033ce 100644 --- a/docs/content/docs/named_caches.md +++ b/docs/content/docs/named_caches.md @@ -1,5 +1,5 @@ --- -summary: "Discover BentoCache named caches feature. Learn how to define multiple cache stores in your application and use them distinctly" +summary: 'Discover BentoCache named caches feature. Learn how to define multiple cache stores in your application and use them distinctly' --- # Named Caches @@ -11,8 +11,7 @@ const bento = new BentoCache({ default: 'memory', stores: { // One store named "memory". Only L1 in-memory cache - memory: bentostore() - .useL1Layer(memoryDriver({ maxSize: '10mb' })), + memory: bentostore().useL1Layer(memoryDriver({ maxSize: '10mb' })), // One store named "multitier" using full multi-tier cache multitier: bentostore() @@ -21,8 +20,11 @@ const bento = new BentoCache({ .useBus(redisBusDriver({ connection: redisConnection })), // One store named "dynamo" using the dynamodb driver - dynamo: bentostore() - .useL2Layer(dynamodbDriver({ /* ... */ })), + dynamo: bentostore().useL2Layer( + dynamodbDriver({ + /* ... 
*/ + }), + ), }, }) ``` @@ -57,11 +59,9 @@ In some cases, you may want to define two named caches that use the same backend const bento = new BentoCache({ default: 'users', stores: { - users: bentostore() - .useL2Layer(redisDriver({ prefix: 'users' })), + users: bentostore().useL2Layer(redisDriver({ prefix: 'users' })), - posts: bentostore() - .useL2Layer(redisDriver({ prefix: 'posts' })) + posts: bentostore().useL2Layer(redisDriver({ prefix: 'posts' })), }, }) ``` diff --git a/docs/content/docs/namespaces.md b/docs/content/docs/namespaces.md index efd667c..128e616 100644 --- a/docs/content/docs/namespaces.md +++ b/docs/content/docs/namespaces.md @@ -8,17 +8,17 @@ Namespaces are a way to organize your cache entries by grouping them into some c Basically, what BentoCache does internally is it prefixes all the keys with the namespace you provide. -Let's take an example. +Let's take an example. ```ts -const usersNamespace = bento.namespace('users'); +const usersNamespace = bento.namespace('users') bento.set('products', products) -usersNamespace.set({ key: '1', value: { name: 'John' } }); -usersNamespace.set({ key: '2', value: { name: 'Jane' } }); -usersNamespace.set({ key: '3', value: { name: 'Doe' } }); +usersNamespace.set({ key: '1', value: { name: 'John' } }) +usersNamespace.set({ key: '2', value: { name: 'Jane' } }) +usersNamespace.set({ key: '3', value: { name: 'Doe' } }) -usersNamespace.clear(); +usersNamespace.clear() ``` Here, the `bento.namespace('users')` call will return a new instance of the driver. Further calls to methods via this instance will basically automatically prefix the keys with `users:`. 
diff --git a/docs/content/docs/options.md b/docs/content/docs/options.md index d9f7abe..02fd765 100644 --- a/docs/content/docs/options.md +++ b/docs/content/docs/options.md @@ -22,8 +22,8 @@ const bento = new BentoCache({ // Store level 👇 ttl: '30m', grace: false, - }) - } + }), + }, }) bento.getOrSet({ @@ -104,6 +104,7 @@ Levels: `global`, `store`, `operation` A duration to define a hard [timeout](./timeouts.md#hard-timeouts). ### `forceFresh` + Default: `false` Levels: `operation` @@ -117,7 +118,7 @@ Default: `undefined` Levels: `global`, `store`, `operation` -The maximum amount of time (in milliseconds) that the in-memory lock for [stampeded protection](./stampede_protection.md) can be held. If the lock is not released before this timeout, it will be released automatically. +The maximum amount of time (in milliseconds) that the in-memory lock for [stampeded protection](./stampede_protection.md) can be held. If the lock is not released before this timeout, it will be released automatically. This is usually not needed, but can provide an extra layer of protection against theoretical deadlocks. @@ -146,9 +147,9 @@ Default: `undefined (disabled)` Levels: `global`, `store`, `operation` -This option allows you to enable a simple circuit breaker system for the L2 Cache. If defined, the circuit breaker will open when a call to our distributed cache fails. It will stay open for `l2CircuitBreakerDuration` seconds. +This option allows you to enable a simple circuit breaker system for the L2 Cache. If defined, the circuit breaker will open when a call to our distributed cache fails. It will stay open for `l2CircuitBreakerDuration` seconds. 
-If you're not familiar with the circuit breaker system, to summarize it very simply: if an operation on the L2 Cache fails and the circuit breaker option is activated, then all future operations on the L2 Cache will be rejected for `l2CircuitBreakerDuration` seconds, in order to avoid overloading the L2 Cache with operations that are likely to fail. +If you're not familiar with the circuit breaker system, to summarize it very simply: if an operation on the L2 Cache fails and the circuit breaker option is activated, then all future operations on the L2 Cache will be rejected for `l2CircuitBreakerDuration` seconds, in order to avoid overloading the L2 Cache with operations that are likely to fail. Once the `l2CircuitBreakerDuration` seconds have passed, the circuit breaker closes and operations on the L2 Cache can resume. @@ -180,8 +181,8 @@ import superjson from 'superjson' const bento = new BentoCache({ serializer: { serialize: superjson.stringify, - deserialize: superjson.parse - } + deserialize: superjson.parse, + }, }) ``` diff --git a/docs/content/docs/plugins/prometheus.md b/docs/content/docs/plugins/prometheus.md index de6409d..3c0db8d 100644 --- a/docs/content/docs/plugins/prometheus.md +++ b/docs/content/docs/plugins/prometheus.md @@ -51,9 +51,7 @@ An array of `[RegExp, ((match: RegExpMatchArray) => string) | string]` tuples. T ```ts prometheusPlugin({ - keyGroups: [ - [/^users:(\d+)$/, 'users:*'], - ] + keyGroups: [[/^users:(\d+)$/, 'users:*']], }) ``` @@ -132,5 +130,4 @@ Counter. The number of messages received from the bus. No labels. We have a pretty basic but ready-to-use Grafana dashboard for Bentocache. You can find it [here](https://github.com/Julien-R44/bentocache/blob/main/packages/prometheus/dashboards/basic.json). - Happy to accept any PRs that improve it! 
diff --git a/docs/content/docs/quick_setup.md b/docs/content/docs/quick_setup.md index da08824..58e792d 100644 --- a/docs/content/docs/quick_setup.md +++ b/docs/content/docs/quick_setup.md @@ -7,6 +7,7 @@ summary: Setup Bentocache in your application You can install Bentocache via your favorite package manager. :::codegroup + ```sh // title: npm npm i bentocache @@ -21,8 +22,8 @@ pnpm add bentocache // title: yarn yarn add bentocache ``` -::: +::: ## Setup @@ -36,19 +37,20 @@ import { redisDriver } from 'bentocache/drivers/redis' const bento = new BentoCache({ default: 'myCache', stores: { - // A first cache store named "myCache" using + // A first cache store named "myCache" using // only L1 in-memory cache - myCache: bentostore() - .useL1Layer(memoryDriver({ maxSize: '10mb' })), + myCache: bentostore().useL1Layer(memoryDriver({ maxSize: '10mb' })), // A second cache store named "multitier" using // a in-memory cache as L1 and a Redis cache as L2 multitier: bentostore() .useL1Layer(memoryDriver({ maxSize: '10mb' })) - .useL2Layer(redisDriver({ - connection: { host: '127.0.0.1', port: 6379 } - })) - } + .useL2Layer( + redisDriver({ + connection: { host: '127.0.0.1', port: 6379 }, + }), + ), + }, }) ``` @@ -71,23 +73,29 @@ const bento = new BentoCache({ stores: { cache: bentostore() .useL1Layer(memoryDriver({ maxSize: '10mb' })) - .useL2Layer(redisDriver({ /* ... */ })) - .useBus(redisBusDriver({ /* ... */ })) + .useL2Layer( + redisDriver({ + /* ... */ + }), + ) + .useBus( + redisBusDriver({ + /* ... */ + }), + ), }, }) await bento.set({ key: 'user:42', value: { name: 'jul' } }) -console.log( - await bento.get({ key: 'user:42' }) -) +console.log(await bento.get({ key: 'user:42' })) ``` With this setup, your in-memory cache will serve as the first level ( L1 ) cache. If an item is stored in the in-memory cache, Bentocache will not fetch it from Redis, allowing for huge speed gains. 
In a multi-instance application, your different in-memory caches will be synchronized using the bus you have configured. This way, if an instance updates an item in the cache, the other instances will be notified and will update their local cache. -If you are running your application on a single instance, you don't need to bother with the bus. +If you are running your application on a single instance, you don't need to bother with the bus. More information on the [multi-tier here](./multi_tier.md) @@ -117,13 +125,13 @@ export default class UsersController { } ``` -Multiple things to note here : +Multiple things to note here : - We are using a namespace. Namespaces are a way to group keys together. In this case, we are grouping all the users in a namespace called `users`. This will allow us to easily invalidate all the users at once later. -- We are using the `getOrSet` method. This method will first try to fetch the user from the cache. If it is not found, it will execute the *factory* and store the result in the cache for 5 minutes. +- We are using the `getOrSet` method. This method will first try to fetch the user from the cache. If it is not found, it will execute the _factory_ and store the result in the cache for 5 minutes. - The Factory here is just retrieving the user from the database. -So first time this endpoint is called, it will fetch the user from the database, then store it in the cache. Next time the endpoint is called, it will retrieve the user from the cache. +So first time this endpoint is called, it will fetch the user from the database, then store it in the cache. Next time the endpoint is called, it will retrieve the user from the cache. ### Invalidating the cache @@ -160,5 +168,5 @@ export default class UsersController { } ``` -As simple as that, we just need to call the `delete` method on the namespace, passing the key we want to delete. 
+As simple as that, we just need to call the `delete` method on the namespace, passing the key we want to delete. Note that if you are using a multi-tier setup, the `delete` call will notify the other instances to delete the key from their local cache as well. diff --git a/docs/content/docs/stampede_protection.md b/docs/content/docs/stampede_protection.md index 8623feb..35986f7 100644 --- a/docs/content/docs/stampede_protection.md +++ b/docs/content/docs/stampede_protection.md @@ -2,7 +2,6 @@ summary: How BentoCache protects you from cache stampede and how it works --- - # Stampede Protection To begin with, what is a [Cache Stampede](https://en.wikipedia.org/wiki/Cache_stampede)? @@ -14,13 +13,13 @@ Imagine this simple route that allows retrieving a post by its ID. ```ts router.get('/posts/:id', async (request) => { const { id } = request.params - + const post = await bento.getOrSet({ - key: `post:${id}`, + key: `post:${id}`, factory: () => getPostFromDb(id), ttl: '1h', }) - + return user }) ``` diff --git a/docs/content/docs/tags.md b/docs/content/docs/tags.md index e5c507e..eca05fa 100644 --- a/docs/content/docs/tags.md +++ b/docs/content/docs/tags.md @@ -12,16 +12,16 @@ Tagging allows associating a cache entry with one or more tags to simplify inval await bento.getOrSet({ key: 'foo', factory: getFromDb(), - tags: ['tag-1', 'tag-2'] -}); + tags: ['tag-1', 'tag-2'], +}) -await bento.set({ key: 'foo', tags: ['tag-1'] }); +await bento.set({ key: 'foo', tags: ['tag-1'] }) ``` To invalidate all entries linked to a tag: ```ts -await bento.deleteByTag({ tags: ['tag-1'] }); +await bento.deleteByTag({ tags: ['tag-1'] }) ``` Now, imagine that the tags depend on the cached value itself. In that case, you can use [adaptive caching](./adaptive_caching.md) to update tags dynamically based on the computed value. @@ -30,14 +30,13 @@ Now, imagine that the tags depend on the cached value itself. 
In that case, you const product = await bento.getOrSet({ key: `product:${id}`, factory: async (ctx) => { - const product = await fetchProduct(id); - ctx.setTags(product.tags); - return product; - } + const product = await fetchProduct(id) + ctx.setTags(product.tags) + return product + }, }) ``` - ## How it works If you are interested in how Bentocache handles tags internally, read on. @@ -74,6 +73,7 @@ This approach does not scale in a distributed cache with millions of entries, as Instead of directly deleting entries with a given tag, Bentocache uses a more efficient approach. Core idea is pretty simple: + - When a tag is invalidated, Bentocache stores an **invalidation timestamp** in the cache. - When fetching an entry, Bentocache checks whether it was cached before or after its associated tags were invalidated. @@ -83,8 +83,8 @@ Let's take a concrete example. Here we just cached an entry with the tags `tag-1 await bento.getOrSet({ key: 'foo', factory: getFromDb(), - tags: ['tag-1', 'tag-2'] -}); + tags: ['tag-1', 'tag-2'], +}) ``` Internally, Bentocache stores something like: @@ -98,7 +98,7 @@ Note that we also store the creation date of the entry as `createdAt`. Now, we invalidate the `tag-1` tag: ```ts -await bento.deleteByTag({ tags: ['tag-1'] }); +await bento.deleteByTag({ tags: ['tag-1'] }) ``` Instead of scanning and deleting every entry associated with `tag-1`, Bentocache simply stores the invalidation timestamp under a special cache key: diff --git a/docs/content/docs/telemetry.md b/docs/content/docs/telemetry.md index f16d036..64327da 100644 --- a/docs/content/docs/telemetry.md +++ b/docs/content/docs/telemetry.md @@ -10,7 +10,6 @@ This package is experimental and may change in future versions. Bentocache provides an official OpenTelemetry instrumentation package: `@bentocache/otel`. 
- It listens to Bentocache tracing channels and emits spans like: - `cache.get` @@ -72,6 +71,7 @@ tracingChannels.cacheOperation.subscribe({ ## Install :::codegroup + ```sh // title: npm npm i @bentocache/otel @@ -86,6 +86,7 @@ pnpm add @bentocache/otel // title: yarn yarn add @bentocache/otel ``` + ::: ## Basic setup diff --git a/docs/content/docs/timeouts.md b/docs/content/docs/timeouts.md index 26abc97..51fb490 100644 --- a/docs/content/docs/timeouts.md +++ b/docs/content/docs/timeouts.md @@ -19,7 +19,7 @@ const result = await bento.getOrSet({ ttl: '10m', grace: '6h', timeout: '200ms', -}); +}) ``` Here, suppose we have an expired entry still under grace period in the cache. A new request comes in, so the factory `() => Product.all()` will be called. @@ -43,7 +43,7 @@ const result = await bento.getOrSet({ factory: () => Product.all(), ttl: '10m', hardTimeout: '1s', -}); +}) ``` Here, if the factory takes more than 1s to execute, then an exception will be thrown. You can handle it like this: @@ -57,7 +57,7 @@ try { factory: () => Product.all(), ttl: '10m', hardTimeout: '1s', - }); + }) } catch (e) { if (e instanceof errors.E_FACTORY_HARD_TIMEOUT) { // handle timeout error @@ -75,5 +75,5 @@ const result = await bento.getOrSet({ grace: '6h', timeout: '200ms', hardTimeout: '1s', -}); +}) ``` diff --git a/docs/content/docs/walkthrough.md b/docs/content/docs/walkthrough.md index ee2a596..ee9aec8 100644 --- a/docs/content/docs/walkthrough.md +++ b/docs/content/docs/walkthrough.md @@ -4,9 +4,9 @@ summary: Quick walkthrough of how Bentocache can help you improve your applicati # Walkthrough guide -Let's try to take a real-word scenario to see how can Bentocache can help us. +Let's try to take a real-word scenario to see how can Bentocache can help us. -We have a simple JSON API that is serving some products. Our JSON API is backed with PM2 and run in cluster modes with 3 instances equally served with round-robin distribution. 
+We have a simple JSON API that is serving some products. Our JSON API is backed with PM2 and run in cluster modes with 3 instances equally served with round-robin distribution. Let's also imagine the given numbers in a 10-minute window : @@ -14,6 +14,7 @@ Let's also imagine the given numbers in a 10-minute window : - The database is down for the last 3 minutes of the 10-minute window. That means, without any caching, here's how the numbers would look like : + - **Every 10 Seconds**: 1,000 x 100 x 3 = 300,000 database calls. - **Every Minute**: 300,000 x 6 = 1,800,000 database calls. - **Every 10 Minutes**: 1,800,000 x 10 = 18,000,000 database calls. @@ -40,9 +41,9 @@ Now, let's make the first easy step by adding a simple memory-cache to our app : // title: Memory Cache const bento = new BentoCache({ default: 'cache', - stores: { - cache: bentostore().useL1Layer(memoryDriver()) - } + stores: { + cache: bentostore().useL1Layer(memoryDriver()), + }, }) router.get('/products/:id', async (req, res) => { @@ -55,7 +56,7 @@ router.get('/products/:id', async (req, res) => { res.json(product) }) - ``` +``` By caching the product for 1 minute, we significantly reduce the database load by making only one request per minute, per product and per instance. However, we still need to consider the 3 minutes of downtime when we can't cache anything, so we'll keep hitting the database. @@ -67,16 +68,18 @@ By caching the product for 1 minute, we significantly reduce the database load b
**Normal Operations (7 minutes):** + - Database calls per minute: 300,000
-1,000 products * 100 concurrent requests * 3 instances + 1,000 products _ 100 concurrent requests _ 3 instances - Total database calls: 3,000,000
-300,000 calls * 7 minutes + 300,000 calls \* 7 minutes **During Downtime (3 minutes):** + - Database Calls per Minute: 1,800,000
-1,000 products * 100 concurrent requests * 3 instances * 6 ( every 10 seconds ) + 1,000 products _ 100 concurrent requests _ 3 instances \* 6 ( every 10 seconds ) - Total database calls: 5,400,000
-1,800,000 calls * 3 minutes + 1,800,000 calls \* 3 minutes @@ -86,7 +89,7 @@ If we look at the above calculation, it is not exactly true. We made the assumpt We said we were receiving 100 concurrent requests for each 1.000 products. That means, at the start of each minute, when entries are expired, we will have 100 concurrent requests trying to fetch the same product from the database. This is called a cache stampede. And guess what, Bentocache has a built-in mechanism to prevent this. And this is totally transparent for you. -So if we take this into account, results would be : +So if we take this into account, results would be : **Database calls in 10m before: 8,400,000**
**Database calls in 10m: 5,430,000** @@ -98,22 +101,24 @@ Note that in downtime case, we are not benefiting from the cache stampede protec
**Normal Operations (7 minutes):** + - Database calls per minute: 3,000
-1,000 products * 3 instances + 1,000 products \* 3 instances - Total database calls: 30,000
-3,000 calls * 7 minutes + 3,000 calls \* 7 minutes **During Downtime (3 minutes):** + - Database Calls per Minute: 1,800,000
-1,000 products * 100 concurrent requests * 3 instances * 6 ( every 10 seconds ) + 1,000 products _ 100 concurrent requests _ 3 instances \* 6 ( every 10 seconds ) - Total database calls: 5,400,000
-1,800,000 calls * 3 minutes + 1,800,000 calls \* 3 minutes ## Adding grace periods -We have this nasty problem where the database is down during 3 minutes. During this period, since we're using a 1-minute TTL, we can't cache anything, causing an overload of database calls and probably forcing us to show an error page to users. But there's a way to enhance our system's resilience: grace periods. +We have this nasty problem where the database is down during 3 minutes. During this period, since we're using a 1-minute TTL, we can't cache anything, causing an overload of database calls and probably forcing us to show an error page to users. But there's a way to enhance our system's resilience: grace periods. Grace periods extend the time that cached data can be served even after their expiration. They improve not only system robustness during downtimes but also the user experience under heavy load. @@ -126,8 +131,8 @@ const bento = new BentoCache({ graceBackoff: '30s', // highlight-end stores: { - cache: bentostore().useL1Layer(memoryDriver()) - } + cache: bentostore().useL1Layer(memoryDriver()), + }, }) router.get('/products/:id', async (req, res) => { @@ -150,7 +155,7 @@ A particular aspect to highlight is the `graceBackoff` parameter, set here to 30 By avoiding repeated calls to the database when the factory fails, it prevents what could be **likened to a self-inflicted DDoS attack**. It not only maintains service but does so in a way that doesn't further strain the system. -In summary, that means, during this downtime of 3 minutes, we now only have 2 calls per minute to our database. This gives : +In summary, that means, during this downtime of 3 minutes, we now only have 2 calls per minute to our database. This gives : **Database calls in 10m before: 5,430,000**
**Database calls in 10m: 39,000** @@ -162,19 +167,20 @@ This is a huge improvement. Sure, we are serving some stale data, but depending
**Normal Operations (7 minutes):** + - Database calls per minute: 3,000
-1,000 products * 3 instances + 1,000 products \* 3 instances - Total database calls: 30,000
-3,000 calls * 7 minutes + 3,000 calls \* 7 minutes **During Downtime (3 minutes with Grace Period):** + - Database Calls per Minute: 1,000
-1,000 products * 3 minutes * 1 factory calls * 3 nodes + 1,000 products _ 3 minutes _ 1 factory calls \* 3 nodes - Total database calls: 9,000
-1,000 calls * 3 minutes * 3 nodes +1,000 calls _ 3 minutes _ 3 nodes - ## Adding a distributed cache behind our memory-cache Up until now, we've been working with a memory cache that has its own limitations, particularly when dealing with multiple instances. Each instance maintains its state, leading to potential redundancy and inefficiency in data retrieval. To illustrate this, consider the following scenario: @@ -189,12 +195,12 @@ See the problem ? Let's introduce our Multi-Tier cache setup : const connection = process.env.REDIS_CREDENTIALS! const bento = new BentoCache({ default: 'cache', - grace: '6h', - stores: { + grace: '6h', + stores: { cache: bentostore() .useL1Layer(memoryDriver()) .useL2Layer(redisDriver({ connection })) - .useBus(redisBusDriver({ connection })) + .useBus(redisBusDriver({ connection })), }, }) @@ -215,14 +221,14 @@ Nice. We now have a robust two-level cache system. It also introduces a new conc Returning to our original problem of different instances redundantly fetching the same data from the database, let's estimate that this occurs 35% of the time. By using a multi-tier cache and bus, we can reduce database calls by this percentage. - We previously calculated 39,000 requests in 10 minutes. -- With the new setup, we have reduced this to 25,350 requests in 10 minutes (39,000 * 0.65). +- With the new setup, we have reduced this to 25,350 requests in 10 minutes (39,000 \* 0.65). **Database calls in 10m before: 39,000**
**Database calls in 10m: 25,350** ## Adding soft timeouts -We have likely achieved a more rational amount of database calls at this stage. +We have likely achieved a more rational amount of database calls at this stage. However, it sometimes happens that the database's response time is prolonged, sometimes taking up to 2 seconds. This delay becomes an issue when a key has just expired and must be refreshed, leaving the end-user waiting for the database's response before accessing the data. This is the scenario where soft timeouts become essential. @@ -231,15 +237,15 @@ const connection = process.env.REDIS_CREDENTIALS! const bento = new BentoCache({ default: 'cache', - grace: '6h', + grace: '6h', // highlight-start timeout: '500ms', // highlight-end - stores: { + stores: { cache: bentostore() .useL1Layer(memoryDriver()) .useL2Layer(redisDriver({ connection })) - .useBus(redisBusDriver({ connection })) + .useBus(redisBusDriver({ connection })), }, }) @@ -255,12 +261,12 @@ router.get('/products/:id', async (req, res) => { }) ``` -Soft timeouts operate alongside grace periods. In this example, a soft timeout of 200ms has been configured. If the factory (ie the database call) takes more than 200ms to execute, and grace period data is still available, that data will be returned. +Soft timeouts operate alongside grace periods. In this example, a soft timeout of 500ms has been configured. If the factory (ie the database call) takes more than 500ms to execute, and grace period data is still available, that data will be returned. During this time, the factory will continue to run in the background. And the next time the key is requested, it will be fresh and immediately returned. ## Conclusion -There are some other features to discover in Bentocache that can help you improve your user experience, resilience, and response time. But I believe this is a good introduction.
+There are some other features to discover in Bentocache that can help you improve your user experience, resilience, and response time. But I believe this is a good introduction. By using different features of Bentocache, we were able to reduce the number of database calls **from 18,000,000 to 25,350**. We even managed to reduce the **response time to a maximum of 500ms instead of 2s** sometimes. These are all fictional numbers and a highly theoretical scenario, but I hope you get the idea of how Bentocache can help you. diff --git a/docs/public/site.webmanifest b/docs/public/site.webmanifest index b20abb7..fa99de7 100644 --- a/docs/public/site.webmanifest +++ b/docs/public/site.webmanifest @@ -1,19 +1,19 @@ { - "name": "", - "short_name": "", - "icons": [ - { - "src": "/android-chrome-192x192.png", - "sizes": "192x192", - "type": "image/png" - }, - { - "src": "/android-chrome-512x512.png", - "sizes": "512x512", - "type": "image/png" - } - ], - "theme_color": "#ffffff", - "background_color": "#ffffff", - "display": "standalone" + "name": "", + "short_name": "", + "icons": [ + { + "src": "/android-chrome-192x192.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": "/android-chrome-512x512.png", + "sizes": "512x512", + "type": "image/png" + } + ], + "theme_color": "#ffffff", + "background_color": "#ffffff", + "display": "standalone" } diff --git a/package.json b/package.json index a6bcf99..8efda74 100644 --- a/package.json +++ b/package.json @@ -13,6 +13,7 @@ "typecheck": "pnpm run -r --parallel typecheck", "build": "pnpm run -r build", "lint": "eslint .", + "format": "prettier --check .", "checks": "pnpm lint && pnpm typecheck", "test": "pnpm run -r --parallel test" }, diff --git a/packages/bentocache/CHANGELOG.md b/packages/bentocache/CHANGELOG.md index de3fca5..b9881e7 100644 --- a/packages/bentocache/CHANGELOG.md +++ b/packages/bentocache/CHANGELOG.md @@ -138,6 +138,7 @@ ``` - 4d1feb5: Added a super simple circuit breaker system to the L2 Cache : + 
- a `l2CircuitBreakerDuration` parameter to set the duration of the circuit breaker. How many seconds the circuit breaker will stay open. - If defined, the circuit breaker will open when a call to our distributed cache fails. It will stay open for `l2CircuitBreakerDuration` seconds. @@ -246,6 +247,7 @@ - 2578357: Added a `serialize: false` to the memory driver. It means that, the data stored in the memory cache will not be serialized/parsed using `JSON.stringify` and `JSON.parse`. This allows for a much faster throughput but at the expense of: + - not being able to limit the size of the stored data, because we can't really know the size of an unserialized object - Having inconsistent return between the L1 and L2 cache. The data stored in the L2 Cache will always be serialized because it passes over the network. Therefore, depending on whether the data is retrieved from the L1 and L2, we can have data that does not have the same form. For example, a Date instance will become a string if retrieved from the L2, but will remain a Date instance if retrieved from the L1. So, you should put extra care when using this feature with an additional L2 cache. diff --git a/packages/bentocache/README.md b/packages/bentocache/README.md index 7e3da3c..1b8a1c4 100644 --- a/packages/bentocache/README.md +++ b/packages/bentocache/README.md @@ -43,6 +43,7 @@ The one-level mode is a standard caching method. Choose from a variety of driver In addition to this, you benefit from many features that allow you to efficiently manage your cache, such as **cache stampede protection**, **grace periods**, **timeouts**, **namespaces**, etc. ### Two-levels + For those looking to go further, you can use the two-levels caching system. Here's basically how it works: - **L1: Local Cache**: First level cache. 
Data is stored in memory with an LRU algorithm for quick access @@ -59,7 +60,6 @@ The major benefit of multi-tier caching, is that it allows for responses between In fact, it's a quite common pattern : to quote an example, it's [what Stackoverflow does](https://nickcraver.com/blog/2019/08/06/stack-overflow-how-we-do-app-caching/#layers-of-cache-at-stack-overflow). - To give some perspective, here's a simple benchmark that shows the difference between a simple distributed cache ( using Redis ) vs a multi-tier cache ( using Redis + In-memory cache ) : ![Redis vs Multi-tier caching](./assets/redis_vs_mtier.png) @@ -98,10 +98,10 @@ Allows associating a cache entry with one or more tags to simplify invalidation. await bento.getOrSet({ key: 'foo', factory: getFromDb(), - tags: ['tag-1', 'tag-2'] -}); + tags: ['tag-1', 'tag-2'], +}) -await bento.deleteByTag({ tags: ['tag-1'] }); +await bento.deleteByTag({ tags: ['tag-1'] }) ``` ### Namespaces @@ -150,7 +150,7 @@ You can pass a logger to Bentocache, and it will log everything that happens. Ca import { pino } from 'pino' const bento = new BentoCache({ - logger: pino() + logger: pino(), }) ```