diff --git a/.changeset/soft-boats-protect.md b/.changeset/soft-boats-protect.md
new file mode 100644
index 0000000000..ca18c04d54
--- /dev/null
+++ b/.changeset/soft-boats-protect.md
@@ -0,0 +1,6 @@
+---
+"@latticexyz/protocol-parser": patch
+"@latticexyz/store-sync": patch
+---
+
+Added `store-sync` helper libraries to interact with the indexer's experimental SQL API endpoint. Documentation is available at [https://mud.dev/indexer/sql](https://mud.dev/indexer/sql).
diff --git a/docs/components/common-text/FilterTypes.mdx b/docs/components/common-text/FilterTypes.mdx
new file mode 100644
index 0000000000..9237ef9b3e
--- /dev/null
+++ b/docs/components/common-text/FilterTypes.mdx
@@ -0,0 +1,13 @@
+import { Callout } from "nextra/components";
+
+<Callout type="info">
+MUD initial data hydration, and therefore filtering, comes in two flavors: [SQL](/indexer/sql) and [generic](/guides/hello-world/filter-sync).
+Note that this applies only to the initial hydration; filtering for ongoing synchronization is currently limited to [the generic method](/guides/hello-world/filter-sync).
+
+| | SQL | Generic |
+| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Filtering | Can filter on most SQL functions | Can only filter on tables and the first two key fields (limited by [`eth_getLogs`](https://ethereum.github.io/execution-apis/api-documentation/) filters) |
+| Availability | [Redstone](https://redstone.xyz/docs/network-info), [Garnet](https://garnetchain.com/docs/network-info), or elsewhere if you run your own instance | Any EVM chain |
+| Security assumptions | The indexer instance returns accurate information | The endpoint returns accurate information (same assumption as any other blockchain app) |
+
+</Callout>
diff --git a/docs/pages/guides/hello-world/filter-sync.mdx b/docs/pages/guides/hello-world/filter-sync.mdx
index 11cde7387f..e9078f93c9 100644
--- a/docs/pages/guides/hello-world/filter-sync.mdx
+++ b/docs/pages/guides/hello-world/filter-sync.mdx
@@ -1,10 +1,25 @@
import { CollapseCode } from "../../../components/CollapseCode";
+import FilterTypes from "../../../components/common-text/FilterTypes.mdx";
# Filter data synchronization
In this tutorial you modify `networkSetup.ts` to filter the information you synchronize.
Filtering information this way allows you to reduce the use of network resources and makes loading times faster.
+
+<FilterTypes />
+
+<details>
+
+<summary>Why are only the first two key fields available for filtering?</summary>
+
+Ethereum log entries can have [up to four indexed fields](https://www.evm.codes/?fork=cancun#a4).
+However, Solidity only supports [three indexed fields](https://www.alchemy.com/overviews/solidity-events) because the first indexed field is used for the hash of the event's name and parameter types.
+In MUD, [this field](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol) specifies whether [a new record is created](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L26-L32), a record is changed (either [static fields](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L43) or [dynamic fields](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L56-L64)), or [a record is deleted](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L71).
+The second indexed field is always the table's [resource ID](/world/resource-ids).
+This leaves two fields for key fields.
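+
+For example, a sync filter that uses both available key slots might look like this (a sketch; the table ID and key value below are the `app__Tasks` examples from [the SQL API page](/indexer/sql)):
+
+```typescript
+import { SyncFilter } from "@latticexyz/store-sync";
+
+// A table ID plus at most two key positions, mirroring the two topics left
+// after the event signature and the table's resource ID.
+const filter: SyncFilter = {
+  tableId: "0x746261707000000000000000000000005461736b730000000000000000000000", // app__Tasks
+  key0: "0x3100000000000000000000000000000000000000000000000000000000000000",
+};
+```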
+
+</details>
+
## Setup
To see the effects of filtering we need a table with entries to filter. To get such a table:
diff --git a/docs/pages/indexer/_meta.js b/docs/pages/indexer/_meta.js
index 9e27e90228..dfb6f364a6 100644
--- a/docs/pages/indexer/_meta.js
+++ b/docs/pages/indexer/_meta.js
@@ -2,5 +2,6 @@ export default {
"using": "Using the Indexer",
"sqlite": "SQLite Indexer",
"postgres-event-only": "PostgreSQL for events",
- "postgres-decoded": "PostgreSQL for data (and events)",
+ "postgres-decoded": "PostgreSQL for data (and events)",
+ sql: "SQL API (Experimental)",
};
diff --git a/docs/pages/indexer/sql.mdx b/docs/pages/indexer/sql.mdx
new file mode 100644
index 0000000000..345ca9491f
--- /dev/null
+++ b/docs/pages/indexer/sql.mdx
@@ -0,0 +1,905 @@
+import { CollapseCode } from "../../components/CollapseCode";
+import FilterTypes from "../../components/common-text/FilterTypes.mdx";
+import { Callout } from "nextra/components";
+
+# SQL API
+
+<FilterTypes />
+
+If there is a SQL-enabled indexer instance serving a blockchain, as there is for [Redstone](https://redstone.xyz/) and [Garnet](https://garnetchain.com/docs/what-is-redstone), you can use it to:
+
+- Run queries on the data of any `World` on that blockchain.
+- [Speed up the initial hydration](#mud-state-hydration-via-sql-api) by reducing the amount of data that needs to be synchronized.
+ This is important for the user experience, because until the initial hydration is done the client is typically unusable.
+
+The query language is a subset of [the SQL `SELECT` command](https://en.wikipedia.org/wiki/Select_%28SQL%29).
+
+## SQL-enabled indexer URLs
+
+- [Redstone](https://redstone.xyz/) - `https://indexer.mud.redstonechain.com/q`
+- [Garnet](https://garnetchain.com/) - `https://indexer.mud.garnetchain.com/q`
+
+## Example `World`
+
+On Garnet there is a `World` at address [`0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e`](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e) that runs a slightly modified version of [the React template](https://github.com/latticexyz/mud/tree/main/templates/react).
+You can see the data schema for the `World` [in the block explorer](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e?tab=mud).
+
+## Curl queries
+
+You can run SQL queries by communicating directly with the server's API, for example using [curl](https://curl.se/).
+
+### Simple query
+
+This query retrieves a few fields from a single table.
+
+1. Create a file, `query.json`, with this content.
+
+ ```json filename="query.json" copy
+ [
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT id, description FROM app__Tasks"
+ }
+ ]
+ ```
+
+
+   <Callout type="warning">
+   The API does not support `SELECT * FROM`; you have to specify column names.
+   </Callout>
+
+1. Run this command. Install `curl` and `jq` first if necessary.
+
+ ```sh copy
+ curl https://indexer.mud.garnetchain.com/q --compressed \
+ -H 'Accept-Encoding: gzip' \
+ -H 'Content-Type: application/json' \
+ -d @query.json | jq
+ ```
+
+The output is a mapping with two fields: the block height for which the result is valid, and the result itself.
+The result is a list of query responses; here it contains just one item because we only submitted a single query.
+Each query response is also a list.
+The first entry is the field names, and all the other entries are rows returned by `SELECT`.
+
+```
+{
+ "block_height": 5699682,
+ "result": [
+ [
+ [
+ "id",
+ "description"
+ ],
+ [
+ "0x3100000000000000000000000000000000000000000000000000000000000000",
+ "Walk the dog"
+ ],
+ [
+ "0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf",
+ "Test"
+      ]
+ ]
+ ]
+}
+```
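+
+If you consume this format without the MUD helper libraries, a minimal sketch (plain TypeScript; the helper name is hypothetical) converts one query response into objects keyed by field name:
+
+```typescript
+type QueryResponse = (string | boolean | string[])[][];
+
+// The first row is the header; the remaining rows are the SELECT results.
+function rowsToObjects(response: QueryResponse): Record<string, unknown>[] {
+  const [header, ...rows] = response;
+  return rows.map((row) => Object.fromEntries(row.map((value, i) => [header[i] as string, value])));
+}
+```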
+
+Here we only care about the first result, so from now on we use this command line to tell `jq` to only show us that information.
+
+```sh copy
+curl https://indexer.mud.garnetchain.com/q --compressed \
+ -H 'Accept-Encoding: gzip' \
+ -H 'Content-Type: application/json' \
+ -d @query.json | jq '.result[0]'
+```
+
+### Conditions
+
+If we want to see only those tasks that haven't been completed, we can use a `WHERE` clause.
+
+```json filename="query.json" copy
+[
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT id, description FROM app__Tasks WHERE completedAt=0"
+ }
+]
+```
+
+
+<details>
+
+<summary>Results</summary>
+
+```json
+[
+ ["id", "description"],
+ ["0x3100000000000000000000000000000000000000000000000000000000000000", "Walk the dog"],
+ ["0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", "Test"]
+]
+```
+
+</details>
+
+### Limited results
+
+If you only want to see a few results, you can use a `LIMIT` clause.
+
+```json filename="query.json" copy
+[
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT id, description FROM app__Tasks LIMIT 2"
+ }
+]
+```
+
+
+<details>
+
+<summary>Results</summary>
+
+```json
+[
+ ["id", "description"],
+ ["0x3100000000000000000000000000000000000000000000000000000000000000", "Walk the dog"],
+ ["0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", "Test"]
+]
+```
+
+</details>
+
+You can use `OFFSET` to get a paging effect.
+For example, if you use this `query.json` you get two results, and the last row of the first one is repeated as the first row of the second one.
+
+```json filename="query.json" copy
+[
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT id, description FROM app__Tasks LIMIT 3"
+ },
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT id, description FROM app__Tasks LIMIT 3 OFFSET 2"
+ }
+]
+```
+
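+If you are scripting this, a sketch of a paging loop over the endpoint (hypothetical page size; plain `fetch`, no MUD helpers):
+
+```typescript
+const pageSize = 100;
+for (let offset = 0; ; offset += pageSize) {
+  const body = JSON.stringify([
+    {
+      address: "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+      query: `SELECT id, description FROM app__Tasks LIMIT ${pageSize} OFFSET ${offset}`,
+    },
+  ]);
+  const response = await fetch("https://indexer.mud.garnetchain.com/q", {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body,
+  });
+  const { result } = await response.json();
+  const rows = result[0].slice(1); // drop the header row
+  if (rows.length === 0) break;
+  // ...process rows...
+}
+```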
+
+<details>
+
+<summary>Results</summary>
+
+Use this command to see the results of both queries.
+
+```sh copy
+curl https://indexer.mud.garnetchain.com/q --compressed \
+ -H 'Accept-Encoding: gzip' \
+ -H 'Content-Type: application/json' -d @query.json \
+ | jq '.result'
+```
+
+The result is:
+
+```json
+[
+ [
+ ["id", "description"],
+ ["0x3100000000000000000000000000000000000000000000000000000000000000", "Walk the dog"],
+ ["0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", "Test"],
+ ["0xb15fd0e41ab0bb6eb992e0a3d4f30fce6ee24a5fc9c30f725fdfc96d9d16ed95", "Do the dishes"]
+ ],
+ [
+ ["id", "description"],
+ ["0xb15fd0e41ab0bb6eb992e0a3d4f30fce6ee24a5fc9c30f725fdfc96d9d16ed95", "Do the dishes"],
+ ["0xb81d5036d0b62e0f2536635cbd5d7cec1d1f0706c0c6c1a9fa74293d7b0888eb", "Take out the trash"]
+ ]
+]
+```
+
+</details>
+
+### Sorted results
+
+If you want to control the order in which you get results, you can use an `ORDER BY` clause.
+
+```json filename="query.json" copy
+[
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT description, createdAt FROM app__Tasks ORDER BY createdAt"
+ }
+]
+```
+
+Note that the sort field(s) need to be part of the selected columns.
+
+
+<details>
+
+<summary>Results</summary>
+
+```json
+[
+ ["description", "createdat"],
+ ["Walk the dog", "1723495628"],
+ ["Take out the trash", "1723495640"],
+ ["Do the dishes", "1723495642"],
+ ["Test", "1723495964"],
+ ["Test from a different account", "1723576522"],
+ ["Another test", "1723576522"],
+ ["Yet another test", "1723646440"]
+]
+```
+
+</details>
+
+### Multiple tables
+
+You can join multiple tables, using standard SQL join syntax.
+
+```json filename="query.json" copy
+[
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT app__Creator.id, description, taskCreator FROM app__Tasks, app__Creator WHERE app__Creator.id=app__Tasks.id"
+ }
+]
+```
+
+
+<details>
+
+<summary>Results</summary>
+
+```json
+[
+ ["id", "description", "taskcreator"],
+ [
+ "0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf",
+ "Test",
+ "0x735b2f2c662ebedffa94027a7196f0559f7f18a4"
+ ],
+ [
+ "0x727d7bfe00b6db638c69595059dc10e21c52a7912d090905a7c7dc8659efd3b8",
+ "Test from a different account",
+ "0x428b1853e5ec29d35c84a218ec5170efc7621b58"
+ ],
+ [
+ "0xb15fd0e41ab0bb6eb992e0a3d4f30fce6ee24a5fc9c30f725fdfc96d9d16ed95",
+ "Do the dishes",
+ "0x8225d72f2c39f3729d7f3fc03c6aa8731eaeef48"
+ ],
+ [
+ "0xb81d5036d0b62e0f2536635cbd5d7cec1d1f0706c0c6c1a9fa74293d7b0888eb",
+ "Take out the trash",
+ "0x8225d72f2c39f3729d7f3fc03c6aa8731eaeef48"
+ ],
+ [
+ "0xd43394ecf79077f65cd83b534dd44d3b4e9e2aa553e95aafecd14b8529543cda",
+ "Another test",
+ "0x428b1853e5ec29d35c84a218ec5170efc7621b58"
+ ]
+]
+```
+
+</details>
+
+### Grouping results
+
+You can use `GROUP BY` to identify different groups.
+For example, this query gets you the different task creators.
+
+```json filename="query.json" copy
+[
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": "SELECT taskCreator FROM app__Creator GROUP BY taskCreator"
+ }
+]
+```
+
+
+<details>
+
+<summary>Results</summary>
+
+```json
+[
+ ["taskcreator"],
+ ["0x428b1853e5ec29d35c84a218ec5170efc7621b58"],
+ ["0x735b2f2c662ebedffa94027a7196f0559f7f18a4"],
+ ["0x8225d72f2c39f3729d7f3fc03c6aa8731eaeef48"]
+]
+```
+
+</details>
+
+### Metadata
+
+You can use the `/tables` path to get the list of either all tables, or all tables that match a string.
+As per the SQL standard, the wildcard is `%`.
+
+1. Create a file, `tables.json`.
+
+ ```json filename="tables.json"
+ {
+ "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ "query": {
+ "name": "%"
+ }
+ }
+ ```
+
+1. Run this command.
+
+ ```sh copy
+ curl https://indexer.mud.garnetchain.com/tables --compressed \
+ -H 'Accept-Encoding: gzip' \
+ -H 'Content-Type: application/json' \
+ -d @tables.json | jq
+ ```
+
+
+<details>
+
+<summary>Results</summary>
+
+```json
+[
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x746273746f72650000000000000000005461626c657300000000000000000000",
+ "key_names": ["tableId"],
+ "val_names": ["fieldLayout", "keySchema", "valueSchema", "abiEncodedKeyNames", "abiEncodedFieldNames"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x006003025f5f5fc4c40000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x746273746f72650000000000000000005265736f757263654964730000000000",
+ "key_names": ["resourceId"],
+ "val_names": ["exists"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0001010060000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x746273746f726500000000000000000053746f7265486f6f6b73000000000000",
+ "key_names": ["tableId"],
+ "val_names": ["hooks"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x00000001b6000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c640000000000000000004e616d6573706163654f776e65720000",
+ "key_names": ["namespaceId"],
+ "val_names": ["owner"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0014010061000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c6400000000000000000042616c616e6365730000000000000000",
+ "key_names": ["namespaceId"],
+ "val_names": ["balance"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x002001001f000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c64000000000000000000496e7374616c6c65644d6f64756c6573",
+ "key_names": ["moduleAddress", "argumentsHash"],
+ "val_names": ["isInstalled"],
+ "key_schema": "0x00340200615f0000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0001010060000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c640000000000000000005573657244656c65676174696f6e436f",
+ "key_names": ["delegator", "delegatee"],
+ "val_names": ["delegationControlId"],
+ "key_schema": "0x0028020061610000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c640000000000000000004e616d65737061636544656c65676174",
+ "key_names": ["namespaceId"],
+ "val_names": ["delegationControlId"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c640000000000000000005265736f757263654163636573730000",
+ "key_names": ["resourceId", "caller"],
+ "val_names": ["access"],
+ "key_schema": "0x003402005f610000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0001010060000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c6400000000000000000053797374656d73000000000000000000",
+ "key_names": ["systemId"],
+ "val_names": ["system", "publicAccess"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0015020061600000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c6400000000000000000046756e6374696f6e53656c6563746f72",
+ "key_names": ["worldFunctionSelector"],
+ "val_names": ["systemId", "systemFunctionSelector"],
+ "key_schema": "0x0004010043000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x002402005f430000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x6f74776f726c6400000000000000000046756e6374696f6e5369676e61747572",
+ "key_names": ["functionSelector"],
+ "val_names": ["functionSignature"],
+ "key_schema": "0x0004010043000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x00000001c5000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c6400000000000000000053797374656d486f6f6b730000000000",
+ "key_names": ["systemId"],
+ "val_names": ["value"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x00000001b6000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c6400000000000000000053797374656d52656769737472790000",
+ "key_names": ["system"],
+ "val_names": ["systemId"],
+ "key_schema": "0x0014010061000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462776f726c64000000000000000000496e69744d6f64756c65416464726573",
+ "key_names": [],
+ "val_names": ["value"],
+ "key_schema": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0014010061000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x7462617070000000000000000000000043726561746f72000000000000000000",
+ "key_names": ["id"],
+ "val_names": ["taskCreator"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x0014010061000000000000000000000000000000000000000000000000000000",
+ "query_name": null
+ },
+ {
+ "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e",
+ "table_id": "0x746261707000000000000000000000005461736b730000000000000000000000",
+ "key_names": ["id"],
+ "val_names": ["createdAt", "completedAt", "description"],
+ "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000",
+ "val_schema": "0x004002011f1fc500000000000000000000000000000000000000000000000000",
+ "query_name": null
+ }
+]
+```
+
+</details>
+
+To interpret the results, [see the table documentation](/store/tables#advanced).
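+
+For example, a sketch decoding the packed `val_schema` of `app__Tasks` with the protocol parser (assuming the `hexToSchema` helper from `@latticexyz/protocol-parser`):
+
+```typescript
+import { hexToSchema } from "@latticexyz/protocol-parser/internal";
+
+// 0x0040 = 64 bytes of static data, 02 static fields, 01 dynamic field,
+// then the field types: 0x1f = uint256, 0x1f = uint256, 0xc5 = string.
+const schema = hexToSchema("0x004002011f1fc500000000000000000000000000000000000000000000000000");
+// { staticFields: ["uint256", "uint256"], dynamicFields: ["string"] }
+```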
+
+## TypeScript queries
+
+You can query the SQL API from [TypeScript](https://www.typescriptlang.org/) without using MUD client synchronization.
+
+1. Create the project (in an empty directory) and install the software.
+
+ ```sh copy
+ pnpm create ts-node
+ pnpm install
+ ```
+
+1. Add the package that includes the library.
+
+ ```sh copy
+ pnpm install @latticexyz/store-sync @latticexyz/store
+ ```
+
+1. Replace `src/main.ts` with this file.
+
+ ```ts filename="main.ts"
+   import { fetchRecords, selectFrom } from "@latticexyz/store-sync/internal";
+ import { defineStore } from "@latticexyz/store";
+
+ const config = defineStore({
+ namespace: "app",
+ tables: {
+ Tasks: {
+ schema: {
+ id: "bytes32",
+ createdAt: "uint256",
+ completedAt: "uint256",
+ description: "string",
+ },
+ key: ["id"],
+ },
+ Creator: {
+ schema: {
+ id: "bytes32",
+ taskCreator: "address",
+ },
+ key: ["id"],
+ },
+ },
+ });
+
+ const queryUncompleted = selectFrom({
+ table: config.tables.app__Tasks,
+ where: "completedAt = 0",
+ limit: 2,
+ });
+
+   const queryResult = await fetchRecords({
+ indexerUrl: "https://indexer.mud.garnetchain.com/q",
+ storeAddress: "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ queries: [queryUncompleted],
+ });
+
+ console.log("\n\nTwo uncompleted tasks");
+ console.log(`SQL: ${queryUncompleted.sql}\nResult:`);
+ console.log(queryResult.result[0].records);
+ ```
+
+1. Compile and execute the application.
+
+ ```sh copy
+ pnpm build && pnpm start
+ ```
+
+
+<details>
+
+<summary>Explanation</summary>
+
+```ts
+import { fetchRecords, selectFrom } from "@latticexyz/store-sync/internal";
+import { defineStore } from "@latticexyz/store";
+```
+
+Import the necessary definitions.
+
+```typescript
+const config = defineStore({
+ namespace: "app",
+ tables: {
+ ...
+ },
+})
+```
+
+Create the table configuration.
+The input to `defineStore` is the same as in [the `mud.config.ts` file](/config).
+
+```typescript
+const queryUncompleted = selectFrom({
+ table: config.tables.app__Tasks,
+ where: "completedAt = 0",
+ limit: 2,
+});
+```
+
+Create a query using [`selectFrom`](https://github.com/latticexyz/mud/blob/main/packages/store-sync/src/sql/selectFrom.ts).
+The queries supported by `selectFrom` are a subset of those the SQL API supports.
+The results come from a single table, and only `WHERE` and `LIMIT` clauses are supported.
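+
+For anything beyond that (sorting, joins, grouping), a sketch: you can build the `TableQuery` object by hand, as long as the SQL returns the same column shape as the table:
+
+```typescript
+import { TableQuery } from "@latticexyz/store-sync/internal";
+
+// Hand-written SQL; the selected columns must match the table's schema.
+const querySorted: TableQuery = {
+  table: config.tables.app__Tasks,
+  sql: `select "id", "createdAt", "completedAt", "description" from app__Tasks order by "createdAt"`,
+};
+```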
+
+```typescript
+const queryResult = await fetchRecords({
+  indexerUrl: "https://indexer.mud.garnetchain.com/q",
+ storeAddress: "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+ queries: [queryUncompleted],
+});
+```
+
+Run the query.
+
+```typescript
+console.log("\n\nTwo uncompleted tasks");
+console.log(`SQL: ${queryUncompleted.sql}\nResult:`);
+```
+
+The SQL query that generated the resulting records.
+
+```typescript
+console.log(queryResult.result[0].records);
+```
+
+The actual records.
+
+</details>
+
+## MUD state hydration via SQL API
+
+You can also use the SQL API in a MUD client to speed up the initial hydration.
+
+### Create a client to access the `World`
+
+These are the steps to create a client that can access the `World`.
+
+1. Create and run a React template application.
+
+ ```sh copy
+ pnpm create mud@latest tasks --template react
+ cd tasks
+ pnpm dev
+ ```
+
+1. [Browse to the application](http://localhost:3000/?chainId=17069&worldAddress=0x95f5d049b014114e2feeb5d8d994358ce4ffd06e).
+ The URL specifies the `chainId` and `worldAddress` for the `World`.
+
+1. In the MUD DevTools, find your account address and [fund it on Garnet](https://garnetchain.com/faucet).
+   You may need to get test ETH for your own address first, and then transfer some to the account address the application uses.
+
+1. You can now create, complete, and delete tasks.
+
+1. To see the content of the `app__Creator` table, edit `packages/contracts/mud.config.ts` to add the `Creator` table definition.
+
+
+   <CollapseCode>
+ ```typescript filename="mud.config.ts" copy showLineNumbers {15-21}
+ import { defineWorld } from "@latticexyz/world";
+
+ export default defineWorld({
+ namespace: "app",
+ tables: {
+ Tasks: {
+ schema: {
+ id: "bytes32",
+ createdAt: "uint256",
+ completedAt: "uint256",
+ description: "string",
+ },
+ key: ["id"],
+ },
+ Creator: {
+ schema: {
+ id: "bytes32",
+ taskCreator: "address",
+ },
+ key: ["id"],
+ },
+ },
+ });
+ ```
+
+   </CollapseCode>
+
+### Updating the client to use the SQL API
+
+The main purpose of the SQL API is to allow MUD clients to specify the subset of table records that a client needs, instead of synchronizing whole tables.
+
+To update the client, you change `packages/client/src/mud/setupNetwork.ts` to:
+
+
+<CollapseCode>
+```typescript filename="setupNetwork.ts" copy showLineNumbers {17, 80-97, 106-107}
+/*
+ * The MUD client code is built on top of viem
+ * (https://viem.sh/docs/getting-started.html).
+ * This line imports the functions we need from it.
+ */
+import {
+ createPublicClient,
+ fallback,
+ webSocket,
+ http,
+ createWalletClient,
+ Hex,
+ ClientConfig,
+ getContract,
+} from "viem";
+
+import { SyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/internal";
+
+import { syncToZustand } from "@latticexyz/store-sync/zustand";
+import { getNetworkConfig } from "./getNetworkConfig";
+import IWorldAbi from "contracts/out/IWorld.sol/IWorld.abi.json";
+import { createBurnerAccount, transportObserver, ContractWrite } from "@latticexyz/common";
+import { transactionQueue, writeObserver } from "@latticexyz/common/actions";
+import { Subject, share } from "rxjs";
+
+/*
+ * Import our MUD config, which includes strong types for
+ * our tables and other config options. We use this to generate
+ * things like RECS components and get back strong types for them.
+ *
+ * See https://mud.dev/templates/typescript/contracts#mudconfigts
+ * for the source of this information.
+ */
+import mudConfig from "contracts/mud.config";
+
+export type SetupNetworkResult = Awaited<ReturnType<typeof setupNetwork>>;
+
+export async function setupNetwork() {
+ const networkConfig = await getNetworkConfig();
+
+ /*
+ * Create a viem public (read only) client
+ * (https://viem.sh/docs/clients/public.html)
+ */
+ const clientOptions = {
+ chain: networkConfig.chain,
+ transport: transportObserver(fallback([webSocket(), http()])),
+ pollingInterval: 1000,
+ } as const satisfies ClientConfig;
+
+ const publicClient = createPublicClient(clientOptions);
+
+ /*
+ * Create an observable for contract writes that we can
+ * pass into MUD dev tools for transaction observability.
+ */
+  const write$ = new Subject<ContractWrite>();
+
+ /*
+ * Create a temporary wallet and a viem client for it
+ * (see https://viem.sh/docs/clients/wallet.html).
+ */
+ const burnerAccount = createBurnerAccount(networkConfig.privateKey as Hex);
+ const burnerWalletClient = createWalletClient({
+ ...clientOptions,
+ account: burnerAccount,
+ })
+ .extend(transactionQueue())
+ .extend(writeObserver({ onWrite: (write) => write$.next(write) }));
+
+ /*
+ * Create an object for communicating with the deployed World.
+ */
+ const worldContract = getContract({
+ address: networkConfig.worldAddress as Hex,
+ abi: IWorldAbi,
+ client: { public: publicClient, wallet: burnerWalletClient },
+ });
+
+ const indexerUrl = "https://indexer.mud.garnetchain.com/q";
+ const yesterday = Date.now() / 1000 - 24 * 60 * 60;
+ const filters: SyncFilter[] = [
+ selectFrom({
+ table: mudConfig.tables.app__Tasks,
+ where: `"createdAt" > ${yesterday}`,
+ }),
+ { table: mudConfig.tables.app__Creator },
+ ];
+ const { initialBlockLogs } = await getSnapshot({
+ indexerUrl,
+ storeAddress: networkConfig.worldAddress as Hex,
+ filters,
+ chainId: networkConfig.chainId,
+ });
+ const liveSyncFilters = filters.map((filter) => ({
+ tableId: filter.table.tableId,
+ }));
+
+ /*
+ * Sync on-chain state into RECS and keeps our client in sync.
+ * Uses the MUD indexer if available, otherwise falls back
+ * to the viem publicClient to make RPC calls to fetch MUD
+ * events from the chain.
+ */
+ const { tables, useStore, latestBlock$, storedBlockLogs$, waitForTransaction } = await syncToZustand({
+ initialBlockLogs,
+ filters: liveSyncFilters,
+ config: mudConfig,
+ address: networkConfig.worldAddress as Hex,
+ publicClient,
+ startBlock: BigInt(networkConfig.initialBlockNumber),
+ });
+
+ return {
+ tables,
+ useStore,
+ publicClient,
+ walletClient: burnerWalletClient,
+ latestBlock$,
+ storedBlockLogs$,
+ waitForTransaction,
+ worldContract,
+ write$: write$.asObservable().pipe(share()),
+ };
+}
+```
+
+</CollapseCode>
+
+<details>
+
+<summary>Explanation</summary>
+
+```typescript
+import { SyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/internal";
+```
+
+Import the definitions we need.
+
+```typescript
+const indexerUrl = "https://indexer.mud.garnetchain.com/q";
+```
+
+The URL for the SQL-enabled indexer.
+This is simplified testing code; on a production system this would probably be a lookup table keyed by the chain ID.
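+
+A sketch of such a lookup (assuming chain IDs 690 for Redstone and 17069 for Garnet, with the indexer URLs listed earlier):
+
+```typescript
+const indexerUrls: Record<number, string> = {
+  690: "https://indexer.mud.redstonechain.com/q", // Redstone
+  17069: "https://indexer.mud.garnetchain.com/q", // Garnet
+};
+const indexerUrl = indexerUrls[networkConfig.chainId];
+```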
+
+```typescript
+const yesterday = Date.now() / 1000 - 24 * 60 * 60;
+```
+
+In JavaScript (and therefore TypeScript), time is stored as milliseconds since [the beginning of the epoch](https://en.wikipedia.org/wiki/Unix_time).
+In UNIX, and therefore in Ethereum, time is stored as seconds since that same point.
+Dividing by 1000 and subtracting a day's worth of seconds gives the Unix timestamp of 24 hours ago.
+
+```typescript
+ const filters: SyncFilter[] = [
+```
+
+We create the filters for the tables we're interested in.
+
+```typescript
+ selectFrom({
+ table: mudConfig.tables.app__Tasks,
+ where: `"createdAt" > ${yesterday}`,
+ }),
+```
+
+From the `app__Tasks` table we only want entries created in the last 24 hours.
+To verify that the filter works as expected, you can later change the code to only look for entries older than 24 hours.
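+
+For example (a hypothetical variation), flipping the comparison hydrates only the older entries:
+
+```typescript
+selectFrom({
+  table: mudConfig.tables.app__Tasks,
+  where: `"createdAt" <= ${yesterday}`,
+}),
+```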
+
+```typescript
+ { table: mudConfig.tables.app__Creator },
+ ];
+```
+
+We also want the `app__Creator` table.
+
+```typescript
+const { initialBlockLogs } = await getSnapshot({
+ indexerUrl,
+ storeAddress: networkConfig.worldAddress as Hex,
+ filters,
+ chainId: networkConfig.chainId,
+});
+```
+
+Get the initial snapshot to hydrate (fill with initial information) the data store.
+Note that this snapshot does not have the actual data, but the events that created it.
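+
+The conversion happens inside the SQL helpers: every record returned by a query is re-encoded as a synthetic `Store_SetRecord` log. A sketch using the `recordToLog` helper that `store-sync` exports (the record values are the `Walk the dog` example from earlier):
+
+```typescript
+import { recordToLog } from "@latticexyz/store-sync";
+
+const log = recordToLog({
+  address: networkConfig.worldAddress as Hex,
+  table: mudConfig.tables.app__Tasks,
+  record: {
+    id: "0x3100000000000000000000000000000000000000000000000000000000000000",
+    createdAt: 1723495628n,
+    completedAt: 0n,
+    description: "Walk the dog",
+  },
+});
+```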
+
+```typescript
+const liveSyncFilters = filters.map((filter) => ({
+ tableId: filter.table.tableId,
+}));
+```
+
+The synchronization filters are a lot more limited.
+[You can read the description of these filters here](/guides/hello-world/filter-sync#filtering).
+
+```typescript
+ const { ... } = await syncToZustand({
+ initialBlockLogs,
+ filters: liveSyncFilters,
+ ...
+ });
+```
+
+Finally, we provide `initialBlockLogs` for the hydration and `filters` for the updates to the synchronization function (either `syncToRecs` or `syncToZustand`).
+
+</details>
diff --git a/packages/protocol-parser/src/exports/internal.ts b/packages/protocol-parser/src/exports/internal.ts
index 88fe58411d..2f8eb4074d 100644
--- a/packages/protocol-parser/src/exports/internal.ts
+++ b/packages/protocol-parser/src/exports/internal.ts
@@ -25,7 +25,8 @@ export * from "../schemaToHex";
export * from "../staticDataLength";
export * from "../valueSchemaToFieldLayoutHex";
export * from "../valueSchemaToHex";
-
+export * from "../getKey";
+export * from "../getValue";
export * from "../getFieldIndex";
export * from "../getKeySchema";
export * from "../getKeyTuple";
diff --git a/packages/protocol-parser/src/getKey.test.ts b/packages/protocol-parser/src/getKey.test.ts
new file mode 100644
index 0000000000..46e50255d2
--- /dev/null
+++ b/packages/protocol-parser/src/getKey.test.ts
@@ -0,0 +1,25 @@
+import { describe, expect, it } from "vitest";
+import { getKey } from "./getKey";
+
+describe("getKey", () => {
+ it("should return the key fields of the record", () => {
+ const table = {
+ schema: {
+ key1: { type: "uint32", internalType: "uint32" },
+ key2: { type: "uint256", internalType: "uint256" },
+ value1: { type: "string", internalType: "string" },
+ value2: { type: "string", internalType: "string" },
+ },
+ key: ["key1", "key2"],
+ } as const;
+ const record = { key1: 1, key2: 2n, value1: "hello", value2: "world" };
+ const key = getKey(table, record);
+
+ expect(key).toMatchInlineSnapshot(`
+ {
+ "key1": 1,
+ "key2": 2n,
+ }
+ `);
+ });
+});
diff --git a/packages/protocol-parser/src/getKey.ts b/packages/protocol-parser/src/getKey.ts
new file mode 100644
index 0000000000..539367f981
--- /dev/null
+++ b/packages/protocol-parser/src/getKey.ts
@@ -0,0 +1,12 @@
+import { Table } from "@latticexyz/config";
+import { getKeySchema } from "./getKeySchema";
+import { getSchemaPrimitives } from "./getSchemaPrimitives";
+
+type PartialTable = Pick<Table, "schema" | "key">;
+
+export function getKey<table extends PartialTable>(
+  table: table,
+  record: getSchemaPrimitives<table["schema"]>,
+): getSchemaPrimitives<getKeySchema<table>> {
+ return Object.fromEntries(table.key.map((fieldName) => [fieldName, record[fieldName]])) as never;
+}
diff --git a/packages/protocol-parser/src/getKeySchema.ts b/packages/protocol-parser/src/getKeySchema.ts
index 9f8bd452fa..3d6ed45efe 100644
--- a/packages/protocol-parser/src/getKeySchema.ts
+++ b/packages/protocol-parser/src/getKeySchema.ts
@@ -1,11 +1,22 @@
-import { Schema, Table } from "@latticexyz/config";
+import { StaticAbiType, Table } from "@latticexyz/config";
type PartialTable = Pick<Table, "schema" | "key">;
+export type ResolvedKeySchema = {
+ readonly [fieldName: string]: {
+ /** the Solidity primitive ABI type */
+ readonly type: StaticAbiType;
+ /** the user defined type or Solidity primitive ABI type */
+ readonly internalType: string;
+ };
+};
+
export type getKeySchema<table extends PartialTable> = PartialTable extends table
- ? Schema
+ ? ResolvedKeySchema
: {
-      readonly [fieldName in Extract<keyof table["schema"], table["key"][number]>]: table["schema"][fieldName];
+      readonly [fieldName in Extract<keyof table["schema"], table["key"][number]>]: table["schema"][fieldName] & {
+        type: StaticAbiType;
+      };
};
export function getKeySchema<table extends PartialTable>(table: table): getKeySchema<table> {
diff --git a/packages/protocol-parser/src/getValue.test.ts b/packages/protocol-parser/src/getValue.test.ts
new file mode 100644
index 0000000000..612270d26b
--- /dev/null
+++ b/packages/protocol-parser/src/getValue.test.ts
@@ -0,0 +1,25 @@
+import { describe, expect, it } from "vitest";
+import { getValue } from "./getValue";
+
+describe("getValue", () => {
+ it("should return the key fields of the record", () => {
+ const table = {
+ schema: {
+ key1: { type: "uint32", internalType: "uint32" },
+ key2: { type: "uint256", internalType: "uint256" },
+ value1: { type: "string", internalType: "string" },
+ value2: { type: "string", internalType: "string" },
+ },
+ key: ["key1", "key2"],
+ } as const;
+ const record = { key1: 1, key2: 2n, value1: "hello", value2: "world" };
+ const value = getValue(table, record);
+
+ expect(value).toMatchInlineSnapshot(`
+ {
+ "value1": "hello",
+ "value2": "world",
+ }
+ `);
+ });
+});
diff --git a/packages/protocol-parser/src/getValue.ts b/packages/protocol-parser/src/getValue.ts
new file mode 100644
index 0000000000..73bf65f2a5
--- /dev/null
+++ b/packages/protocol-parser/src/getValue.ts
@@ -0,0 +1,16 @@
+import { Table } from "@latticexyz/config";
+import { getValueSchema } from "./getValueSchema";
+import { getSchemaPrimitives } from "./getSchemaPrimitives";
+
+type PartialTable = Pick<Table, "schema" | "key">;
+
+export function getValue<table extends PartialTable>(
+  table: table,
+  record: getSchemaPrimitives<table["schema"]>,
+): getSchemaPrimitives<getValueSchema<table>> {
+ return Object.fromEntries(
+ Object.keys(table.schema)
+ .filter((fieldName) => !table.key.includes(fieldName))
+ .map((fieldName) => [fieldName, record[fieldName]]),
+ ) as never;
+}
diff --git a/packages/store-sync/package.json b/packages/store-sync/package.json
index 3e35d9f221..3cff5ad1ed 100644
--- a/packages/store-sync/package.json
+++ b/packages/store-sync/package.json
@@ -12,6 +12,7 @@
"exports": {
".": "./dist/index.js",
"./indexer-client": "./dist/indexer-client/index.js",
+ "./internal": "./dist/exports/internal.js",
"./postgres": "./dist/postgres/index.js",
"./postgres-decoded": "./dist/postgres-decoded/index.js",
"./recs": "./dist/recs/index.js",
@@ -27,6 +28,9 @@
"indexer-client": [
"./dist/indexer-client/index.d.ts"
],
+ "internal": [
+ "./dist/exports/internal.d.ts"
+ ],
"postgres": [
"./dist/postgres/index.d.ts"
],
diff --git a/packages/store-sync/src/exports/internal.ts b/packages/store-sync/src/exports/internal.ts
new file mode 100644
index 0000000000..be3db7892b
--- /dev/null
+++ b/packages/store-sync/src/exports/internal.ts
@@ -0,0 +1 @@
+export * from "../sql";
diff --git a/packages/store-sync/src/index.ts b/packages/store-sync/src/index.ts
index b69f95f5ce..de276bd499 100644
--- a/packages/store-sync/src/index.ts
+++ b/packages/store-sync/src/index.ts
@@ -6,3 +6,4 @@ export * from "./isTableRegistrationLog";
export * from "./logToTable";
export * from "./tablesWithRecordsToLogs";
export * from "./tableToLog";
+export * from "./recordToLog";
diff --git a/packages/store-sync/src/logToRecord.test.ts b/packages/store-sync/src/logToRecord.test.ts
new file mode 100644
index 0000000000..fc2704e749
--- /dev/null
+++ b/packages/store-sync/src/logToRecord.test.ts
@@ -0,0 +1,43 @@
+/* eslint-disable max-len */
+import { describe, it, expect } from "vitest";
+import { defineTable } from "@latticexyz/store/config/v2";
+import { logToRecord } from "./logToRecord";
+
+describe("logToRecord", () => {
+ it("should convert a Store_SetRecord log into a decoded table record", async () => {
+ const table = defineTable({
+ label: "Test",
+ schema: {
+ key1: "uint32",
+ key2: "uint256",
+ value1: "address",
+ value2: "string",
+ },
+ key: ["key1", "key2"],
+ });
+
+ const record = {
+ key1: 1,
+ key2: 2n,
+ value1: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c",
+ value2: "hello",
+ } as const;
+
+ const log = {
+ address: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c",
+ args: {
+ dynamicData: "0x68656c6c6f",
+ encodedLengths: "0x0000000000000000000000000000000000000000000000000500000000000005",
+ keyTuple: [
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ "0x0000000000000000000000000000000000000000000000000000000000000002",
+ ],
+ staticData: "0x3aa5ebb10dc797cac828524e59a333d0a371443c",
+ tableId: "0x7462000000000000000000000000000054657374000000000000000000000000",
+ },
+ eventName: "Store_SetRecord",
+ } as const;
+
+ expect(logToRecord({ table, log })).toStrictEqual(record);
+ });
+});
diff --git a/packages/store-sync/src/logToRecord.ts b/packages/store-sync/src/logToRecord.ts
new file mode 100644
index 0000000000..ccac1f08a8
--- /dev/null
+++ b/packages/store-sync/src/logToRecord.ts
@@ -0,0 +1,28 @@
+import { Table } from "@latticexyz/config";
+import {
+ SchemaToPrimitives,
+ decodeKey,
+ decodeValueArgs,
+ getKeySchema,
+ getSchemaTypes,
+ getValueSchema,
+} from "@latticexyz/protocol-parser/internal";
+import { StorageAdapterLog } from "./common";
+
+type PartialTable = Pick<Table, "schema" | "key">;
+
+type LogToRecordArgs<table extends PartialTable> = {
+  table: table;
+  log: StorageAdapterLog & { eventName: "Store_SetRecord" };
+};
+
+export function logToRecord<table extends PartialTable>({
+  table,
+  log,
+}: LogToRecordArgs<table>): SchemaToPrimitives<getSchemaTypes<table["schema"]>> {
+ const keySchema = getSchemaTypes(getKeySchema(table));
+ const valueSchema = getSchemaTypes(getValueSchema(table));
+ const key = decodeKey(keySchema, log.args.keyTuple);
+ const value = decodeValueArgs(valueSchema, log.args);
+ return { ...key, ...value };
+}
diff --git a/packages/store-sync/src/recordToLog.test.ts b/packages/store-sync/src/recordToLog.test.ts
new file mode 100644
index 0000000000..3ddf07166d
--- /dev/null
+++ b/packages/store-sync/src/recordToLog.test.ts
@@ -0,0 +1,52 @@
+/* eslint-disable max-len */
+import { describe, it, expect } from "vitest";
+import { recordToLog } from "./recordToLog";
+import { defineTable } from "@latticexyz/store/config/v2";
+import { logToRecord } from "./logToRecord";
+
+describe("recordToLog", () => {
+ it("should convert table record into a Store_SetRecord log", async () => {
+ const table = defineTable({
+ label: "Test",
+ schema: {
+ key1: "uint32",
+ key2: "uint256",
+ value1: "address",
+ value2: "string",
+ },
+ key: ["key1", "key2"],
+ });
+
+ const record = {
+ key1: 1,
+ key2: 2n,
+ value1: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c",
+ value2: "hello",
+ } as const;
+
+ const log = recordToLog({
+ address: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c",
+ table,
+ record,
+ });
+
+ expect(log).toMatchInlineSnapshot(`
+ {
+ "address": "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c",
+ "args": {
+ "dynamicData": "0x68656c6c6f",
+ "encodedLengths": "0x0000000000000000000000000000000000000000000000000500000000000005",
+ "keyTuple": [
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ "0x0000000000000000000000000000000000000000000000000000000000000002",
+ ],
+ "staticData": "0x3aa5ebb10dc797cac828524e59a333d0a371443c",
+ "tableId": "0x7462000000000000000000000000000054657374000000000000000000000000",
+ },
+ "eventName": "Store_SetRecord",
+ }
+ `);
+
+ expect(logToRecord({ table, log })).toStrictEqual(record);
+ });
+});
diff --git a/packages/store-sync/src/recordToLog.ts b/packages/store-sync/src/recordToLog.ts
new file mode 100644
index 0000000000..f912004388
--- /dev/null
+++ b/packages/store-sync/src/recordToLog.ts
@@ -0,0 +1,40 @@
+import {
+ SchemaToPrimitives,
+ encodeKey,
+ encodeValueArgs,
+ getKeySchema,
+ getSchemaTypes,
+ getValueSchema,
+ getKey,
+ getValue,
+} from "@latticexyz/protocol-parser/internal";
+import { StorageAdapterLog } from "./common";
+import { Table } from "@latticexyz/config";
+import { Hex } from "viem";
+
+type PartialTable = Pick<Table, "schema" | "key" | "tableId">;
+
+type RecordToLogArgs<table extends PartialTable> = {
+  address: Hex;
+  table: table;
+  record: SchemaToPrimitives<getSchemaTypes<table["schema"]>>;
+};
+
+export function recordToLog<table extends PartialTable>({
+ table,
+ record,
+ address,
+}: RecordToLogArgs<table>): StorageAdapterLog & { eventName: "Store_SetRecord" } {
+ const keySchema = getSchemaTypes(getKeySchema(table));
+ const valueSchema = getSchemaTypes(getValueSchema(table));
+
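+  // Re-encode the record's key and value fields exactly the way the on-chain
+  // Store_SetRecord event encodes them, so storage adapters can ingest this
+  // log as if it came from the chain.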
+ return {
+ eventName: "Store_SetRecord",
+ address: address,
+ args: {
+ tableId: table.tableId,
+ keyTuple: encodeKey(keySchema, getKey(table, record)),
+ ...encodeValueArgs(valueSchema, getValue(table, record)),
+ },
+ };
+}
diff --git a/packages/store-sync/src/sql/common.ts b/packages/store-sync/src/sql/common.ts
new file mode 100644
index 0000000000..cad1dea569
--- /dev/null
+++ b/packages/store-sync/src/sql/common.ts
@@ -0,0 +1,30 @@
+import { Table } from "@latticexyz/config";
+import { Hex } from "viem";
+
+export type TableQuery = {
+ table: Table;
+ /**
+ * SQL to filter the records of this table.
+   * The SQL result is expected to be of the same column shape as the table.
+ * Use the `selectFrom` helper to ensure the expected column shape.
+ * Note: requires an indexer with SQL API
+ */
+ sql: string;
+};
+
+export type LogFilter = {
+ /**
+ * Filter logs by the table ID.
+ */
+ table: Table;
+ /**
+ * Optionally filter by the `bytes32` value of the key in the first position (index zero of the record's key tuple).
+ */
+ key0?: Hex;
+ /**
+ * Optionally filter by the `bytes32` value of the key in the second position (index one of the record's key tuple).
+ */
+ key1?: Hex;
+};
+
+export type SyncFilter = TableQuery | LogFilter;
diff --git a/packages/store-sync/src/sql/decodeField.test.ts b/packages/store-sync/src/sql/decodeField.test.ts
new file mode 100644
index 0000000000..aa56131aee
--- /dev/null
+++ b/packages/store-sync/src/sql/decodeField.test.ts
@@ -0,0 +1,9 @@
+import { describe, expect, it } from "vitest";
+import { decodeField } from "./decodeField";
+
+describe("decodeField", () => {
+ it("should decode numbers to the expected value type", () => {
+ expect(decodeField("uint48", "1")).toBe(1);
+ expect(decodeField("uint56", "1")).toBe(1n);
+ });
+});
diff --git a/packages/store-sync/src/sql/decodeField.ts b/packages/store-sync/src/sql/decodeField.ts
new file mode 100644
index 0000000000..d79c5c6586
--- /dev/null
+++ b/packages/store-sync/src/sql/decodeField.ts
@@ -0,0 +1,24 @@
+import { AbiType } from "@latticexyz/config";
+import {
+ ArrayAbiType,
+ SchemaAbiTypeToPrimitiveType,
+ arrayToStaticAbiType,
+ schemaAbiTypeToDefaultValue,
+} from "@latticexyz/schema-type/internal";
+
+export function decodeField<abiType extends AbiType>(
+  abiType: abiType,
+  data: string | boolean | string[],
+): SchemaAbiTypeToPrimitiveType<abiType> {
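+  // The JS type of this ABI type's default value tells us which primitive to
+  // decode to (e.g. uint48 decodes to number, uint56 and larger to bigint).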
+ const defaultValueType = typeof schemaAbiTypeToDefaultValue[abiType];
+ if (Array.isArray(data)) {
+ return data.map((element) => decodeField(arrayToStaticAbiType(abiType as ArrayAbiType), element)) as never;
+ }
+ if (defaultValueType === "number") {
+ return Number(data) as never;
+ }
+ if (defaultValueType === "bigint") {
+ return BigInt(data) as never;
+ }
+ return data as never;
+}
diff --git a/packages/store-sync/src/sql/decodeRecords.test.ts b/packages/store-sync/src/sql/decodeRecords.test.ts
new file mode 100644
index 0000000000..fc5b491774
--- /dev/null
+++ b/packages/store-sync/src/sql/decodeRecords.test.ts
@@ -0,0 +1,40 @@
+import { describe, expect, it } from "vitest";
+import { decodeRecords } from "./decodeRecords";
+
+describe("decodeRecord", () => {
+ const schema = {
+ address: { type: "address", internalType: "address" },
+ uint256: { type: "uint256", internalType: "uint256" },
+ uint32: { type: "uint32", internalType: "uint32" },
+ bool: { type: "bool", internalType: "bool" },
+ bytes: { type: "bytes", internalType: "bytes" },
+ string: { type: "string", internalType: "string" },
+ uint32Arr: { type: "uint32[]", internalType: "uint32[]" },
+ } as const;
+
+ it("decodes record", () => {
+ const header = Object.keys(schema);
+
+ const record = [
+ "0x0000000000000000000000000000000000000000",
+ "1234",
+ "1234",
+ true,
+ "0x1234",
+ "hello world",
+ ["1234", "5678"],
+ ];
+ const decodedRecord = {
+ address: "0x0000000000000000000000000000000000000000",
+ uint256: 1234n,
+ uint32: 1234,
+ bool: true,
+ bytes: "0x1234",
+ string: "hello world",
+ uint32Arr: [1234, 5678],
+ };
+
+ const decoded = decodeRecords({ schema, records: [header, record] });
+ expect(decoded).toStrictEqual([decodedRecord]);
+ });
+});
diff --git a/packages/store-sync/src/sql/decodeRecords.ts b/packages/store-sync/src/sql/decodeRecords.ts
new file mode 100644
index 0000000000..91cbcfc1e5
--- /dev/null
+++ b/packages/store-sync/src/sql/decodeRecords.ts
@@ -0,0 +1,41 @@
+import { Schema } from "@latticexyz/config";
+import { getSchemaPrimitives } from "@latticexyz/protocol-parser/internal";
+import { decodeField } from "./decodeField";
+
+type QueryHeader = string[];
+type QueryRecord = (string | boolean | string[])[];
+
+// First item in the result is the header
+export type QueryResult = [QueryHeader, ...QueryRecord[]];
+
+/**
+ * Trim the header row from the query result
+ */
+function trimHeader(result: QueryResult): QueryRecord[] {
+ return result.slice(1);
+}
+
+export type DecodeRecordsArgs<schema extends Schema = Schema> = {
+  schema: schema;
+  records: QueryResult;
+};
+
+export type DecodeRecordsResult<schema extends Schema = Schema> = getSchemaPrimitives<schema>[];
+
+export function decodeRecords<schema extends Schema>({
+  schema,
+  records,
+}: DecodeRecordsArgs<schema>): DecodeRecordsResult<schema> {
+ const fieldNames = Object.keys(schema);
+ if (records.length > 0 && fieldNames.length !== records[0].length) {
+ throw new Error(
+ `Mismatch between schema and query result.\nSchema: [${fieldNames.join(", ")}]\nQuery result: [${records[0].join(", ")}]`,
+ );
+ }
+
+ return trimHeader(records).map((record) =>
+ Object.fromEntries(
+ Object.keys(schema).map((fieldName, index) => [fieldName, decodeField(schema[fieldName].type, record[index])]),
+ ),
+ ) as never;
+}
diff --git a/packages/store-sync/src/sql/fetchRecords.test.ts b/packages/store-sync/src/sql/fetchRecords.test.ts
new file mode 100644
index 0000000000..9b029733ac
--- /dev/null
+++ b/packages/store-sync/src/sql/fetchRecords.test.ts
@@ -0,0 +1,136 @@
+import { describe, expect, it } from "vitest";
+import { fetchRecords } from "./fetchRecords";
+import mudConfig from "@latticexyz/world/mud.config";
+import { selectFrom } from "./selectFrom";
+
+describe("fetchRecords", () => {
+ // TODO: set up CI test case for this
+ it.skip("should fetch sql", async () => {
+ const result = await fetchRecords({
+ indexerUrl: "https://indexer.mud.redstonechain.com/q",
+ storeAddress: "0x9d05cc196c87104a7196fcca41280729b505dbbf",
+ queries: [
+ selectFrom({ table: mudConfig.tables.world__Balances, where: '"balance" > 0', limit: 2 }),
+ selectFrom({ table: mudConfig.tables.world__FunctionSignatures, limit: 10 }),
+ ],
+ });
+
+ expect(result).toMatchInlineSnapshot(`
+ {
+ "blockHeight": 4909521n,
+ "result": [
+ {
+ "records": [
+ {
+ "balance": 308500000000000000n,
+ "namespaceId": "0x6e73000000000000000000000000000000000000000000000000000000000000",
+ },
+ ],
+ "table": {
+ "codegen": {
+ "dataStruct": false,
+ "outputDirectory": "tables",
+ "storeArgument": false,
+ "tableIdArgument": false,
+ },
+ "deploy": {
+ "disabled": false,
+ },
+ "key": [
+ "namespaceId",
+ ],
+ "label": "Balances",
+ "name": "Balances",
+ "namespace": "world",
+ "schema": {
+ "balance": {
+ "internalType": "uint256",
+ "type": "uint256",
+ },
+ "namespaceId": {
+ "internalType": "ResourceId",
+ "type": "bytes32",
+ },
+ },
+ "tableId": "0x7462776f726c6400000000000000000042616c616e6365730000000000000000",
+ "type": "table",
+ },
+ },
+ {
+ "records": [
+ {
+ "functionSelector": "0x0560912900000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "unregisterStoreHook(bytes32,address)",
+ },
+ {
+ "functionSelector": "0x0ba51f4900000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "registerTable(bytes32,bytes32,bytes32,bytes32,string[],string[])",
+ },
+ {
+ "functionSelector": "0x127de47a00000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "createMatch(string,bytes32,bytes32,bytes32)",
+ },
+ {
+ "functionSelector": "0x17902d6100000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "createMatchSeasonPass(string,bytes32,bytes32,bytes32,bytes32,uint256,uint256[],bool)",
+ },
+ {
+ "functionSelector": "0x1b9a91a400000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "withdrawEth(address,uint256)",
+ },
+ {
+ "functionSelector": "0x1d2257ba00000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "registerDelegation(address,bytes32,bytes)",
+ },
+ {
+ "functionSelector": "0x1fc595cd00000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "setOfficial(bytes32,bool)",
+ },
+ {
+ "functionSelector": "0x219adc2e00000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "renounceOwnership(bytes32)",
+ },
+ {
+ "functionSelector": "0x220ca1f600000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "toggleReady(bytes32)",
+ },
+ {
+ "functionSelector": "0x231bb4cd00000000000000000000000000000000000000000000000000000000",
+ "functionSignature": "createNewSeasonPass(bytes14,uint256,uint256,uint256,uint256,uint256,uint256,uint256)",
+ },
+ ],
+ "table": {
+ "codegen": {
+ "dataStruct": false,
+ "outputDirectory": "tables",
+ "storeArgument": false,
+ "tableIdArgument": false,
+ },
+ "deploy": {
+ "disabled": false,
+ },
+ "key": [
+ "functionSelector",
+ ],
+ "label": "FunctionSignatures",
+ "name": "FunctionSignatur",
+ "namespace": "world",
+ "schema": {
+ "functionSelector": {
+ "internalType": "bytes4",
+ "type": "bytes4",
+ },
+ "functionSignature": {
+ "internalType": "string",
+ "type": "string",
+ },
+ },
+ "tableId": "0x6f74776f726c6400000000000000000046756e6374696f6e5369676e61747572",
+ "type": "offchainTable",
+ },
+ },
+ ],
+ }
+ `);
+ });
+});
diff --git a/packages/store-sync/src/sql/fetchRecords.ts b/packages/store-sync/src/sql/fetchRecords.ts
new file mode 100644
index 0000000000..c68befa1a9
--- /dev/null
+++ b/packages/store-sync/src/sql/fetchRecords.ts
@@ -0,0 +1,66 @@
+import { DecodeRecordsResult, QueryResult, decodeRecords } from "./decodeRecords";
+import { Hex } from "viem";
+import { TableQuery } from "./common";
+import { Table } from "@latticexyz/config";
+
+type ResponseSuccess = {
+ block_height: string;
+ result: QueryResult[];
+};
+
+type ResponseFail = { msg: string };
+
+type Response = ResponseSuccess | ResponseFail;
+
+function isResponseFail(response: Response): response is ResponseFail {
+ return "msg" in response;
+}
+
+type FetchRecordsArgs = {
+ indexerUrl: string;
+ storeAddress: Hex;
+ queries: TableQuery[];
+};
+
+type FetchRecordsResult = {
+ blockHeight: bigint;
+ result: {
+ table: Table;
+ records: DecodeRecordsResult;
+ }[];
+};
+
+export async function fetchRecords({
+ indexerUrl,
+ queries,
+ storeAddress,
+}: FetchRecordsArgs): Promise<FetchRecordsResult> {
+ const query = JSON.stringify(queries.map((query) => ({ address: storeAddress, query: query.sql })));
+
+ const response: Response = await fetch(indexerUrl, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: query,
+ }).then((res) => res.json());
+
+ if (isResponseFail(response)) {
+ throw new Error(`Response: ${response.msg}\n\nTry reproducing via cURL:
+ curl ${indexerUrl} \\
+ --compressed \\
+ -H 'Accept-Encoding: gzip' \\
+ -H 'Content-Type: application/json' \\
+ -d '${query.replaceAll("'", "\\'")}'`);
+ }
+
+ const result: FetchRecordsResult = {
+ blockHeight: BigInt(response.block_height),
+ result: response.result.map((records, index) => ({
+ table: queries[index].table,
+ records: decodeRecords({ schema: queries[index].table.schema, records }),
+ })),
+ };
+
+ return result;
+}
diff --git a/packages/store-sync/src/sql/getSnapshot.ts b/packages/store-sync/src/sql/getSnapshot.ts
new file mode 100644
index 0000000000..1dd68ff9c1
--- /dev/null
+++ b/packages/store-sync/src/sql/getSnapshot.ts
@@ -0,0 +1,88 @@
+import { LogFilter, SyncFilter, TableQuery } from "./common";
+import { Hex } from "viem";
+import { StorageAdapterBlock, SyncFilter as LegacyLogFilter } from "../common";
+import { fetchRecords } from "./fetchRecords";
+import { recordToLog } from "../recordToLog";
+import { getSnapshot as getSnapshotLogs } from "../getSnapshot";
+import { bigIntMin, isDefined } from "@latticexyz/common/utils";
+
+export type GetSnapshotArgs = {
+ indexerUrl: string;
+ storeAddress: Hex;
+ filters?: SyncFilter[];
+ startBlock?: bigint;
+ chainId: number;
+};
+
+export type GetSnapshotResult = {
+ initialBlockLogs: StorageAdapterBlock;
+};
+
+export async function getSnapshot({
+ indexerUrl,
+ storeAddress,
+ filters,
+ startBlock = 0n,
+ chainId,
+}: GetSnapshotArgs): Promise<GetSnapshotResult> {
+ try {
+ // We execute the list of provided SQL queries for hydration. For performance
+ // reasons the queries are not executed against a fixed block height, but against
+ // the latest state. We therefore pass the min block number of all query results
+ // as overall block number. This means some logs will be re-fetched again during
+ // the hydration process, but after the hydration is complete, the state will be
+ // correct. Intermediate state updates during hydration might be incorrect (for
+ // partial updates), so we only notify consumers of state updates after the
+ // initial hydration is complete.
+
+ const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as TableQuery[]) : [];
+
+    const fetchLogs = async (): Promise<StorageAdapterBlock | undefined> => {
+ // Fetch the tables without SQL filter from the snapshot logs API for better performance.
+ const logsFilters =
+ filters &&
+ filters
+ .filter((filter) => !("sql" in filter))
+ .map((filter) => {
+ const { table, key0, key1 } = filter as LogFilter;
+ return { tableId: table.tableId, key0, key1 } as LegacyLogFilter;
+ });
+
+ if (logsFilters && logsFilters.length === 0) {
+ return undefined;
+ }
+
+ return getSnapshotLogs({
+ chainId,
+ address: storeAddress,
+ filters: logsFilters,
+ indexerUrl,
+ });
+ };
+
+    const fetchSql = async (query: TableQuery): Promise<StorageAdapterBlock> => {
+ const result = await fetchRecords({ indexerUrl, storeAddress, queries: [query] });
+ return {
+ blockNumber: result.blockHeight,
+ logs: result.result.flatMap(({ table, records }) =>
+ records.map((record) => recordToLog({ table, record, address: storeAddress })),
+ ),
+ };
+ };
+
+ // Execute individual SQL queries as separate requests to parallelize on the backend.
+ // Each individual request is expected to be executed against the same db state so it
+ // can't be parallelized.
+ const results = (await Promise.all([fetchLogs(), ...sqlFilters.map(fetchSql)])).filter(isDefined);
+ // The block number passed in the overall result will be the min of all queries and the logs.
+ const initialBlockLogs = {
+ blockNumber: results.length > 0 ? bigIntMin(...results.map((result) => result.blockNumber)) : startBlock,
+ logs: results.flatMap((result) => result.logs),
+ };
+
+ return { initialBlockLogs };
+ } catch (e) {
+ console.warn(`Failed to load snapshot. ${e}`);
+ return { initialBlockLogs: { blockNumber: startBlock - 1n, logs: [] } };
+ }
+}
diff --git a/packages/store-sync/src/sql/index.ts b/packages/store-sync/src/sql/index.ts
new file mode 100644
index 0000000000..aeeec5224e
--- /dev/null
+++ b/packages/store-sync/src/sql/index.ts
@@ -0,0 +1,4 @@
+export * from "./common";
+export * from "./fetchRecords";
+export * from "./selectFrom";
+export * from "./getSnapshot";
diff --git a/packages/store-sync/src/sql/selectFrom.ts b/packages/store-sync/src/sql/selectFrom.ts
new file mode 100644
index 0000000000..ff79278c7d
--- /dev/null
+++ b/packages/store-sync/src/sql/selectFrom.ts
@@ -0,0 +1,19 @@
+import { Table } from "@latticexyz/config";
+import { TableQuery } from "./common";
+
+// For autocompletion but still allowing all SQL strings
+export type Where<table extends Table> = `"${keyof table["schema"] & string}"` | (string & {});
+
+export type SelectFromArgs<table extends Table> = { table: table; where?: Where<table>; limit?: number };
+
+export function selectFrom<table extends Table>({ table, where, limit }: SelectFromArgs<table>): TableQuery {
+ const indexerTableLabel = table.namespace === "" ? table.name : `${table.namespace}__${table.name}`;
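+  // Double-quote column names so camelCase field names keep their casing.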
+ return {
+ table: table,
+ sql: `select ${Object.keys(table.schema)
+ .map((key) => `"${key}"`)
+ .join(
+ ", ",
+ )} from ${indexerTableLabel}${where != null ? ` where ${where}` : ""}${limit != null ? ` limit ${limit}` : ""}`,
+ };
+}
diff --git a/packages/store-sync/tsup.config.ts b/packages/store-sync/tsup.config.ts
index d8f6860bea..87fbf6d134 100644
--- a/packages/store-sync/tsup.config.ts
+++ b/packages/store-sync/tsup.config.ts
@@ -10,6 +10,7 @@ export default defineConfig({
"src/trpc-indexer/index.ts",
"src/indexer-client/index.ts",
"src/zustand/index.ts",
+ "src/exports/internal.ts",
],
target: "esnext",
format: ["esm"],