diff --git a/.gitignore b/.gitignore
index db8c0dff..4e199e0d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,8 @@ node_modules
 **/secrets.yaml
 **/secrets.*.yaml
 **/conf
+docker/abi
+**/context
 **/.env
 dist
 **/data
diff --git a/LICENSE b/LICENSE
index 8ac9f497..0302ff30 100644
--- a/LICENSE
+++ b/LICENSE
@@ -4,7 +4,7 @@ Parameters
 Licensor:             Kenshi
 Licensed Work:        Kenshi Unchained
-                      The Licensed Work is (c) 2023 Kenshi.
+                      The Licensed Work is (c) 2023-2024 Kenshi.

 Additional Use Grant: You are granted the right to use the Licensed Work as-is,
                       exclusively on networks officially designated by Kenshi or
                       Unchained. This right is akin to the permissible use of
@@ -19,15 +19,25 @@ Additional Use Grant: You are granted the right to use the Licensed Work as-is,
                       research, personal network use, and academic purposes.
                       This clause is to facilitate access to the Licensed Work
                       for non-profit and developmental activities.
-
+
+                      Additionally, entities with an annual revenue of less than
+                      100,000 CHF are granted the right to operate the Licensed
+                      Work on private networks for their internal business
+                      operations free of charge. This concession is aimed at
+                      supporting small businesses and startups in leveraging the
+                      Licensed Work for their growth and innovation, without the
+                      immediate financial burden of acquiring a commercial
+                      license.
+
                       Operating the Licensed Work on private networks for
-                      commercial purposes is prohibited unless a separate
-                      commercial license is obtained from Kenshi. Commercial
-                      purposes encompass any use involving monetary or other
-                      forms of compensation, or as part of a commercial
-                      offering or service. This restriction is to protect
-                      Kenshi's commercial interests and ensure the Licensed
-                      Work is appropriately licensed for commercial uses.
+                      commercial purposes, outside of the aforementioned
+                      exception, is prohibited unless a separate commercial
+                      license is obtained from Kenshi. Commercial purposes
+                      encompass any use involving monetary or other forms of
+                      compensation, or as part of a commercial offering or
+                      service. This restriction is to protect Kenshi's
+                      commercial interests and ensure the Licensed Work is
+                      appropriately licensed for commercial uses.

 Change Date:          Ten years from the date the Licensed Work is
                       published.
diff --git a/conf.broker.yaml.template b/conf.broker.yaml.template
index b7b94201..5bb92a15 100644
--- a/conf.broker.yaml.template
+++ b/conf.broker.yaml.template
@@ -13,20 +13,26 @@ rpc:

 plugins:
   uniswap:
+    schedule:
+      ethereum: 5000
+
     tokens:
       - name: ethereum
+        chain: ethereum
         pair: "0x88e6a0c2ddd26feeb64f039a2c41296fcb3f5640"
-        delta: 6
+        delta: 12
         invert: true
         unit: USDT

       - name: arbitrum
+        chain: ethereum
         pair: "0x59354356Ec5d56306791873f567d61EBf11dfbD5"
         delta: 0
         invert: false
         unit: ETH

       - name: bitcoin
+        chain: ethereum
         pair: "0x9db9e0e53058c89e5b94e29621a205198648425b"
         delta: 2
         invert: false
diff --git a/conf.remote.yaml.template b/conf.remote.yaml.template
index b7b94201..5bb92a15 100644
--- a/conf.remote.yaml.template
+++ b/conf.remote.yaml.template
@@ -13,20 +13,26 @@ rpc:

 plugins:
   uniswap:
+    schedule:
+      ethereum: 5000
+
     tokens:
       - name: ethereum
+        chain: ethereum
         pair: "0x88e6a0c2ddd26feeb64f039a2c41296fcb3f5640"
-        delta: 6
+        delta: 12
         invert: true
         unit: USDT

       - name: arbitrum
+        chain: ethereum
         pair: "0x59354356Ec5d56306791873f567d61EBf11dfbD5"
         delta: 0
         invert: false
         unit: ETH

       - name: bitcoin
+        chain: ethereum
         pair: "0x9db9e0e53058c89e5b94e29621a205198648425b"
         delta: 2
         invert: false
diff --git a/conf.standalone.yaml.template b/conf.standalone.yaml.template
new file mode 100644
index 00000000..fbcccea6
--- /dev/null
+++ b/conf.standalone.yaml.template
@@ -0,0 +1,68 @@
+log: info
+name:
+
+database:
+  url: postgres://:@:/?sslmode=disable
+
+rpc:
+  ethereum:
+    - https://ethereum.publicnode.com
+    - https://eth.llamarpc.com
+    - wss://ethereum.publicnode.com
+    - https://eth.rpc.blxrbdn.com
+
+  arbitrum:
+    - https://arbitrum-one.publicnode.com
+    - https://arbitrum.llamarpc.com
+    - wss://arbitrum-one.publicnode.com
+    - https://arbitrum-one.public.blastapi.io
+
+plugins:
+  logs:
+    schedule:
+      arbitrum: 100
+      ethereum: 5000
+
+    events:
+      - name: DAI
+        chain: ethereum
+        abi: ./abi/ERC20.json
+        event: Transfer
+        address: "0x6B175474E89094C44Da98b954EedeAC495271d0F"
+        from: 19271250
+        step: 8
+        confirmations: 8
+        store: true
+
+  uniswap:
+    schedule:
+      arbitrum: 100
+      ethereum: 5000
+
+    tokens:
+      - name: ethereum
+        chain: ethereum
+        pair: "0x88e6a0c2ddd26feeb64f039a2c41296fcb3f5640"
+        delta: 12
+        invert: true
+        unit: USDT
+        send: false
+        store: true
+
+      - name: arbitrum
+        chain: ethereum
+        pair: "0x59354356Ec5d56306791873f567d61EBf11dfbD5"
+        delta: 0
+        invert: false
+        unit: ETH
+        send: false
+        store: true
+
+      - name: bitcoin
+        chain: ethereum
+        pair: "0x9db9e0e53058c89e5b94e29621a205198648425b"
+        delta: 2
+        invert: false
+        unit: USDT
+        send: false
+        store: true
diff --git a/conf.worker.yaml.template b/conf.worker.yaml.template
index bb83b630..37697795 100644
--- a/conf.worker.yaml.template
+++ b/conf.worker.yaml.template
@@ -1,6 +1,9 @@
 log: info
 name:

+broker:
+  uri: wss://shinobi.brokers.kenshi.io
+
 rpc:
   ethereum:
     - https://ethereum.publicnode.com
@@ -10,21 +13,30 @@ rpc:

 plugins:
   uniswap:
+    schedule:
+      ethereum: 5000
+
     tokens:
       - name: ethereum
+        chain: ethereum
         pair: "0x88e6a0c2ddd26feeb64f039a2c41296fcb3f5640"
-        delta: 6
+        delta: 12
         invert: true
         unit: USDT
+        send: true

       - name: arbitrum
+        chain: ethereum
         pair: "0x59354356Ec5d56306791873f567d61EBf11dfbD5"
         delta: 0
         invert: false
         unit: ETH
+        send: true

       - name: bitcoin
+        chain: ethereum
         pair: "0x9db9e0e53058c89e5b94e29621a205198648425b"
         delta: 2
         invert: false
         unit: USDT
+        send: true
diff --git a/docker/compose.yaml b/docker/compose.yaml
index fcaff827..217e8094 100644
--- a/docker/compose.yaml
+++ b/docker/compose.yaml
@@ -15,21 +15,38 @@ services:
   #  profiles: ["broker"]
   #  restart: always

-  #postgres:
-  #  image: postgres:16.1
-  #  container_name: postgres
-  #  hostname: postgres
-  #  env_file:
-  #    - .env
-  #  volumes:
-  #    - ./data:/var/lib/postgresql/data
-  #  profiles: ["broker"]
-  #  restart: always
-  #  healthcheck:
-  #    test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
-  #    interval: 10s
-  #    timeout: 5s
-  #    retries: 5
+  unchained_standalone:
+    image: ghcr.io/kenshitech/unchained:latest
+    container_name: unchained_standalone
+    hostname: unchained_standalone
+    volumes:
+      - ./conf:/app/conf
+      - ./abi:/app/abi
+      - ./context:/app/context
+    depends_on:
+      postgres:
+        condition: service_healthy
+    environment:
+      - UNCHAINED_NODE_TYPE=standalone
+      - UNCHAINED_CMD=worker
+    profiles: ["standalone"]
+    restart: always
+
+  postgres:
+    image: postgres:16.1
+    container_name: postgres
+    hostname: postgres
+    env_file:
+      - .env
+    volumes:
+      - ./data:/var/lib/postgresql/data
+    profiles: ["standalone"]
+    restart: always
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
+      interval: 10s
+      timeout: 5s
+      retries: 5

   #grafana:
   #  image: grafana/grafana-oss:latest
@@ -54,8 +71,10 @@ services:
     hostname: unchained_worker
     volumes:
       - ./conf:/app/conf
+      - ./context:/app/context
     environment:
       - UNCHAINED_NODE_TYPE=worker
+      - UNCHAINED_CMD=worker
     profiles: ["worker"]
     restart: always

diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 334d2524..c3e8313a 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -2,12 +2,4 @@

 echo "Running a $UNCHAINED_NODE_TYPE node."

-#if [ $UNCHAINED_NODE_TYPE = "broker" ]; then
-#  unchained postgres migrate conf.yaml
-#  retVal=$?
-#  if [ $retVal -ne 0 ]; then
-#    exit $retVal
-#  fi
-#fi
-
-./unchained $UNCHAINED_NODE_TYPE -c conf/conf.$UNCHAINED_NODE_TYPE.yaml -s conf/secrets.$UNCHAINED_NODE_TYPE.yaml
+./unchained $UNCHAINED_CMD -c conf/conf.$UNCHAINED_NODE_TYPE.yaml -s conf/secrets.$UNCHAINED_NODE_TYPE.yaml -x context/$UNCHAINED_NODE_TYPE
diff --git a/docker/unchained.sh b/docker/unchained.sh
index 8c7e5a1f..ed549f15 100755
--- a/docker/unchained.sh
+++ b/docker/unchained.sh
@@ -4,8 +4,9 @@ usage() {
   echo "Usage: $0 [node] [options]"
   echo "Node:"
   #echo " broker     - Manage unchained broker node"
-  #echo " remote - Manage unchained remote node"
-  echo " worker - Manage unchained worker node"
+  #echo " remote     - Manage unchained remote node"
+  echo " worker     - Manage unchained worker node"
+  echo " standalone - Manage unchained standalone node"
   echo "Options:"
   echo "  Additional options passed directly to 'docker compose'"
   echo "Examples:"
@@ -29,7 +30,7 @@ if ! docker compose version &>/dev/null; then
   exit 1
 fi

-if [ ! $1 == 'worker' ]; then #&& [ ! $1 == 'remote' ] && [ ! $1 == 'lite' ] || [ -z $2 ]; then
+if [ ! $1 == 'worker' ] && [ ! $1 == 'standalone' ]; then #&& [ ! $1 == 'remote' ] && [ ! $1 == 'lite' ] || [ -z $2 ]; then
   usage
   exit 1
 fi
diff --git a/go.work.sum b/go.work.sum
index 03794e69..30f4a5f4 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -28,6 +28,7 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsP
 github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
 github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
 github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
@@ -48,7 +49,6 @@ github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u1
 github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
 github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
@@ -96,6 +96,7 @@ github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8
 github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
@@ -111,7 +112,6 @@ go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaod
 go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
 go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
 go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
-go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
 go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
diff --git a/quickstart.md b/quickstart.md
index 6629ea52..9cb0c337 100644
--- a/quickstart.md
+++ b/quickstart.md
@@ -199,6 +199,9 @@ config:
 log: info
 name:

+broker:
+  uri: wss://shinobi.brokers.kenshi.io
+
 rpc:
   ethereum:
     - https://ethereum.publicnode.com
@@ -208,24 +211,33 @@ rpc:

 plugins:
   uniswap:
+    schedule:
+      ethereum: 5000
+
     tokens:
       - name: ethereum
+        chain: ethereum
         pair: "0x88e6a0c2ddd26feeb64f039a2c41296fcb3f5640"
-        delta: 6
+        delta: 12
         invert: true
         unit: USDT
+        send: true

       - name: arbitrum
+        chain: ethereum
         pair: "0x59354356Ec5d56306791873f567d61EBf11dfbD5"
         delta: 0
         invert: false
         unit: ETH
+        send: true

       - name: bitcoin
+        chain: ethereum
         pair: "0x9db9e0e53058c89e5b94e29621a205198648425b"
         delta: 2
         invert: false
         unit: USDT
+        send: true
 ```

 Save the above configuration in a file named `conf.yaml` on your system and make
diff --git a/src/bls/store.go b/src/bls/store.go
index 0a19ccc7..9cc422ac 100644
--- a/src/bls/store.go
+++ b/src/bls/store.go
@@ -15,12 +15,13 @@ import (

 var ClientSecretKey *big.Int
 var ClientPublicKey *bls12381.G2Affine
 var ClientShortPublicKey *bls12381.G1Affine
+var ClientSigner Signer

 func InitClientIdentity() {
 	var err error
 	var pkBytes [96]byte

-	if config.Secrets.InConfig("secretKey") {
+	if config.Secrets.IsSet("secretKey") {

 		decoded := base58.Decode(config.Secrets.GetString("secretKey"))
@@ -52,6 +53,12 @@ func InitClientIdentity() {
 	ClientShortPublicKey = GetShortPublicKey(ClientSecretKey)
 	addrStr := address.Calculate(pkBytes[:])

+	ClientSigner = Signer{
+		Name:           config.Config.GetString("name"),
+		PublicKey:      ClientPublicKey.Bytes(),
+		ShortPublicKey: ClientShortPublicKey.Bytes(),
+	}
+
 	log.Logger.
 		With("Address", addrStr).
 		Info("Unchained")
@@ -59,7 +66,7 @@ func InitClientIdentity() {
 	// TODO: Avoid recalculating this
 	config.Secrets.Set("publicKey", base58.Encode(pkBytes[:]))

-	if !config.Secrets.InConfig("address") {
+	if !config.Secrets.IsSet("address") {
 		config.Secrets.Set("address", addrStr)

 		err := config.Secrets.WriteConfig()
diff --git a/src/cmd/broker.go b/src/cmd/broker.go
index 1b84fba1..f73be7bd 100644
--- a/src/cmd/broker.go
+++ b/src/cmd/broker.go
@@ -8,6 +8,7 @@ import (
 	"github.com/KenshiTech/unchained/db"
 	"github.com/KenshiTech/unchained/ethereum"
 	"github.com/KenshiTech/unchained/net"
+	"github.com/KenshiTech/unchained/plugins/logs"
 	"github.com/KenshiTech/unchained/plugins/uniswap"

 	"github.com/spf13/cobra"
@@ -23,6 +24,7 @@ var brokerCmd = &cobra.Command{
 		db.Start()
 		ethereum.Start()
 		uniswap.Setup()
+		logs.Setup()
 		net.StartServer()
 	},
 }
diff --git a/src/cmd/root.go b/src/cmd/root.go
index bfeee118..fd29639e 100644
--- a/src/cmd/root.go
+++ b/src/cmd/root.go
@@ -10,6 +10,7 @@ import (

 var configPath string
 var secretsPath string
+var contextPath string
 var printVersion bool

 // rootCmd represents the base command when called without any subcommands
@@ -50,6 +51,7 @@ func init() {

 	rootCmd.PersistentFlags().StringVarP(&configPath, "config", "c", "./conf.yaml", "Config file")
 	rootCmd.PersistentFlags().StringVarP(&secretsPath, "secrets", "s", "./secrets.yaml", "Secrets file")
+	rootCmd.PersistentFlags().StringVarP(&contextPath, "context", "x", "./context", "Context DB")
 	rootCmd.MarkFlagFilename("config", "yaml")
 	rootCmd.MarkFlagRequired("config")
 }
diff --git a/src/cmd/worker.go b/src/cmd/worker.go
index 61692b0e..0865ded9 100644
--- a/src/cmd/worker.go
+++ b/src/cmd/worker.go
@@ -7,9 +7,11 @@ import (
 	"github.com/KenshiTech/unchained/bls"
 	"github.com/KenshiTech/unchained/config"
 	"github.com/KenshiTech/unchained/constants"
+	"github.com/KenshiTech/unchained/db"
 	"github.com/KenshiTech/unchained/ethereum"
 	"github.com/KenshiTech/unchained/log"
"github.com/KenshiTech/unchained/net/client" + "github.com/KenshiTech/unchained/persistence" "github.com/KenshiTech/unchained/plugins/logs" "github.com/KenshiTech/unchained/plugins/uniswap" @@ -35,11 +37,14 @@ var workerCmd = &cobra.Command{ config.LoadConfig(configPath, secretsPath) bls.InitClientIdentity() + db.Start() client.StartClient() ethereum.Start() uniswap.Setup() uniswap.Start() + logs.Setup() logs.Start() + persistence.Start(contextPath) client.ClientBlock() }, } diff --git a/src/config/config.go b/src/config/config.go index c866bb8e..920b666e 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -11,7 +11,6 @@ func defaults() { Config.SetDefault("name", petname.Generate(3, "-")) Config.SetDefault("log", "info") Config.SetDefault("rpc.ethereum", "https://ethereum.publicnode.com") - Config.SetDefault("broker.uri", "wss://shinobi.brokers.kenshi.io") Config.SetDefault("broker.bind", "0.0.0.0:9123") } diff --git a/src/constants/constants.go b/src/constants/constants.go index aabc59cc..0f73ebaa 100644 --- a/src/constants/constants.go +++ b/src/constants/constants.go @@ -1,4 +1,4 @@ package constants -var Version = "0.11.8" -var ProtocolVersion = "0.11.8" +var Version = "0.11.9" +var ProtocolVersion = "0.11.9" diff --git a/src/constants/opcodes/opcodes.go b/src/constants/opcodes/opcodes.go new file mode 100644 index 00000000..a3f75dea --- /dev/null +++ b/src/constants/opcodes/opcodes.go @@ -0,0 +1,19 @@ +package opcodes + +// TODO: Should we have a Data opcode instead of PriceReport & EventLog? +const ( + Hello = iota + KoskChallenge + KoskResult + + RegisterConsumer + + Feedback + Error + + PriceReport + PriceReportBroadcast + + EventLog + EventLogBroadcast +) diff --git a/src/datasets/logs.go b/src/datasets/logs.go new file mode 100644 index 00000000..3fef8fff --- /dev/null +++ b/src/datasets/logs.go @@ -0,0 +1,27 @@ +package datasets + +type EventLogArg struct { + Name string + Value any +} + +type EventLog struct { + LogIndex uint64 + Block uint64 + Address string + Event string + Chain string + TxHash [32]byte + Args []EventLogArg +} + +type EventLogReport struct { + EventLog + Signature [48]byte +} + +type BroadcastEventPacket struct { + Info EventLog + Signature [48]byte + Signers [][]byte +} diff --git a/src/datasets/uniswap.go b/src/datasets/uniswap.go index dda382cd..4b73fb05 100644 --- a/src/datasets/uniswap.go +++ b/src/datasets/uniswap.go @@ -15,7 +15,7 @@ type PriceReport struct { Signature [48]byte } -type BroadcastPacket struct { +type BroadcastPricePacket struct { Info PriceInfo Signature [48]byte Signers [][]byte diff --git a/src/db/db.go b/src/db/db.go index 93dd2b10..9ba95c10 100644 --- a/src/db/db.go +++ b/src/db/db.go @@ -14,6 +14,10 @@ var dbClient *ent.Client func Start() { + if !config.Config.IsSet("database.url") { + return + } + var err error dbUrl := config.Config.GetString("database.url") diff --git a/src/ent/client.go b/src/ent/client.go index 109fdd56..08e03597 100644 --- a/src/ent/client.go +++ b/src/ent/client.go @@ -16,6 +16,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/KenshiTech/unchained/ent/assetprice" + "github.com/KenshiTech/unchained/ent/eventlog" "github.com/KenshiTech/unchained/ent/signer" ) @@ -26,6 +27,8 @@ type Client struct { Schema *migrate.Schema // AssetPrice is the client for interacting with the AssetPrice builders. AssetPrice *AssetPriceClient + // EventLog is the client for interacting with the EventLog builders. 
+ EventLog *EventLogClient // Signer is the client for interacting with the Signer builders. Signer *SignerClient } @@ -40,6 +43,7 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) c.AssetPrice = NewAssetPriceClient(c.config) + c.EventLog = NewEventLogClient(c.config) c.Signer = NewSignerClient(c.config) } @@ -134,6 +138,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { ctx: ctx, config: cfg, AssetPrice: NewAssetPriceClient(cfg), + EventLog: NewEventLogClient(cfg), Signer: NewSignerClient(cfg), }, nil } @@ -155,6 +160,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) ctx: ctx, config: cfg, AssetPrice: NewAssetPriceClient(cfg), + EventLog: NewEventLogClient(cfg), Signer: NewSignerClient(cfg), }, nil } @@ -185,6 +191,7 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { c.AssetPrice.Use(hooks...) + c.EventLog.Use(hooks...) c.Signer.Use(hooks...) } @@ -192,6 +199,7 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { c.AssetPrice.Intercept(interceptors...) + c.EventLog.Intercept(interceptors...) c.Signer.Intercept(interceptors...) } @@ -200,6 +208,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { switch m := m.(type) { case *AssetPriceMutation: return c.AssetPrice.mutate(ctx, m) + case *EventLogMutation: + return c.EventLog.mutate(ctx, m) case *SignerMutation: return c.Signer.mutate(ctx, m) default: @@ -356,6 +366,155 @@ func (c *AssetPriceClient) mutate(ctx context.Context, m *AssetPriceMutation) (V } } +// EventLogClient is a client for the EventLog schema. +type EventLogClient struct { + config +} + +// NewEventLogClient returns a client for the EventLog from the given config. +func NewEventLogClient(c config) *EventLogClient { + return &EventLogClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `eventlog.Hooks(f(g(h())))`. +func (c *EventLogClient) Use(hooks ...Hook) { + c.hooks.EventLog = append(c.hooks.EventLog, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `eventlog.Intercept(f(g(h())))`. +func (c *EventLogClient) Intercept(interceptors ...Interceptor) { + c.inters.EventLog = append(c.inters.EventLog, interceptors...) +} + +// Create returns a builder for creating a EventLog entity. +func (c *EventLogClient) Create() *EventLogCreate { + mutation := newEventLogMutation(c.config, OpCreate) + return &EventLogCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of EventLog entities. +func (c *EventLogClient) CreateBulk(builders ...*EventLogCreate) *EventLogCreateBulk { + return &EventLogCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *EventLogClient) MapCreateBulk(slice any, setFunc func(*EventLogCreate, int)) *EventLogCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &EventLogCreateBulk{err: fmt.Errorf("calling to EventLogClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*EventLogCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &EventLogCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for EventLog. +func (c *EventLogClient) Update() *EventLogUpdate { + mutation := newEventLogMutation(c.config, OpUpdate) + return &EventLogUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *EventLogClient) UpdateOne(el *EventLog) *EventLogUpdateOne { + mutation := newEventLogMutation(c.config, OpUpdateOne, withEventLog(el)) + return &EventLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *EventLogClient) UpdateOneID(id int) *EventLogUpdateOne { + mutation := newEventLogMutation(c.config, OpUpdateOne, withEventLogID(id)) + return &EventLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for EventLog. +func (c *EventLogClient) Delete() *EventLogDelete { + mutation := newEventLogMutation(c.config, OpDelete) + return &EventLogDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *EventLogClient) DeleteOne(el *EventLog) *EventLogDeleteOne { + return c.DeleteOneID(el.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *EventLogClient) DeleteOneID(id int) *EventLogDeleteOne { + builder := c.Delete().Where(eventlog.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &EventLogDeleteOne{builder} +} + +// Query returns a query builder for EventLog. +func (c *EventLogClient) Query() *EventLogQuery { + return &EventLogQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeEventLog}, + inters: c.Interceptors(), + } +} + +// Get returns a EventLog entity by its id. +func (c *EventLogClient) Get(ctx context.Context, id int) (*EventLog, error) { + return c.Query().Where(eventlog.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *EventLogClient) GetX(ctx context.Context, id int) *EventLog { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QuerySigners queries the signers edge of a EventLog. +func (c *EventLogClient) QuerySigners(el *EventLog) *SignerQuery { + query := (&SignerClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := el.ID + step := sqlgraph.NewStep( + sqlgraph.From(eventlog.Table, eventlog.FieldID, id), + sqlgraph.To(signer.Table, signer.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, eventlog.SignersTable, eventlog.SignersPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(el.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *EventLogClient) Hooks() []Hook { + return c.hooks.EventLog +} + +// Interceptors returns the client interceptors. 
+func (c *EventLogClient) Interceptors() []Interceptor { + return c.inters.EventLog +} + +func (c *EventLogClient) mutate(ctx context.Context, m *EventLogMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&EventLogCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&EventLogUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&EventLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&EventLogDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown EventLog mutation op: %q", m.Op()) + } +} + // SignerClient is a client for the Signer schema. type SignerClient struct { config @@ -480,6 +639,22 @@ func (c *SignerClient) QueryAssetPrice(s *Signer) *AssetPriceQuery { return query } +// QueryEventLogs queries the eventLogs edge of a Signer. +func (c *SignerClient) QueryEventLogs(s *Signer) *EventLogQuery { + query := (&EventLogClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := s.ID + step := sqlgraph.NewStep( + sqlgraph.From(signer.Table, signer.FieldID, id), + sqlgraph.To(eventlog.Table, eventlog.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, signer.EventLogsTable, signer.EventLogsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(s.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. func (c *SignerClient) Hooks() []Hook { return c.hooks.Signer @@ -508,9 +683,9 @@ func (c *SignerClient) mutate(ctx context.Context, m *SignerMutation) (Value, er // hooks and interceptors per client, for fast access. type ( hooks struct { - AssetPrice, Signer []ent.Hook + AssetPrice, EventLog, Signer []ent.Hook } inters struct { - AssetPrice, Signer []ent.Interceptor + AssetPrice, EventLog, Signer []ent.Interceptor } ) diff --git a/src/ent/ent.go b/src/ent/ent.go index 3bd1f0b6..01f80a4f 100644 --- a/src/ent/ent.go +++ b/src/ent/ent.go @@ -13,6 +13,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/KenshiTech/unchained/ent/assetprice" + "github.com/KenshiTech/unchained/ent/eventlog" "github.com/KenshiTech/unchained/ent/signer" ) @@ -75,6 +76,7 @@ func checkColumn(table, column string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ assetprice.Table: assetprice.ValidColumn, + eventlog.Table: eventlog.ValidColumn, signer.Table: signer.ValidColumn, }) }) diff --git a/src/ent/eventlog.go b/src/ent/eventlog.go new file mode 100644 index 00000000..9c7ca5ae --- /dev/null +++ b/src/ent/eventlog.go @@ -0,0 +1,223 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/KenshiTech/unchained/datasets" + "github.com/KenshiTech/unchained/ent/eventlog" +) + +// EventLog is the model entity for the EventLog schema. +type EventLog struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Block holds the value of the "block" field. + Block uint64 `json:"block,omitempty"` + // SignersCount holds the value of the "signersCount" field. + SignersCount uint64 `json:"signersCount,omitempty"` + // Signature holds the value of the "signature" field. + Signature []byte `json:"signature,omitempty"` + // Address holds the value of the "address" field. 
+ Address string `json:"address,omitempty"` + // Chain holds the value of the "chain" field. + Chain string `json:"chain,omitempty"` + // Index holds the value of the "index" field. + Index uint64 `json:"index,omitempty"` + // Event holds the value of the "event" field. + Event string `json:"event,omitempty"` + // Transaction holds the value of the "transaction" field. + Transaction []byte `json:"transaction,omitempty"` + // Args holds the value of the "args" field. + Args []datasets.EventLogArg `json:"args,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the EventLogQuery when eager-loading is set. + Edges EventLogEdges `json:"edges"` + selectValues sql.SelectValues +} + +// EventLogEdges holds the relations/edges for other nodes in the graph. +type EventLogEdges struct { + // Signers holds the value of the signers edge. + Signers []*Signer `json:"signers,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// SignersOrErr returns the Signers value or an error if the edge +// was not loaded in eager-loading. +func (e EventLogEdges) SignersOrErr() ([]*Signer, error) { + if e.loadedTypes[0] { + return e.Signers, nil + } + return nil, &NotLoadedError{edge: "signers"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*EventLog) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case eventlog.FieldSignature, eventlog.FieldTransaction, eventlog.FieldArgs: + values[i] = new([]byte) + case eventlog.FieldID, eventlog.FieldBlock, eventlog.FieldSignersCount, eventlog.FieldIndex: + values[i] = new(sql.NullInt64) + case eventlog.FieldAddress, eventlog.FieldChain, eventlog.FieldEvent: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the EventLog fields. 
+func (el *EventLog) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case eventlog.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + el.ID = int(value.Int64) + case eventlog.FieldBlock: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field block", values[i]) + } else if value.Valid { + el.Block = uint64(value.Int64) + } + case eventlog.FieldSignersCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field signersCount", values[i]) + } else if value.Valid { + el.SignersCount = uint64(value.Int64) + } + case eventlog.FieldSignature: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field signature", values[i]) + } else if value != nil { + el.Signature = *value + } + case eventlog.FieldAddress: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field address", values[i]) + } else if value.Valid { + el.Address = value.String + } + case eventlog.FieldChain: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field chain", values[i]) + } else if value.Valid { + el.Chain = value.String + } + case eventlog.FieldIndex: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field index", values[i]) + } else if value.Valid { + el.Index = uint64(value.Int64) + } + case eventlog.FieldEvent: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field event", values[i]) + } else if value.Valid { + el.Event = value.String + } + case eventlog.FieldTransaction: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field transaction", values[i]) + } else if value != nil { + el.Transaction = *value + } + case eventlog.FieldArgs: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field args", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &el.Args); err != nil { + return fmt.Errorf("unmarshal field args: %w", err) + } + } + default: + el.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the EventLog. +// This includes values selected through modifiers, order, etc. +func (el *EventLog) Value(name string) (ent.Value, error) { + return el.selectValues.Get(name) +} + +// QuerySigners queries the "signers" edge of the EventLog entity. +func (el *EventLog) QuerySigners() *SignerQuery { + return NewEventLogClient(el.config).QuerySigners(el) +} + +// Update returns a builder for updating this EventLog. +// Note that you need to call EventLog.Unwrap() before calling this method if this EventLog +// was returned from a transaction, and the transaction was committed or rolled back. +func (el *EventLog) Update() *EventLogUpdateOne { + return NewEventLogClient(el.config).UpdateOne(el) +} + +// Unwrap unwraps the EventLog entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (el *EventLog) Unwrap() *EventLog { + _tx, ok := el.config.driver.(*txDriver) + if !ok { + panic("ent: EventLog is not a transactional entity") + } + el.config.driver = _tx.drv + return el +} + +// String implements the fmt.Stringer. +func (el *EventLog) String() string { + var builder strings.Builder + builder.WriteString("EventLog(") + builder.WriteString(fmt.Sprintf("id=%v, ", el.ID)) + builder.WriteString("block=") + builder.WriteString(fmt.Sprintf("%v", el.Block)) + builder.WriteString(", ") + builder.WriteString("signersCount=") + builder.WriteString(fmt.Sprintf("%v", el.SignersCount)) + builder.WriteString(", ") + builder.WriteString("signature=") + builder.WriteString(fmt.Sprintf("%v", el.Signature)) + builder.WriteString(", ") + builder.WriteString("address=") + builder.WriteString(el.Address) + builder.WriteString(", ") + builder.WriteString("chain=") + builder.WriteString(el.Chain) + builder.WriteString(", ") + builder.WriteString("index=") + builder.WriteString(fmt.Sprintf("%v", el.Index)) + builder.WriteString(", ") + builder.WriteString("event=") + builder.WriteString(el.Event) + builder.WriteString(", ") + builder.WriteString("transaction=") + builder.WriteString(fmt.Sprintf("%v", el.Transaction)) + builder.WriteString(", ") + builder.WriteString("args=") + builder.WriteString(fmt.Sprintf("%v", el.Args)) + builder.WriteByte(')') + return builder.String() +} + +// EventLogs is a parsable slice of EventLog. +type EventLogs []*EventLog diff --git a/src/ent/eventlog/eventlog.go b/src/ent/eventlog/eventlog.go new file mode 100644 index 00000000..03c87df7 --- /dev/null +++ b/src/ent/eventlog/eventlog.go @@ -0,0 +1,138 @@ +// Code generated by ent, DO NOT EDIT. + +package eventlog + +import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the eventlog type in the database. + Label = "event_log" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldBlock holds the string denoting the block field in the database. + FieldBlock = "block" + // FieldSignersCount holds the string denoting the signerscount field in the database. + FieldSignersCount = "signers_count" + // FieldSignature holds the string denoting the signature field in the database. + FieldSignature = "signature" + // FieldAddress holds the string denoting the address field in the database. + FieldAddress = "address" + // FieldChain holds the string denoting the chain field in the database. + FieldChain = "chain" + // FieldIndex holds the string denoting the index field in the database. + FieldIndex = "index" + // FieldEvent holds the string denoting the event field in the database. + FieldEvent = "event" + // FieldTransaction holds the string denoting the transaction field in the database. + FieldTransaction = "transaction" + // FieldArgs holds the string denoting the args field in the database. + FieldArgs = "args" + // EdgeSigners holds the string denoting the signers edge name in mutations. + EdgeSigners = "signers" + // Table holds the table name of the eventlog in the database. + Table = "event_logs" + // SignersTable is the table that holds the signers relation/edge. The primary key declared below. + SignersTable = "event_log_signers" + // SignersInverseTable is the table name for the Signer entity. + // It exists in this package in order to avoid circular dependency with the "signer" package. + SignersInverseTable = "signers" +) + +// Columns holds all SQL columns for eventlog fields. 
+var Columns = []string{ + FieldID, + FieldBlock, + FieldSignersCount, + FieldSignature, + FieldAddress, + FieldChain, + FieldIndex, + FieldEvent, + FieldTransaction, + FieldArgs, +} + +var ( + // SignersPrimaryKey and SignersColumn2 are the table columns denoting the + // primary key for the signers relation (M2M). + SignersPrimaryKey = []string{"event_log_id", "signer_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // SignatureValidator is a validator for the "signature" field. It is called by the builders before save. + SignatureValidator func([]byte) error + // TransactionValidator is a validator for the "transaction" field. It is called by the builders before save. + TransactionValidator func([]byte) error +) + +// OrderOption defines the ordering options for the EventLog queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByBlock orders the results by the block field. +func ByBlock(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBlock, opts...).ToFunc() +} + +// BySignersCountField orders the results by the signersCount field. +func BySignersCountField(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSignersCount, opts...).ToFunc() +} + +// ByAddress orders the results by the address field. +func ByAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAddress, opts...).ToFunc() +} + +// ByChain orders the results by the chain field. +func ByChain(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldChain, opts...).ToFunc() +} + +// ByIndex orders the results by the index field. +func ByIndex(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIndex, opts...).ToFunc() +} + +// ByEvent orders the results by the event field. +func ByEvent(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEvent, opts...).ToFunc() +} + +// BySignersCount orders the results by signers count. +func BySignersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newSignersStep(), opts...) + } +} + +// BySigners orders the results by signers terms. +func BySigners(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newSignersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newSignersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(SignersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, SignersTable, SignersPrimaryKey...), + ) +} diff --git a/src/ent/eventlog/where.go b/src/ent/eventlog/where.go new file mode 100644 index 00000000..9261bad1 --- /dev/null +++ b/src/ent/eventlog/where.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. + +package eventlog + +import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/KenshiTech/unchained/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldID, id)) +} + +// Block applies equality check predicate on the "block" field. It's identical to BlockEQ. +func Block(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldBlock, v)) +} + +// SignersCount applies equality check predicate on the "signersCount" field. It's identical to SignersCountEQ. +func SignersCount(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldSignersCount, v)) +} + +// Signature applies equality check predicate on the "signature" field. It's identical to SignatureEQ. +func Signature(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldSignature, v)) +} + +// Address applies equality check predicate on the "address" field. It's identical to AddressEQ. +func Address(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldAddress, v)) +} + +// Chain applies equality check predicate on the "chain" field. It's identical to ChainEQ. +func Chain(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldChain, v)) +} + +// Index applies equality check predicate on the "index" field. It's identical to IndexEQ. +func Index(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldIndex, v)) +} + +// Event applies equality check predicate on the "event" field. It's identical to EventEQ. +func Event(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldEvent, v)) +} + +// Transaction applies equality check predicate on the "transaction" field. It's identical to TransactionEQ. +func Transaction(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldTransaction, v)) +} + +// BlockEQ applies the EQ predicate on the "block" field. +func BlockEQ(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldBlock, v)) +} + +// BlockNEQ applies the NEQ predicate on the "block" field. +func BlockNEQ(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldBlock, v)) +} + +// BlockIn applies the In predicate on the "block" field. +func BlockIn(vs ...uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldBlock, vs...)) +} + +// BlockNotIn applies the NotIn predicate on the "block" field. 
+func BlockNotIn(vs ...uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldBlock, vs...)) +} + +// BlockGT applies the GT predicate on the "block" field. +func BlockGT(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldBlock, v)) +} + +// BlockGTE applies the GTE predicate on the "block" field. +func BlockGTE(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldBlock, v)) +} + +// BlockLT applies the LT predicate on the "block" field. +func BlockLT(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldBlock, v)) +} + +// BlockLTE applies the LTE predicate on the "block" field. +func BlockLTE(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldBlock, v)) +} + +// SignersCountEQ applies the EQ predicate on the "signersCount" field. +func SignersCountEQ(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldSignersCount, v)) +} + +// SignersCountNEQ applies the NEQ predicate on the "signersCount" field. +func SignersCountNEQ(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldSignersCount, v)) +} + +// SignersCountIn applies the In predicate on the "signersCount" field. +func SignersCountIn(vs ...uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldSignersCount, vs...)) +} + +// SignersCountNotIn applies the NotIn predicate on the "signersCount" field. +func SignersCountNotIn(vs ...uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldSignersCount, vs...)) +} + +// SignersCountGT applies the GT predicate on the "signersCount" field. +func SignersCountGT(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldSignersCount, v)) +} + +// SignersCountGTE applies the GTE predicate on the "signersCount" field. +func SignersCountGTE(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldSignersCount, v)) +} + +// SignersCountLT applies the LT predicate on the "signersCount" field. +func SignersCountLT(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldSignersCount, v)) +} + +// SignersCountLTE applies the LTE predicate on the "signersCount" field. +func SignersCountLTE(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldSignersCount, v)) +} + +// SignatureEQ applies the EQ predicate on the "signature" field. +func SignatureEQ(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldSignature, v)) +} + +// SignatureNEQ applies the NEQ predicate on the "signature" field. +func SignatureNEQ(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldSignature, v)) +} + +// SignatureIn applies the In predicate on the "signature" field. +func SignatureIn(vs ...[]byte) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldSignature, vs...)) +} + +// SignatureNotIn applies the NotIn predicate on the "signature" field. +func SignatureNotIn(vs ...[]byte) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldSignature, vs...)) +} + +// SignatureGT applies the GT predicate on the "signature" field. +func SignatureGT(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldSignature, v)) +} + +// SignatureGTE applies the GTE predicate on the "signature" field. 
+func SignatureGTE(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldSignature, v)) +} + +// SignatureLT applies the LT predicate on the "signature" field. +func SignatureLT(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldSignature, v)) +} + +// SignatureLTE applies the LTE predicate on the "signature" field. +func SignatureLTE(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldSignature, v)) +} + +// AddressEQ applies the EQ predicate on the "address" field. +func AddressEQ(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldAddress, v)) +} + +// AddressNEQ applies the NEQ predicate on the "address" field. +func AddressNEQ(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldAddress, v)) +} + +// AddressIn applies the In predicate on the "address" field. +func AddressIn(vs ...string) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldAddress, vs...)) +} + +// AddressNotIn applies the NotIn predicate on the "address" field. +func AddressNotIn(vs ...string) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldAddress, vs...)) +} + +// AddressGT applies the GT predicate on the "address" field. +func AddressGT(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldAddress, v)) +} + +// AddressGTE applies the GTE predicate on the "address" field. +func AddressGTE(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldAddress, v)) +} + +// AddressLT applies the LT predicate on the "address" field. +func AddressLT(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldAddress, v)) +} + +// AddressLTE applies the LTE predicate on the "address" field. +func AddressLTE(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldAddress, v)) +} + +// AddressContains applies the Contains predicate on the "address" field. +func AddressContains(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldContains(FieldAddress, v)) +} + +// AddressHasPrefix applies the HasPrefix predicate on the "address" field. +func AddressHasPrefix(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldHasPrefix(FieldAddress, v)) +} + +// AddressHasSuffix applies the HasSuffix predicate on the "address" field. +func AddressHasSuffix(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldHasSuffix(FieldAddress, v)) +} + +// AddressEqualFold applies the EqualFold predicate on the "address" field. +func AddressEqualFold(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEqualFold(FieldAddress, v)) +} + +// AddressContainsFold applies the ContainsFold predicate on the "address" field. +func AddressContainsFold(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldContainsFold(FieldAddress, v)) +} + +// ChainEQ applies the EQ predicate on the "chain" field. +func ChainEQ(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldChain, v)) +} + +// ChainNEQ applies the NEQ predicate on the "chain" field. +func ChainNEQ(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldChain, v)) +} + +// ChainIn applies the In predicate on the "chain" field. +func ChainIn(vs ...string) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldChain, vs...)) +} + +// ChainNotIn applies the NotIn predicate on the "chain" field. 
+func ChainNotIn(vs ...string) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldChain, vs...)) +} + +// ChainGT applies the GT predicate on the "chain" field. +func ChainGT(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldChain, v)) +} + +// ChainGTE applies the GTE predicate on the "chain" field. +func ChainGTE(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldChain, v)) +} + +// ChainLT applies the LT predicate on the "chain" field. +func ChainLT(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldChain, v)) +} + +// ChainLTE applies the LTE predicate on the "chain" field. +func ChainLTE(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldChain, v)) +} + +// ChainContains applies the Contains predicate on the "chain" field. +func ChainContains(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldContains(FieldChain, v)) +} + +// ChainHasPrefix applies the HasPrefix predicate on the "chain" field. +func ChainHasPrefix(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldHasPrefix(FieldChain, v)) +} + +// ChainHasSuffix applies the HasSuffix predicate on the "chain" field. +func ChainHasSuffix(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldHasSuffix(FieldChain, v)) +} + +// ChainEqualFold applies the EqualFold predicate on the "chain" field. +func ChainEqualFold(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEqualFold(FieldChain, v)) +} + +// ChainContainsFold applies the ContainsFold predicate on the "chain" field. +func ChainContainsFold(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldContainsFold(FieldChain, v)) +} + +// IndexEQ applies the EQ predicate on the "index" field. +func IndexEQ(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldIndex, v)) +} + +// IndexNEQ applies the NEQ predicate on the "index" field. +func IndexNEQ(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldIndex, v)) +} + +// IndexIn applies the In predicate on the "index" field. +func IndexIn(vs ...uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldIndex, vs...)) +} + +// IndexNotIn applies the NotIn predicate on the "index" field. +func IndexNotIn(vs ...uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldIndex, vs...)) +} + +// IndexGT applies the GT predicate on the "index" field. +func IndexGT(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldIndex, v)) +} + +// IndexGTE applies the GTE predicate on the "index" field. +func IndexGTE(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldIndex, v)) +} + +// IndexLT applies the LT predicate on the "index" field. +func IndexLT(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldIndex, v)) +} + +// IndexLTE applies the LTE predicate on the "index" field. +func IndexLTE(v uint64) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldIndex, v)) +} + +// EventEQ applies the EQ predicate on the "event" field. +func EventEQ(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldEvent, v)) +} + +// EventNEQ applies the NEQ predicate on the "event" field. +func EventNEQ(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldEvent, v)) +} + +// EventIn applies the In predicate on the "event" field. 
+func EventIn(vs ...string) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldEvent, vs...)) +} + +// EventNotIn applies the NotIn predicate on the "event" field. +func EventNotIn(vs ...string) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldEvent, vs...)) +} + +// EventGT applies the GT predicate on the "event" field. +func EventGT(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldEvent, v)) +} + +// EventGTE applies the GTE predicate on the "event" field. +func EventGTE(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldEvent, v)) +} + +// EventLT applies the LT predicate on the "event" field. +func EventLT(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldEvent, v)) +} + +// EventLTE applies the LTE predicate on the "event" field. +func EventLTE(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldEvent, v)) +} + +// EventContains applies the Contains predicate on the "event" field. +func EventContains(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldContains(FieldEvent, v)) +} + +// EventHasPrefix applies the HasPrefix predicate on the "event" field. +func EventHasPrefix(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldHasPrefix(FieldEvent, v)) +} + +// EventHasSuffix applies the HasSuffix predicate on the "event" field. +func EventHasSuffix(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldHasSuffix(FieldEvent, v)) +} + +// EventEqualFold applies the EqualFold predicate on the "event" field. +func EventEqualFold(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldEqualFold(FieldEvent, v)) +} + +// EventContainsFold applies the ContainsFold predicate on the "event" field. +func EventContainsFold(v string) predicate.EventLog { + return predicate.EventLog(sql.FieldContainsFold(FieldEvent, v)) +} + +// TransactionEQ applies the EQ predicate on the "transaction" field. +func TransactionEQ(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldEQ(FieldTransaction, v)) +} + +// TransactionNEQ applies the NEQ predicate on the "transaction" field. +func TransactionNEQ(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldNEQ(FieldTransaction, v)) +} + +// TransactionIn applies the In predicate on the "transaction" field. +func TransactionIn(vs ...[]byte) predicate.EventLog { + return predicate.EventLog(sql.FieldIn(FieldTransaction, vs...)) +} + +// TransactionNotIn applies the NotIn predicate on the "transaction" field. +func TransactionNotIn(vs ...[]byte) predicate.EventLog { + return predicate.EventLog(sql.FieldNotIn(FieldTransaction, vs...)) +} + +// TransactionGT applies the GT predicate on the "transaction" field. +func TransactionGT(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldGT(FieldTransaction, v)) +} + +// TransactionGTE applies the GTE predicate on the "transaction" field. +func TransactionGTE(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldGTE(FieldTransaction, v)) +} + +// TransactionLT applies the LT predicate on the "transaction" field. +func TransactionLT(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldLT(FieldTransaction, v)) +} + +// TransactionLTE applies the LTE predicate on the "transaction" field. 
+func TransactionLTE(v []byte) predicate.EventLog { + return predicate.EventLog(sql.FieldLTE(FieldTransaction, v)) +} + +// HasSigners applies the HasEdge predicate on the "signers" edge. +func HasSigners() predicate.EventLog { + return predicate.EventLog(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, SignersTable, SignersPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasSignersWith applies the HasEdge predicate on the "signers" edge with a given conditions (other predicates). +func HasSignersWith(preds ...predicate.Signer) predicate.EventLog { + return predicate.EventLog(func(s *sql.Selector) { + step := newSignersStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.EventLog) predicate.EventLog { + return predicate.EventLog(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.EventLog) predicate.EventLog { + return predicate.EventLog(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.EventLog) predicate.EventLog { + return predicate.EventLog(sql.NotPredicates(p)) +} diff --git a/src/ent/eventlog_create.go b/src/ent/eventlog_create.go new file mode 100644 index 00000000..19d86529 --- /dev/null +++ b/src/ent/eventlog_create.go @@ -0,0 +1,984 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/KenshiTech/unchained/datasets" + "github.com/KenshiTech/unchained/ent/eventlog" + "github.com/KenshiTech/unchained/ent/signer" +) + +// EventLogCreate is the builder for creating a EventLog entity. +type EventLogCreate struct { + config + mutation *EventLogMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetBlock sets the "block" field. +func (elc *EventLogCreate) SetBlock(u uint64) *EventLogCreate { + elc.mutation.SetBlock(u) + return elc +} + +// SetSignersCount sets the "signersCount" field. +func (elc *EventLogCreate) SetSignersCount(u uint64) *EventLogCreate { + elc.mutation.SetSignersCount(u) + return elc +} + +// SetSignature sets the "signature" field. +func (elc *EventLogCreate) SetSignature(b []byte) *EventLogCreate { + elc.mutation.SetSignature(b) + return elc +} + +// SetAddress sets the "address" field. +func (elc *EventLogCreate) SetAddress(s string) *EventLogCreate { + elc.mutation.SetAddress(s) + return elc +} + +// SetChain sets the "chain" field. +func (elc *EventLogCreate) SetChain(s string) *EventLogCreate { + elc.mutation.SetChain(s) + return elc +} + +// SetIndex sets the "index" field. +func (elc *EventLogCreate) SetIndex(u uint64) *EventLogCreate { + elc.mutation.SetIndex(u) + return elc +} + +// SetEvent sets the "event" field. +func (elc *EventLogCreate) SetEvent(s string) *EventLogCreate { + elc.mutation.SetEvent(s) + return elc +} + +// SetTransaction sets the "transaction" field. +func (elc *EventLogCreate) SetTransaction(b []byte) *EventLogCreate { + elc.mutation.SetTransaction(b) + return elc +} + +// SetArgs sets the "args" field. 
+func (elc *EventLogCreate) SetArgs(dla []datasets.EventLogArg) *EventLogCreate { + elc.mutation.SetArgs(dla) + return elc +} + +// AddSignerIDs adds the "signers" edge to the Signer entity by IDs. +func (elc *EventLogCreate) AddSignerIDs(ids ...int) *EventLogCreate { + elc.mutation.AddSignerIDs(ids...) + return elc +} + +// AddSigners adds the "signers" edges to the Signer entity. +func (elc *EventLogCreate) AddSigners(s ...*Signer) *EventLogCreate { + ids := make([]int, len(s)) + for i := range s { + ids[i] = s[i].ID + } + return elc.AddSignerIDs(ids...) +} + +// Mutation returns the EventLogMutation object of the builder. +func (elc *EventLogCreate) Mutation() *EventLogMutation { + return elc.mutation +} + +// Save creates the EventLog in the database. +func (elc *EventLogCreate) Save(ctx context.Context) (*EventLog, error) { + return withHooks(ctx, elc.sqlSave, elc.mutation, elc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (elc *EventLogCreate) SaveX(ctx context.Context) *EventLog { + v, err := elc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (elc *EventLogCreate) Exec(ctx context.Context) error { + _, err := elc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (elc *EventLogCreate) ExecX(ctx context.Context) { + if err := elc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (elc *EventLogCreate) check() error { + if _, ok := elc.mutation.Block(); !ok { + return &ValidationError{Name: "block", err: errors.New(`ent: missing required field "EventLog.block"`)} + } + if _, ok := elc.mutation.SignersCount(); !ok { + return &ValidationError{Name: "signersCount", err: errors.New(`ent: missing required field "EventLog.signersCount"`)} + } + if _, ok := elc.mutation.Signature(); !ok { + return &ValidationError{Name: "signature", err: errors.New(`ent: missing required field "EventLog.signature"`)} + } + if v, ok := elc.mutation.Signature(); ok { + if err := eventlog.SignatureValidator(v); err != nil { + return &ValidationError{Name: "signature", err: fmt.Errorf(`ent: validator failed for field "EventLog.signature": %w`, err)} + } + } + if _, ok := elc.mutation.Address(); !ok { + return &ValidationError{Name: "address", err: errors.New(`ent: missing required field "EventLog.address"`)} + } + if _, ok := elc.mutation.Chain(); !ok { + return &ValidationError{Name: "chain", err: errors.New(`ent: missing required field "EventLog.chain"`)} + } + if _, ok := elc.mutation.Index(); !ok { + return &ValidationError{Name: "index", err: errors.New(`ent: missing required field "EventLog.index"`)} + } + if _, ok := elc.mutation.Event(); !ok { + return &ValidationError{Name: "event", err: errors.New(`ent: missing required field "EventLog.event"`)} + } + if _, ok := elc.mutation.Transaction(); !ok { + return &ValidationError{Name: "transaction", err: errors.New(`ent: missing required field "EventLog.transaction"`)} + } + if v, ok := elc.mutation.Transaction(); ok { + if err := eventlog.TransactionValidator(v); err != nil { + return &ValidationError{Name: "transaction", err: fmt.Errorf(`ent: validator failed for field "EventLog.transaction": %w`, err)} + } + } + if _, ok := elc.mutation.Args(); !ok { + return &ValidationError{Name: "args", err: errors.New(`ent: missing required field "EventLog.args"`)} + } + if len(elc.mutation.SignersIDs()) == 0 { + return &ValidationError{Name: "signers", err: errors.New(`ent: missing 
required edge "EventLog.signers"`)} + } + return nil +} + +func (elc *EventLogCreate) sqlSave(ctx context.Context) (*EventLog, error) { + if err := elc.check(); err != nil { + return nil, err + } + _node, _spec := elc.createSpec() + if err := sqlgraph.CreateNode(ctx, elc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + elc.mutation.id = &_node.ID + elc.mutation.done = true + return _node, nil +} + +func (elc *EventLogCreate) createSpec() (*EventLog, *sqlgraph.CreateSpec) { + var ( + _node = &EventLog{config: elc.config} + _spec = sqlgraph.NewCreateSpec(eventlog.Table, sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt)) + ) + _spec.OnConflict = elc.conflict + if value, ok := elc.mutation.Block(); ok { + _spec.SetField(eventlog.FieldBlock, field.TypeUint64, value) + _node.Block = value + } + if value, ok := elc.mutation.SignersCount(); ok { + _spec.SetField(eventlog.FieldSignersCount, field.TypeUint64, value) + _node.SignersCount = value + } + if value, ok := elc.mutation.Signature(); ok { + _spec.SetField(eventlog.FieldSignature, field.TypeBytes, value) + _node.Signature = value + } + if value, ok := elc.mutation.Address(); ok { + _spec.SetField(eventlog.FieldAddress, field.TypeString, value) + _node.Address = value + } + if value, ok := elc.mutation.Chain(); ok { + _spec.SetField(eventlog.FieldChain, field.TypeString, value) + _node.Chain = value + } + if value, ok := elc.mutation.Index(); ok { + _spec.SetField(eventlog.FieldIndex, field.TypeUint64, value) + _node.Index = value + } + if value, ok := elc.mutation.Event(); ok { + _spec.SetField(eventlog.FieldEvent, field.TypeString, value) + _node.Event = value + } + if value, ok := elc.mutation.Transaction(); ok { + _spec.SetField(eventlog.FieldTransaction, field.TypeBytes, value) + _node.Transaction = value + } + if value, ok := elc.mutation.Args(); ok { + _spec.SetField(eventlog.FieldArgs, field.TypeJSON, value) + _node.Args = value + } + if nodes := elc.mutation.SignersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.EventLog.Create(). +// SetBlock(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.EventLogUpsert) { +// SetBlock(v+v). +// }). +// Exec(ctx) +func (elc *EventLogCreate) OnConflict(opts ...sql.ConflictOption) *EventLogUpsertOne { + elc.conflict = opts + return &EventLogUpsertOne{ + create: elc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.EventLog.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (elc *EventLogCreate) OnConflictColumns(columns ...string) *EventLogUpsertOne { + elc.conflict = append(elc.conflict, sql.ConflictColumns(columns...)) + return &EventLogUpsertOne{ + create: elc, + } +} + +type ( + // EventLogUpsertOne is the builder for "upsert"-ing + // one EventLog node. + EventLogUpsertOne struct { + create *EventLogCreate + } + + // EventLogUpsert is the "OnConflict" setter. + EventLogUpsert struct { + *sql.UpdateSet + } +) + +// SetBlock sets the "block" field. +func (u *EventLogUpsert) SetBlock(v uint64) *EventLogUpsert { + u.Set(eventlog.FieldBlock, v) + return u +} + +// UpdateBlock sets the "block" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateBlock() *EventLogUpsert { + u.SetExcluded(eventlog.FieldBlock) + return u +} + +// AddBlock adds v to the "block" field. +func (u *EventLogUpsert) AddBlock(v uint64) *EventLogUpsert { + u.Add(eventlog.FieldBlock, v) + return u +} + +// SetSignersCount sets the "signersCount" field. +func (u *EventLogUpsert) SetSignersCount(v uint64) *EventLogUpsert { + u.Set(eventlog.FieldSignersCount, v) + return u +} + +// UpdateSignersCount sets the "signersCount" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateSignersCount() *EventLogUpsert { + u.SetExcluded(eventlog.FieldSignersCount) + return u +} + +// AddSignersCount adds v to the "signersCount" field. +func (u *EventLogUpsert) AddSignersCount(v uint64) *EventLogUpsert { + u.Add(eventlog.FieldSignersCount, v) + return u +} + +// SetSignature sets the "signature" field. +func (u *EventLogUpsert) SetSignature(v []byte) *EventLogUpsert { + u.Set(eventlog.FieldSignature, v) + return u +} + +// UpdateSignature sets the "signature" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateSignature() *EventLogUpsert { + u.SetExcluded(eventlog.FieldSignature) + return u +} + +// SetAddress sets the "address" field. +func (u *EventLogUpsert) SetAddress(v string) *EventLogUpsert { + u.Set(eventlog.FieldAddress, v) + return u +} + +// UpdateAddress sets the "address" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateAddress() *EventLogUpsert { + u.SetExcluded(eventlog.FieldAddress) + return u +} + +// SetChain sets the "chain" field. +func (u *EventLogUpsert) SetChain(v string) *EventLogUpsert { + u.Set(eventlog.FieldChain, v) + return u +} + +// UpdateChain sets the "chain" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateChain() *EventLogUpsert { + u.SetExcluded(eventlog.FieldChain) + return u +} + +// SetIndex sets the "index" field. +func (u *EventLogUpsert) SetIndex(v uint64) *EventLogUpsert { + u.Set(eventlog.FieldIndex, v) + return u +} + +// UpdateIndex sets the "index" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateIndex() *EventLogUpsert { + u.SetExcluded(eventlog.FieldIndex) + return u +} + +// AddIndex adds v to the "index" field. +func (u *EventLogUpsert) AddIndex(v uint64) *EventLogUpsert { + u.Add(eventlog.FieldIndex, v) + return u +} + +// SetEvent sets the "event" field. +func (u *EventLogUpsert) SetEvent(v string) *EventLogUpsert { + u.Set(eventlog.FieldEvent, v) + return u +} + +// UpdateEvent sets the "event" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateEvent() *EventLogUpsert { + u.SetExcluded(eventlog.FieldEvent) + return u +} + +// SetTransaction sets the "transaction" field. 
+func (u *EventLogUpsert) SetTransaction(v []byte) *EventLogUpsert { + u.Set(eventlog.FieldTransaction, v) + return u +} + +// UpdateTransaction sets the "transaction" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateTransaction() *EventLogUpsert { + u.SetExcluded(eventlog.FieldTransaction) + return u +} + +// SetArgs sets the "args" field. +func (u *EventLogUpsert) SetArgs(v []datasets.EventLogArg) *EventLogUpsert { + u.Set(eventlog.FieldArgs, v) + return u +} + +// UpdateArgs sets the "args" field to the value that was provided on create. +func (u *EventLogUpsert) UpdateArgs() *EventLogUpsert { + u.SetExcluded(eventlog.FieldArgs) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.EventLog.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *EventLogUpsertOne) UpdateNewValues() *EventLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.EventLog.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *EventLogUpsertOne) Ignore() *EventLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *EventLogUpsertOne) DoNothing() *EventLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the EventLogCreate.OnConflict +// documentation for more info. +func (u *EventLogUpsertOne) Update(set func(*EventLogUpsert)) *EventLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&EventLogUpsert{UpdateSet: update}) + })) + return u +} + +// SetBlock sets the "block" field. +func (u *EventLogUpsertOne) SetBlock(v uint64) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetBlock(v) + }) +} + +// AddBlock adds v to the "block" field. +func (u *EventLogUpsertOne) AddBlock(v uint64) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.AddBlock(v) + }) +} + +// UpdateBlock sets the "block" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateBlock() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateBlock() + }) +} + +// SetSignersCount sets the "signersCount" field. +func (u *EventLogUpsertOne) SetSignersCount(v uint64) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetSignersCount(v) + }) +} + +// AddSignersCount adds v to the "signersCount" field. +func (u *EventLogUpsertOne) AddSignersCount(v uint64) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.AddSignersCount(v) + }) +} + +// UpdateSignersCount sets the "signersCount" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateSignersCount() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateSignersCount() + }) +} + +// SetSignature sets the "signature" field. 
+func (u *EventLogUpsertOne) SetSignature(v []byte) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetSignature(v) + }) +} + +// UpdateSignature sets the "signature" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateSignature() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateSignature() + }) +} + +// SetAddress sets the "address" field. +func (u *EventLogUpsertOne) SetAddress(v string) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetAddress(v) + }) +} + +// UpdateAddress sets the "address" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateAddress() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateAddress() + }) +} + +// SetChain sets the "chain" field. +func (u *EventLogUpsertOne) SetChain(v string) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetChain(v) + }) +} + +// UpdateChain sets the "chain" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateChain() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateChain() + }) +} + +// SetIndex sets the "index" field. +func (u *EventLogUpsertOne) SetIndex(v uint64) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetIndex(v) + }) +} + +// AddIndex adds v to the "index" field. +func (u *EventLogUpsertOne) AddIndex(v uint64) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.AddIndex(v) + }) +} + +// UpdateIndex sets the "index" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateIndex() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateIndex() + }) +} + +// SetEvent sets the "event" field. +func (u *EventLogUpsertOne) SetEvent(v string) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetEvent(v) + }) +} + +// UpdateEvent sets the "event" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateEvent() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateEvent() + }) +} + +// SetTransaction sets the "transaction" field. +func (u *EventLogUpsertOne) SetTransaction(v []byte) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetTransaction(v) + }) +} + +// UpdateTransaction sets the "transaction" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateTransaction() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateTransaction() + }) +} + +// SetArgs sets the "args" field. +func (u *EventLogUpsertOne) SetArgs(v []datasets.EventLogArg) *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.SetArgs(v) + }) +} + +// UpdateArgs sets the "args" field to the value that was provided on create. +func (u *EventLogUpsertOne) UpdateArgs() *EventLogUpsertOne { + return u.Update(func(s *EventLogUpsert) { + s.UpdateArgs() + }) +} + +// Exec executes the query. +func (u *EventLogUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for EventLogCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *EventLogUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
+func (u *EventLogUpsertOne) ID(ctx context.Context) (id int, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *EventLogUpsertOne) IDX(ctx context.Context) int { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// EventLogCreateBulk is the builder for creating many EventLog entities in bulk. +type EventLogCreateBulk struct { + config + err error + builders []*EventLogCreate + conflict []sql.ConflictOption +} + +// Save creates the EventLog entities in the database. +func (elcb *EventLogCreateBulk) Save(ctx context.Context) ([]*EventLog, error) { + if elcb.err != nil { + return nil, elcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(elcb.builders)) + nodes := make([]*EventLog, len(elcb.builders)) + mutators := make([]Mutator, len(elcb.builders)) + for i := range elcb.builders { + func(i int, root context.Context) { + builder := elcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventLogMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, elcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = elcb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, elcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, elcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (elcb *EventLogCreateBulk) SaveX(ctx context.Context) []*EventLog { + v, err := elcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (elcb *EventLogCreateBulk) Exec(ctx context.Context) error { + _, err := elcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (elcb *EventLogCreateBulk) ExecX(ctx context.Context) { + if err := elcb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.EventLog.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.EventLogUpsert) { +// SetBlock(v+v). +// }). +// Exec(ctx) +func (elcb *EventLogCreateBulk) OnConflict(opts ...sql.ConflictOption) *EventLogUpsertBulk { + elcb.conflict = opts + return &EventLogUpsertBulk{ + create: elcb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. 
Using this option is equivalent to using: +// +// client.EventLog.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (elcb *EventLogCreateBulk) OnConflictColumns(columns ...string) *EventLogUpsertBulk { + elcb.conflict = append(elcb.conflict, sql.ConflictColumns(columns...)) + return &EventLogUpsertBulk{ + create: elcb, + } +} + +// EventLogUpsertBulk is the builder for "upsert"-ing +// a bulk of EventLog nodes. +type EventLogUpsertBulk struct { + create *EventLogCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.EventLog.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *EventLogUpsertBulk) UpdateNewValues() *EventLogUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.EventLog.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *EventLogUpsertBulk) Ignore() *EventLogUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *EventLogUpsertBulk) DoNothing() *EventLogUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the EventLogCreateBulk.OnConflict +// documentation for more info. +func (u *EventLogUpsertBulk) Update(set func(*EventLogUpsert)) *EventLogUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&EventLogUpsert{UpdateSet: update}) + })) + return u +} + +// SetBlock sets the "block" field. +func (u *EventLogUpsertBulk) SetBlock(v uint64) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetBlock(v) + }) +} + +// AddBlock adds v to the "block" field. +func (u *EventLogUpsertBulk) AddBlock(v uint64) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.AddBlock(v) + }) +} + +// UpdateBlock sets the "block" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateBlock() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateBlock() + }) +} + +// SetSignersCount sets the "signersCount" field. +func (u *EventLogUpsertBulk) SetSignersCount(v uint64) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetSignersCount(v) + }) +} + +// AddSignersCount adds v to the "signersCount" field. +func (u *EventLogUpsertBulk) AddSignersCount(v uint64) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.AddSignersCount(v) + }) +} + +// UpdateSignersCount sets the "signersCount" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateSignersCount() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateSignersCount() + }) +} + +// SetSignature sets the "signature" field. +func (u *EventLogUpsertBulk) SetSignature(v []byte) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetSignature(v) + }) +} + +// UpdateSignature sets the "signature" field to the value that was provided on create. 
+func (u *EventLogUpsertBulk) UpdateSignature() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateSignature() + }) +} + +// SetAddress sets the "address" field. +func (u *EventLogUpsertBulk) SetAddress(v string) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetAddress(v) + }) +} + +// UpdateAddress sets the "address" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateAddress() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateAddress() + }) +} + +// SetChain sets the "chain" field. +func (u *EventLogUpsertBulk) SetChain(v string) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetChain(v) + }) +} + +// UpdateChain sets the "chain" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateChain() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateChain() + }) +} + +// SetIndex sets the "index" field. +func (u *EventLogUpsertBulk) SetIndex(v uint64) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetIndex(v) + }) +} + +// AddIndex adds v to the "index" field. +func (u *EventLogUpsertBulk) AddIndex(v uint64) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.AddIndex(v) + }) +} + +// UpdateIndex sets the "index" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateIndex() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateIndex() + }) +} + +// SetEvent sets the "event" field. +func (u *EventLogUpsertBulk) SetEvent(v string) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetEvent(v) + }) +} + +// UpdateEvent sets the "event" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateEvent() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateEvent() + }) +} + +// SetTransaction sets the "transaction" field. +func (u *EventLogUpsertBulk) SetTransaction(v []byte) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetTransaction(v) + }) +} + +// UpdateTransaction sets the "transaction" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateTransaction() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateTransaction() + }) +} + +// SetArgs sets the "args" field. +func (u *EventLogUpsertBulk) SetArgs(v []datasets.EventLogArg) *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.SetArgs(v) + }) +} + +// UpdateArgs sets the "args" field to the value that was provided on create. +func (u *EventLogUpsertBulk) UpdateArgs() *EventLogUpsertBulk { + return u.Update(func(s *EventLogUpsert) { + s.UpdateArgs() + }) +} + +// Exec executes the query. +func (u *EventLogUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the EventLogCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for EventLogCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *EventLogUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/src/ent/eventlog_delete.go b/src/ent/eventlog_delete.go new file mode 100644 index 00000000..3551209d --- /dev/null +++ b/src/ent/eventlog_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/KenshiTech/unchained/ent/eventlog" + "github.com/KenshiTech/unchained/ent/predicate" +) + +// EventLogDelete is the builder for deleting a EventLog entity. +type EventLogDelete struct { + config + hooks []Hook + mutation *EventLogMutation +} + +// Where appends a list predicates to the EventLogDelete builder. +func (eld *EventLogDelete) Where(ps ...predicate.EventLog) *EventLogDelete { + eld.mutation.Where(ps...) + return eld +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (eld *EventLogDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, eld.sqlExec, eld.mutation, eld.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (eld *EventLogDelete) ExecX(ctx context.Context) int { + n, err := eld.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (eld *EventLogDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(eventlog.Table, sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt)) + if ps := eld.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, eld.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + eld.mutation.done = true + return affected, err +} + +// EventLogDeleteOne is the builder for deleting a single EventLog entity. +type EventLogDeleteOne struct { + eld *EventLogDelete +} + +// Where appends a list predicates to the EventLogDelete builder. +func (eldo *EventLogDeleteOne) Where(ps ...predicate.EventLog) *EventLogDeleteOne { + eldo.eld.mutation.Where(ps...) + return eldo +} + +// Exec executes the deletion query. +func (eldo *EventLogDeleteOne) Exec(ctx context.Context) error { + n, err := eldo.eld.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{eventlog.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (eldo *EventLogDeleteOne) ExecX(ctx context.Context) { + if err := eldo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/src/ent/eventlog_query.go b/src/ent/eventlog_query.go new file mode 100644 index 00000000..42a32dc5 --- /dev/null +++ b/src/ent/eventlog_query.go @@ -0,0 +1,636 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/KenshiTech/unchained/ent/eventlog" + "github.com/KenshiTech/unchained/ent/predicate" + "github.com/KenshiTech/unchained/ent/signer" +) + +// EventLogQuery is the builder for querying EventLog entities. +type EventLogQuery struct { + config + ctx *QueryContext + order []eventlog.OrderOption + inters []Interceptor + predicates []predicate.EventLog + withSigners *SignerQuery + // intermediate query (i.e. 
traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the EventLogQuery builder. +func (elq *EventLogQuery) Where(ps ...predicate.EventLog) *EventLogQuery { + elq.predicates = append(elq.predicates, ps...) + return elq +} + +// Limit the number of records to be returned by this query. +func (elq *EventLogQuery) Limit(limit int) *EventLogQuery { + elq.ctx.Limit = &limit + return elq +} + +// Offset to start from. +func (elq *EventLogQuery) Offset(offset int) *EventLogQuery { + elq.ctx.Offset = &offset + return elq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (elq *EventLogQuery) Unique(unique bool) *EventLogQuery { + elq.ctx.Unique = &unique + return elq +} + +// Order specifies how the records should be ordered. +func (elq *EventLogQuery) Order(o ...eventlog.OrderOption) *EventLogQuery { + elq.order = append(elq.order, o...) + return elq +} + +// QuerySigners chains the current query on the "signers" edge. +func (elq *EventLogQuery) QuerySigners() *SignerQuery { + query := (&SignerClient{config: elq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := elq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := elq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(eventlog.Table, eventlog.FieldID, selector), + sqlgraph.To(signer.Table, signer.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, eventlog.SignersTable, eventlog.SignersPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(elq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first EventLog entity from the query. +// Returns a *NotFoundError when no EventLog was found. +func (elq *EventLogQuery) First(ctx context.Context) (*EventLog, error) { + nodes, err := elq.Limit(1).All(setContextOp(ctx, elq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{eventlog.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (elq *EventLogQuery) FirstX(ctx context.Context) *EventLog { + node, err := elq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first EventLog ID from the query. +// Returns a *NotFoundError when no EventLog ID was found. +func (elq *EventLogQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = elq.Limit(1).IDs(setContextOp(ctx, elq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{eventlog.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (elq *EventLogQuery) FirstIDX(ctx context.Context) int { + id, err := elq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single EventLog entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one EventLog entity is found. +// Returns a *NotFoundError when no EventLog entities are found. 
+func (elq *EventLogQuery) Only(ctx context.Context) (*EventLog, error) { + nodes, err := elq.Limit(2).All(setContextOp(ctx, elq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{eventlog.Label} + default: + return nil, &NotSingularError{eventlog.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (elq *EventLogQuery) OnlyX(ctx context.Context) *EventLog { + node, err := elq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only EventLog ID in the query. +// Returns a *NotSingularError when more than one EventLog ID is found. +// Returns a *NotFoundError when no entities are found. +func (elq *EventLogQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = elq.Limit(2).IDs(setContextOp(ctx, elq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{eventlog.Label} + default: + err = &NotSingularError{eventlog.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (elq *EventLogQuery) OnlyIDX(ctx context.Context) int { + id, err := elq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of EventLogs. +func (elq *EventLogQuery) All(ctx context.Context) ([]*EventLog, error) { + ctx = setContextOp(ctx, elq.ctx, "All") + if err := elq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*EventLog, *EventLogQuery]() + return withInterceptors[[]*EventLog](ctx, elq, qr, elq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (elq *EventLogQuery) AllX(ctx context.Context) []*EventLog { + nodes, err := elq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of EventLog IDs. +func (elq *EventLogQuery) IDs(ctx context.Context) (ids []int, err error) { + if elq.ctx.Unique == nil && elq.path != nil { + elq.Unique(true) + } + ctx = setContextOp(ctx, elq.ctx, "IDs") + if err = elq.Select(eventlog.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (elq *EventLogQuery) IDsX(ctx context.Context) []int { + ids, err := elq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (elq *EventLogQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, elq.ctx, "Count") + if err := elq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, elq, querierCount[*EventLogQuery](), elq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (elq *EventLogQuery) CountX(ctx context.Context) int { + count, err := elq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (elq *EventLogQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, elq.ctx, "Exist") + switch _, err := elq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (elq *EventLogQuery) ExistX(ctx context.Context) bool { + exist, err := elq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the EventLogQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (elq *EventLogQuery) Clone() *EventLogQuery { + if elq == nil { + return nil + } + return &EventLogQuery{ + config: elq.config, + ctx: elq.ctx.Clone(), + order: append([]eventlog.OrderOption{}, elq.order...), + inters: append([]Interceptor{}, elq.inters...), + predicates: append([]predicate.EventLog{}, elq.predicates...), + withSigners: elq.withSigners.Clone(), + // clone intermediate query. + sql: elq.sql.Clone(), + path: elq.path, + } +} + +// WithSigners tells the query-builder to eager-load the nodes that are connected to +// the "signers" edge. The optional arguments are used to configure the query builder of the edge. +func (elq *EventLogQuery) WithSigners(opts ...func(*SignerQuery)) *EventLogQuery { + query := (&SignerClient{config: elq.config}).Query() + for _, opt := range opts { + opt(query) + } + elq.withSigners = query + return elq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Block uint64 `json:"block,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.EventLog.Query(). +// GroupBy(eventlog.FieldBlock). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (elq *EventLogQuery) GroupBy(field string, fields ...string) *EventLogGroupBy { + elq.ctx.Fields = append([]string{field}, fields...) + grbuild := &EventLogGroupBy{build: elq} + grbuild.flds = &elq.ctx.Fields + grbuild.label = eventlog.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Block uint64 `json:"block,omitempty"` +// } +// +// client.EventLog.Query(). +// Select(eventlog.FieldBlock). +// Scan(ctx, &v) +func (elq *EventLogQuery) Select(fields ...string) *EventLogSelect { + elq.ctx.Fields = append(elq.ctx.Fields, fields...) + sbuild := &EventLogSelect{EventLogQuery: elq} + sbuild.label = eventlog.Label + sbuild.flds, sbuild.scan = &elq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a EventLogSelect configured with the given aggregations. +func (elq *EventLogQuery) Aggregate(fns ...AggregateFunc) *EventLogSelect { + return elq.Select().Aggregate(fns...) 
+} + +func (elq *EventLogQuery) prepareQuery(ctx context.Context) error { + for _, inter := range elq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, elq); err != nil { + return err + } + } + } + for _, f := range elq.ctx.Fields { + if !eventlog.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if elq.path != nil { + prev, err := elq.path(ctx) + if err != nil { + return err + } + elq.sql = prev + } + return nil +} + +func (elq *EventLogQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*EventLog, error) { + var ( + nodes = []*EventLog{} + _spec = elq.querySpec() + loadedTypes = [1]bool{ + elq.withSigners != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*EventLog).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &EventLog{config: elq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, elq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := elq.withSigners; query != nil { + if err := elq.loadSigners(ctx, query, nodes, + func(n *EventLog) { n.Edges.Signers = []*Signer{} }, + func(n *EventLog, e *Signer) { n.Edges.Signers = append(n.Edges.Signers, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (elq *EventLogQuery) loadSigners(ctx context.Context, query *SignerQuery, nodes []*EventLog, init func(*EventLog), assign func(*EventLog, *Signer)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*EventLog) + nids := make(map[int]map[*EventLog]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(eventlog.SignersTable) + s.Join(joinT).On(s.C(signer.FieldID), joinT.C(eventlog.SignersPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(eventlog.SignersPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(eventlog.SignersPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*EventLog]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Signer](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "signers" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} + +func (elq *EventLogQuery) sqlCount(ctx context.Context) (int, error) { + _spec := elq.querySpec() + _spec.Node.Columns = elq.ctx.Fields + if len(elq.ctx.Fields) > 0 { + _spec.Unique = elq.ctx.Unique != nil && *elq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, elq.driver, _spec) +} + +func (elq *EventLogQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(eventlog.Table, eventlog.Columns, sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt)) + _spec.From = elq.sql + if unique := elq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if elq.path != nil { + _spec.Unique = true + } + if fields := elq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, eventlog.FieldID) + for i := range fields { + if fields[i] != eventlog.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := elq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := elq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := elq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := elq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (elq *EventLogQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(elq.driver.Dialect()) + t1 := builder.Table(eventlog.Table) + columns := elq.ctx.Fields + if len(columns) == 0 { + columns = eventlog.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if elq.sql != nil { + selector = elq.sql + selector.Select(selector.Columns(columns...)...) + } + if elq.ctx.Unique != nil && *elq.ctx.Unique { + selector.Distinct() + } + for _, p := range elq.predicates { + p(selector) + } + for _, p := range elq.order { + p(selector) + } + if offset := elq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := elq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// EventLogGroupBy is the group-by builder for EventLog entities. 
+type EventLogGroupBy struct { + selector + build *EventLogQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (elgb *EventLogGroupBy) Aggregate(fns ...AggregateFunc) *EventLogGroupBy { + elgb.fns = append(elgb.fns, fns...) + return elgb +} + +// Scan applies the selector query and scans the result into the given value. +func (elgb *EventLogGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, elgb.build.ctx, "GroupBy") + if err := elgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*EventLogQuery, *EventLogGroupBy](ctx, elgb.build, elgb, elgb.build.inters, v) +} + +func (elgb *EventLogGroupBy) sqlScan(ctx context.Context, root *EventLogQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(elgb.fns)) + for _, fn := range elgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*elgb.flds)+len(elgb.fns)) + for _, f := range *elgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*elgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := elgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// EventLogSelect is the builder for selecting fields of EventLog entities. +type EventLogSelect struct { + *EventLogQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (els *EventLogSelect) Aggregate(fns ...AggregateFunc) *EventLogSelect { + els.fns = append(els.fns, fns...) + return els +} + +// Scan applies the selector query and scans the result into the given value. +func (els *EventLogSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, els.ctx, "Select") + if err := els.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*EventLogQuery, *EventLogSelect](ctx, els.EventLogQuery, els, els.inters, v) +} + +func (els *EventLogSelect) sqlScan(ctx context.Context, root *EventLogQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(els.fns)) + for _, fn := range els.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*els.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := els.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/src/ent/eventlog_update.go b/src/ent/eventlog_update.go new file mode 100644 index 00000000..d627e839 --- /dev/null +++ b/src/ent/eventlog_update.go @@ -0,0 +1,716 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/KenshiTech/unchained/datasets" + "github.com/KenshiTech/unchained/ent/eventlog" + "github.com/KenshiTech/unchained/ent/predicate" + "github.com/KenshiTech/unchained/ent/signer" +) + +// EventLogUpdate is the builder for updating EventLog entities. +type EventLogUpdate struct { + config + hooks []Hook + mutation *EventLogMutation +} + +// Where appends a list predicates to the EventLogUpdate builder. +func (elu *EventLogUpdate) Where(ps ...predicate.EventLog) *EventLogUpdate { + elu.mutation.Where(ps...) + return elu +} + +// SetBlock sets the "block" field. +func (elu *EventLogUpdate) SetBlock(u uint64) *EventLogUpdate { + elu.mutation.ResetBlock() + elu.mutation.SetBlock(u) + return elu +} + +// SetNillableBlock sets the "block" field if the given value is not nil. +func (elu *EventLogUpdate) SetNillableBlock(u *uint64) *EventLogUpdate { + if u != nil { + elu.SetBlock(*u) + } + return elu +} + +// AddBlock adds u to the "block" field. +func (elu *EventLogUpdate) AddBlock(u int64) *EventLogUpdate { + elu.mutation.AddBlock(u) + return elu +} + +// SetSignersCount sets the "signersCount" field. +func (elu *EventLogUpdate) SetSignersCount(u uint64) *EventLogUpdate { + elu.mutation.ResetSignersCount() + elu.mutation.SetSignersCount(u) + return elu +} + +// SetNillableSignersCount sets the "signersCount" field if the given value is not nil. +func (elu *EventLogUpdate) SetNillableSignersCount(u *uint64) *EventLogUpdate { + if u != nil { + elu.SetSignersCount(*u) + } + return elu +} + +// AddSignersCount adds u to the "signersCount" field. +func (elu *EventLogUpdate) AddSignersCount(u int64) *EventLogUpdate { + elu.mutation.AddSignersCount(u) + return elu +} + +// SetSignature sets the "signature" field. +func (elu *EventLogUpdate) SetSignature(b []byte) *EventLogUpdate { + elu.mutation.SetSignature(b) + return elu +} + +// SetAddress sets the "address" field. +func (elu *EventLogUpdate) SetAddress(s string) *EventLogUpdate { + elu.mutation.SetAddress(s) + return elu +} + +// SetNillableAddress sets the "address" field if the given value is not nil. +func (elu *EventLogUpdate) SetNillableAddress(s *string) *EventLogUpdate { + if s != nil { + elu.SetAddress(*s) + } + return elu +} + +// SetChain sets the "chain" field. +func (elu *EventLogUpdate) SetChain(s string) *EventLogUpdate { + elu.mutation.SetChain(s) + return elu +} + +// SetNillableChain sets the "chain" field if the given value is not nil. +func (elu *EventLogUpdate) SetNillableChain(s *string) *EventLogUpdate { + if s != nil { + elu.SetChain(*s) + } + return elu +} + +// SetIndex sets the "index" field. +func (elu *EventLogUpdate) SetIndex(u uint64) *EventLogUpdate { + elu.mutation.ResetIndex() + elu.mutation.SetIndex(u) + return elu +} + +// SetNillableIndex sets the "index" field if the given value is not nil. +func (elu *EventLogUpdate) SetNillableIndex(u *uint64) *EventLogUpdate { + if u != nil { + elu.SetIndex(*u) + } + return elu +} + +// AddIndex adds u to the "index" field. +func (elu *EventLogUpdate) AddIndex(u int64) *EventLogUpdate { + elu.mutation.AddIndex(u) + return elu +} + +// SetEvent sets the "event" field. 
+func (elu *EventLogUpdate) SetEvent(s string) *EventLogUpdate { + elu.mutation.SetEvent(s) + return elu +} + +// SetNillableEvent sets the "event" field if the given value is not nil. +func (elu *EventLogUpdate) SetNillableEvent(s *string) *EventLogUpdate { + if s != nil { + elu.SetEvent(*s) + } + return elu +} + +// SetTransaction sets the "transaction" field. +func (elu *EventLogUpdate) SetTransaction(b []byte) *EventLogUpdate { + elu.mutation.SetTransaction(b) + return elu +} + +// SetArgs sets the "args" field. +func (elu *EventLogUpdate) SetArgs(dla []datasets.EventLogArg) *EventLogUpdate { + elu.mutation.SetArgs(dla) + return elu +} + +// AppendArgs appends dla to the "args" field. +func (elu *EventLogUpdate) AppendArgs(dla []datasets.EventLogArg) *EventLogUpdate { + elu.mutation.AppendArgs(dla) + return elu +} + +// AddSignerIDs adds the "signers" edge to the Signer entity by IDs. +func (elu *EventLogUpdate) AddSignerIDs(ids ...int) *EventLogUpdate { + elu.mutation.AddSignerIDs(ids...) + return elu +} + +// AddSigners adds the "signers" edges to the Signer entity. +func (elu *EventLogUpdate) AddSigners(s ...*Signer) *EventLogUpdate { + ids := make([]int, len(s)) + for i := range s { + ids[i] = s[i].ID + } + return elu.AddSignerIDs(ids...) +} + +// Mutation returns the EventLogMutation object of the builder. +func (elu *EventLogUpdate) Mutation() *EventLogMutation { + return elu.mutation +} + +// ClearSigners clears all "signers" edges to the Signer entity. +func (elu *EventLogUpdate) ClearSigners() *EventLogUpdate { + elu.mutation.ClearSigners() + return elu +} + +// RemoveSignerIDs removes the "signers" edge to Signer entities by IDs. +func (elu *EventLogUpdate) RemoveSignerIDs(ids ...int) *EventLogUpdate { + elu.mutation.RemoveSignerIDs(ids...) + return elu +} + +// RemoveSigners removes "signers" edges to Signer entities. +func (elu *EventLogUpdate) RemoveSigners(s ...*Signer) *EventLogUpdate { + ids := make([]int, len(s)) + for i := range s { + ids[i] = s[i].ID + } + return elu.RemoveSignerIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (elu *EventLogUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, elu.sqlSave, elu.mutation, elu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (elu *EventLogUpdate) SaveX(ctx context.Context) int { + affected, err := elu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (elu *EventLogUpdate) Exec(ctx context.Context) error { + _, err := elu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (elu *EventLogUpdate) ExecX(ctx context.Context) { + if err := elu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (elu *EventLogUpdate) check() error { + if v, ok := elu.mutation.Signature(); ok { + if err := eventlog.SignatureValidator(v); err != nil { + return &ValidationError{Name: "signature", err: fmt.Errorf(`ent: validator failed for field "EventLog.signature": %w`, err)} + } + } + if v, ok := elu.mutation.Transaction(); ok { + if err := eventlog.TransactionValidator(v); err != nil { + return &ValidationError{Name: "transaction", err: fmt.Errorf(`ent: validator failed for field "EventLog.transaction": %w`, err)} + } + } + return nil +} + +func (elu *EventLogUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := elu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(eventlog.Table, eventlog.Columns, sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt)) + if ps := elu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := elu.mutation.Block(); ok { + _spec.SetField(eventlog.FieldBlock, field.TypeUint64, value) + } + if value, ok := elu.mutation.AddedBlock(); ok { + _spec.AddField(eventlog.FieldBlock, field.TypeUint64, value) + } + if value, ok := elu.mutation.SignersCount(); ok { + _spec.SetField(eventlog.FieldSignersCount, field.TypeUint64, value) + } + if value, ok := elu.mutation.AddedSignersCount(); ok { + _spec.AddField(eventlog.FieldSignersCount, field.TypeUint64, value) + } + if value, ok := elu.mutation.Signature(); ok { + _spec.SetField(eventlog.FieldSignature, field.TypeBytes, value) + } + if value, ok := elu.mutation.Address(); ok { + _spec.SetField(eventlog.FieldAddress, field.TypeString, value) + } + if value, ok := elu.mutation.Chain(); ok { + _spec.SetField(eventlog.FieldChain, field.TypeString, value) + } + if value, ok := elu.mutation.Index(); ok { + _spec.SetField(eventlog.FieldIndex, field.TypeUint64, value) + } + if value, ok := elu.mutation.AddedIndex(); ok { + _spec.AddField(eventlog.FieldIndex, field.TypeUint64, value) + } + if value, ok := elu.mutation.Event(); ok { + _spec.SetField(eventlog.FieldEvent, field.TypeString, value) + } + if value, ok := elu.mutation.Transaction(); ok { + _spec.SetField(eventlog.FieldTransaction, field.TypeBytes, value) + } + if value, ok := elu.mutation.Args(); ok { + _spec.SetField(eventlog.FieldArgs, field.TypeJSON, value) + } + if value, ok := elu.mutation.AppendedArgs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, eventlog.FieldArgs, value) + }) + } + if elu.mutation.SignersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := elu.mutation.RemovedSignersIDs(); len(nodes) > 0 && !elu.mutation.SignersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := elu.mutation.SignersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: 
eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, elu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{eventlog.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + elu.mutation.done = true + return n, nil +} + +// EventLogUpdateOne is the builder for updating a single EventLog entity. +type EventLogUpdateOne struct { + config + fields []string + hooks []Hook + mutation *EventLogMutation +} + +// SetBlock sets the "block" field. +func (eluo *EventLogUpdateOne) SetBlock(u uint64) *EventLogUpdateOne { + eluo.mutation.ResetBlock() + eluo.mutation.SetBlock(u) + return eluo +} + +// SetNillableBlock sets the "block" field if the given value is not nil. +func (eluo *EventLogUpdateOne) SetNillableBlock(u *uint64) *EventLogUpdateOne { + if u != nil { + eluo.SetBlock(*u) + } + return eluo +} + +// AddBlock adds u to the "block" field. +func (eluo *EventLogUpdateOne) AddBlock(u int64) *EventLogUpdateOne { + eluo.mutation.AddBlock(u) + return eluo +} + +// SetSignersCount sets the "signersCount" field. +func (eluo *EventLogUpdateOne) SetSignersCount(u uint64) *EventLogUpdateOne { + eluo.mutation.ResetSignersCount() + eluo.mutation.SetSignersCount(u) + return eluo +} + +// SetNillableSignersCount sets the "signersCount" field if the given value is not nil. +func (eluo *EventLogUpdateOne) SetNillableSignersCount(u *uint64) *EventLogUpdateOne { + if u != nil { + eluo.SetSignersCount(*u) + } + return eluo +} + +// AddSignersCount adds u to the "signersCount" field. +func (eluo *EventLogUpdateOne) AddSignersCount(u int64) *EventLogUpdateOne { + eluo.mutation.AddSignersCount(u) + return eluo +} + +// SetSignature sets the "signature" field. +func (eluo *EventLogUpdateOne) SetSignature(b []byte) *EventLogUpdateOne { + eluo.mutation.SetSignature(b) + return eluo +} + +// SetAddress sets the "address" field. +func (eluo *EventLogUpdateOne) SetAddress(s string) *EventLogUpdateOne { + eluo.mutation.SetAddress(s) + return eluo +} + +// SetNillableAddress sets the "address" field if the given value is not nil. +func (eluo *EventLogUpdateOne) SetNillableAddress(s *string) *EventLogUpdateOne { + if s != nil { + eluo.SetAddress(*s) + } + return eluo +} + +// SetChain sets the "chain" field. +func (eluo *EventLogUpdateOne) SetChain(s string) *EventLogUpdateOne { + eluo.mutation.SetChain(s) + return eluo +} + +// SetNillableChain sets the "chain" field if the given value is not nil. +func (eluo *EventLogUpdateOne) SetNillableChain(s *string) *EventLogUpdateOne { + if s != nil { + eluo.SetChain(*s) + } + return eluo +} + +// SetIndex sets the "index" field. +func (eluo *EventLogUpdateOne) SetIndex(u uint64) *EventLogUpdateOne { + eluo.mutation.ResetIndex() + eluo.mutation.SetIndex(u) + return eluo +} + +// SetNillableIndex sets the "index" field if the given value is not nil. +func (eluo *EventLogUpdateOne) SetNillableIndex(u *uint64) *EventLogUpdateOne { + if u != nil { + eluo.SetIndex(*u) + } + return eluo +} + +// AddIndex adds u to the "index" field. +func (eluo *EventLogUpdateOne) AddIndex(u int64) *EventLogUpdateOne { + eluo.mutation.AddIndex(u) + return eluo +} + +// SetEvent sets the "event" field. 
+func (eluo *EventLogUpdateOne) SetEvent(s string) *EventLogUpdateOne { + eluo.mutation.SetEvent(s) + return eluo +} + +// SetNillableEvent sets the "event" field if the given value is not nil. +func (eluo *EventLogUpdateOne) SetNillableEvent(s *string) *EventLogUpdateOne { + if s != nil { + eluo.SetEvent(*s) + } + return eluo +} + +// SetTransaction sets the "transaction" field. +func (eluo *EventLogUpdateOne) SetTransaction(b []byte) *EventLogUpdateOne { + eluo.mutation.SetTransaction(b) + return eluo +} + +// SetArgs sets the "args" field. +func (eluo *EventLogUpdateOne) SetArgs(dla []datasets.EventLogArg) *EventLogUpdateOne { + eluo.mutation.SetArgs(dla) + return eluo +} + +// AppendArgs appends dla to the "args" field. +func (eluo *EventLogUpdateOne) AppendArgs(dla []datasets.EventLogArg) *EventLogUpdateOne { + eluo.mutation.AppendArgs(dla) + return eluo +} + +// AddSignerIDs adds the "signers" edge to the Signer entity by IDs. +func (eluo *EventLogUpdateOne) AddSignerIDs(ids ...int) *EventLogUpdateOne { + eluo.mutation.AddSignerIDs(ids...) + return eluo +} + +// AddSigners adds the "signers" edges to the Signer entity. +func (eluo *EventLogUpdateOne) AddSigners(s ...*Signer) *EventLogUpdateOne { + ids := make([]int, len(s)) + for i := range s { + ids[i] = s[i].ID + } + return eluo.AddSignerIDs(ids...) +} + +// Mutation returns the EventLogMutation object of the builder. +func (eluo *EventLogUpdateOne) Mutation() *EventLogMutation { + return eluo.mutation +} + +// ClearSigners clears all "signers" edges to the Signer entity. +func (eluo *EventLogUpdateOne) ClearSigners() *EventLogUpdateOne { + eluo.mutation.ClearSigners() + return eluo +} + +// RemoveSignerIDs removes the "signers" edge to Signer entities by IDs. +func (eluo *EventLogUpdateOne) RemoveSignerIDs(ids ...int) *EventLogUpdateOne { + eluo.mutation.RemoveSignerIDs(ids...) + return eluo +} + +// RemoveSigners removes "signers" edges to Signer entities. +func (eluo *EventLogUpdateOne) RemoveSigners(s ...*Signer) *EventLogUpdateOne { + ids := make([]int, len(s)) + for i := range s { + ids[i] = s[i].ID + } + return eluo.RemoveSignerIDs(ids...) +} + +// Where appends a list predicates to the EventLogUpdate builder. +func (eluo *EventLogUpdateOne) Where(ps ...predicate.EventLog) *EventLogUpdateOne { + eluo.mutation.Where(ps...) + return eluo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (eluo *EventLogUpdateOne) Select(field string, fields ...string) *EventLogUpdateOne { + eluo.fields = append([]string{field}, fields...) + return eluo +} + +// Save executes the query and returns the updated EventLog entity. +func (eluo *EventLogUpdateOne) Save(ctx context.Context) (*EventLog, error) { + return withHooks(ctx, eluo.sqlSave, eluo.mutation, eluo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (eluo *EventLogUpdateOne) SaveX(ctx context.Context) *EventLog { + node, err := eluo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (eluo *EventLogUpdateOne) Exec(ctx context.Context) error { + _, err := eluo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (eluo *EventLogUpdateOne) ExecX(ctx context.Context) { + if err := eluo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (eluo *EventLogUpdateOne) check() error { + if v, ok := eluo.mutation.Signature(); ok { + if err := eventlog.SignatureValidator(v); err != nil { + return &ValidationError{Name: "signature", err: fmt.Errorf(`ent: validator failed for field "EventLog.signature": %w`, err)} + } + } + if v, ok := eluo.mutation.Transaction(); ok { + if err := eventlog.TransactionValidator(v); err != nil { + return &ValidationError{Name: "transaction", err: fmt.Errorf(`ent: validator failed for field "EventLog.transaction": %w`, err)} + } + } + return nil +} + +func (eluo *EventLogUpdateOne) sqlSave(ctx context.Context) (_node *EventLog, err error) { + if err := eluo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(eventlog.Table, eventlog.Columns, sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt)) + id, ok := eluo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "EventLog.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := eluo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, eventlog.FieldID) + for _, f := range fields { + if !eventlog.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != eventlog.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := eluo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := eluo.mutation.Block(); ok { + _spec.SetField(eventlog.FieldBlock, field.TypeUint64, value) + } + if value, ok := eluo.mutation.AddedBlock(); ok { + _spec.AddField(eventlog.FieldBlock, field.TypeUint64, value) + } + if value, ok := eluo.mutation.SignersCount(); ok { + _spec.SetField(eventlog.FieldSignersCount, field.TypeUint64, value) + } + if value, ok := eluo.mutation.AddedSignersCount(); ok { + _spec.AddField(eventlog.FieldSignersCount, field.TypeUint64, value) + } + if value, ok := eluo.mutation.Signature(); ok { + _spec.SetField(eventlog.FieldSignature, field.TypeBytes, value) + } + if value, ok := eluo.mutation.Address(); ok { + _spec.SetField(eventlog.FieldAddress, field.TypeString, value) + } + if value, ok := eluo.mutation.Chain(); ok { + _spec.SetField(eventlog.FieldChain, field.TypeString, value) + } + if value, ok := eluo.mutation.Index(); ok { + _spec.SetField(eventlog.FieldIndex, field.TypeUint64, value) + } + if value, ok := eluo.mutation.AddedIndex(); ok { + _spec.AddField(eventlog.FieldIndex, field.TypeUint64, value) + } + if value, ok := eluo.mutation.Event(); ok { + _spec.SetField(eventlog.FieldEvent, field.TypeString, value) + } + if value, ok := eluo.mutation.Transaction(); ok { + _spec.SetField(eventlog.FieldTransaction, field.TypeBytes, value) + } + if value, ok := eluo.mutation.Args(); ok { + _spec.SetField(eventlog.FieldArgs, field.TypeJSON, value) + } + if value, ok := eluo.mutation.AppendedArgs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, eventlog.FieldArgs, value) + }) + } + if eluo.mutation.SignersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
eluo.mutation.RemovedSignersIDs(); len(nodes) > 0 && !eluo.mutation.SignersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := eluo.mutation.SignersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: eventlog.SignersTable, + Columns: eventlog.SignersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(signer.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &EventLog{config: eluo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, eluo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{eventlog.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + eluo.mutation.done = true + return _node, nil +} diff --git a/src/ent/hook/hook.go b/src/ent/hook/hook.go index 7a430193..d972c064 100644 --- a/src/ent/hook/hook.go +++ b/src/ent/hook/hook.go @@ -21,6 +21,18 @@ func (f AssetPriceFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AssetPriceMutation", m) } +// The EventLogFunc type is an adapter to allow the use of ordinary +// function as EventLog mutator. +type EventLogFunc func(context.Context, *ent.EventLogMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f EventLogFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.EventLogMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventLogMutation", m) +} + // The SignerFunc type is an adapter to allow the use of ordinary // function as Signer mutator. type SignerFunc func(context.Context, *ent.SignerMutation) (ent.Value, error) diff --git a/src/ent/migrate/schema.go b/src/ent/migrate/schema.go index d61fbf45..a741d626 100644 --- a/src/ent/migrate/schema.go +++ b/src/ent/migrate/schema.go @@ -32,6 +32,37 @@ var ( }, }, } + // EventLogsColumns holds the columns for the "event_logs" table. + EventLogsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "block", Type: field.TypeUint64}, + {Name: "signers_count", Type: field.TypeUint64}, + {Name: "signature", Type: field.TypeBytes, Size: 96}, + {Name: "address", Type: field.TypeString}, + {Name: "chain", Type: field.TypeString}, + {Name: "index", Type: field.TypeUint64}, + {Name: "event", Type: field.TypeString}, + {Name: "transaction", Type: field.TypeBytes, Size: 32}, + {Name: "args", Type: field.TypeJSON}, + } + // EventLogsTable holds the schema information for the "event_logs" table. 
+ EventLogsTable = &schema.Table{ + Name: "event_logs", + Columns: EventLogsColumns, + PrimaryKey: []*schema.Column{EventLogsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "eventlog_block_transaction_index", + Unique: true, + Columns: []*schema.Column{EventLogsColumns[1], EventLogsColumns[8], EventLogsColumns[6]}, + }, + { + Name: "eventlog_block_address_event", + Unique: false, + Columns: []*schema.Column{EventLogsColumns[1], EventLogsColumns[4], EventLogsColumns[7]}, + }, + }, + } // SignersColumns holds the columns for the "signers" table. SignersColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, @@ -83,15 +114,44 @@ var ( }, }, } + // EventLogSignersColumns holds the columns for the "event_log_signers" table. + EventLogSignersColumns = []*schema.Column{ + {Name: "event_log_id", Type: field.TypeInt}, + {Name: "signer_id", Type: field.TypeInt}, + } + // EventLogSignersTable holds the schema information for the "event_log_signers" table. + EventLogSignersTable = &schema.Table{ + Name: "event_log_signers", + Columns: EventLogSignersColumns, + PrimaryKey: []*schema.Column{EventLogSignersColumns[0], EventLogSignersColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "event_log_signers_event_log_id", + Columns: []*schema.Column{EventLogSignersColumns[0]}, + RefColumns: []*schema.Column{EventLogsColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "event_log_signers_signer_id", + Columns: []*schema.Column{EventLogSignersColumns[1]}, + RefColumns: []*schema.Column{SignersColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } // Tables holds all the tables in the schema. Tables = []*schema.Table{ AssetPricesTable, + EventLogsTable, SignersTable, AssetPriceSignersTable, + EventLogSignersTable, } ) func init() { AssetPriceSignersTable.ForeignKeys[0].RefTable = AssetPricesTable AssetPriceSignersTable.ForeignKeys[1].RefTable = SignersTable + EventLogSignersTable.ForeignKeys[0].RefTable = EventLogsTable + EventLogSignersTable.ForeignKeys[1].RefTable = SignersTable } diff --git a/src/ent/mutation.go b/src/ent/mutation.go index bc23034e..7eefb569 100644 --- a/src/ent/mutation.go +++ b/src/ent/mutation.go @@ -11,7 +11,9 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" + "github.com/KenshiTech/unchained/datasets" "github.com/KenshiTech/unchained/ent/assetprice" + "github.com/KenshiTech/unchained/ent/eventlog" "github.com/KenshiTech/unchained/ent/predicate" "github.com/KenshiTech/unchained/ent/signer" ) @@ -26,6 +28,7 @@ const ( // Node types. TypeAssetPrice = "AssetPrice" + TypeEventLog = "EventLog" TypeSigner = "Signer" ) @@ -921,6 +924,975 @@ func (m *AssetPriceMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AssetPrice edge %s", name) } +// EventLogMutation represents an operation that mutates the EventLog nodes in the graph. 
+type EventLogMutation struct { + config + op Op + typ string + id *int + block *uint64 + addblock *int64 + signersCount *uint64 + addsignersCount *int64 + signature *[]byte + address *string + chain *string + index *uint64 + addindex *int64 + event *string + transaction *[]byte + args *[]datasets.EventLogArg + appendargs []datasets.EventLogArg + clearedFields map[string]struct{} + signers map[int]struct{} + removedsigners map[int]struct{} + clearedsigners bool + done bool + oldValue func(context.Context) (*EventLog, error) + predicates []predicate.EventLog +} + +var _ ent.Mutation = (*EventLogMutation)(nil) + +// eventlogOption allows management of the mutation configuration using functional options. +type eventlogOption func(*EventLogMutation) + +// newEventLogMutation creates new mutation for the EventLog entity. +func newEventLogMutation(c config, op Op, opts ...eventlogOption) *EventLogMutation { + m := &EventLogMutation{ + config: c, + op: op, + typ: TypeEventLog, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withEventLogID sets the ID field of the mutation. +func withEventLogID(id int) eventlogOption { + return func(m *EventLogMutation) { + var ( + err error + once sync.Once + value *EventLog + ) + m.oldValue = func(ctx context.Context) (*EventLog, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().EventLog.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withEventLog sets the old EventLog of the mutation. +func withEventLog(node *EventLog) eventlogOption { + return func(m *EventLogMutation) { + m.oldValue = func(context.Context) (*EventLog, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m EventLogMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m EventLogMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *EventLogMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *EventLogMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().EventLog.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetBlock sets the "block" field. 
+func (m *EventLogMutation) SetBlock(u uint64) { + m.block = &u + m.addblock = nil +} + +// Block returns the value of the "block" field in the mutation. +func (m *EventLogMutation) Block() (r uint64, exists bool) { + v := m.block + if v == nil { + return + } + return *v, true +} + +// OldBlock returns the old "block" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldBlock(ctx context.Context) (v uint64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBlock is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBlock requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBlock: %w", err) + } + return oldValue.Block, nil +} + +// AddBlock adds u to the "block" field. +func (m *EventLogMutation) AddBlock(u int64) { + if m.addblock != nil { + *m.addblock += u + } else { + m.addblock = &u + } +} + +// AddedBlock returns the value that was added to the "block" field in this mutation. +func (m *EventLogMutation) AddedBlock() (r int64, exists bool) { + v := m.addblock + if v == nil { + return + } + return *v, true +} + +// ResetBlock resets all changes to the "block" field. +func (m *EventLogMutation) ResetBlock() { + m.block = nil + m.addblock = nil +} + +// SetSignersCount sets the "signersCount" field. +func (m *EventLogMutation) SetSignersCount(u uint64) { + m.signersCount = &u + m.addsignersCount = nil +} + +// SignersCount returns the value of the "signersCount" field in the mutation. +func (m *EventLogMutation) SignersCount() (r uint64, exists bool) { + v := m.signersCount + if v == nil { + return + } + return *v, true +} + +// OldSignersCount returns the old "signersCount" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldSignersCount(ctx context.Context) (v uint64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSignersCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSignersCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSignersCount: %w", err) + } + return oldValue.SignersCount, nil +} + +// AddSignersCount adds u to the "signersCount" field. +func (m *EventLogMutation) AddSignersCount(u int64) { + if m.addsignersCount != nil { + *m.addsignersCount += u + } else { + m.addsignersCount = &u + } +} + +// AddedSignersCount returns the value that was added to the "signersCount" field in this mutation. +func (m *EventLogMutation) AddedSignersCount() (r int64, exists bool) { + v := m.addsignersCount + if v == nil { + return + } + return *v, true +} + +// ResetSignersCount resets all changes to the "signersCount" field. +func (m *EventLogMutation) ResetSignersCount() { + m.signersCount = nil + m.addsignersCount = nil +} + +// SetSignature sets the "signature" field. 
+func (m *EventLogMutation) SetSignature(b []byte) { + m.signature = &b +} + +// Signature returns the value of the "signature" field in the mutation. +func (m *EventLogMutation) Signature() (r []byte, exists bool) { + v := m.signature + if v == nil { + return + } + return *v, true +} + +// OldSignature returns the old "signature" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldSignature(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSignature is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSignature requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSignature: %w", err) + } + return oldValue.Signature, nil +} + +// ResetSignature resets all changes to the "signature" field. +func (m *EventLogMutation) ResetSignature() { + m.signature = nil +} + +// SetAddress sets the "address" field. +func (m *EventLogMutation) SetAddress(s string) { + m.address = &s +} + +// Address returns the value of the "address" field in the mutation. +func (m *EventLogMutation) Address() (r string, exists bool) { + v := m.address + if v == nil { + return + } + return *v, true +} + +// OldAddress returns the old "address" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldAddress(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAddress is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAddress: %w", err) + } + return oldValue.Address, nil +} + +// ResetAddress resets all changes to the "address" field. +func (m *EventLogMutation) ResetAddress() { + m.address = nil +} + +// SetChain sets the "chain" field. +func (m *EventLogMutation) SetChain(s string) { + m.chain = &s +} + +// Chain returns the value of the "chain" field in the mutation. +func (m *EventLogMutation) Chain() (r string, exists bool) { + v := m.chain + if v == nil { + return + } + return *v, true +} + +// OldChain returns the old "chain" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldChain(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldChain is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldChain requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldChain: %w", err) + } + return oldValue.Chain, nil +} + +// ResetChain resets all changes to the "chain" field. 
+func (m *EventLogMutation) ResetChain() { + m.chain = nil +} + +// SetIndex sets the "index" field. +func (m *EventLogMutation) SetIndex(u uint64) { + m.index = &u + m.addindex = nil +} + +// Index returns the value of the "index" field in the mutation. +func (m *EventLogMutation) Index() (r uint64, exists bool) { + v := m.index + if v == nil { + return + } + return *v, true +} + +// OldIndex returns the old "index" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldIndex(ctx context.Context) (v uint64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIndex is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIndex requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIndex: %w", err) + } + return oldValue.Index, nil +} + +// AddIndex adds u to the "index" field. +func (m *EventLogMutation) AddIndex(u int64) { + if m.addindex != nil { + *m.addindex += u + } else { + m.addindex = &u + } +} + +// AddedIndex returns the value that was added to the "index" field in this mutation. +func (m *EventLogMutation) AddedIndex() (r int64, exists bool) { + v := m.addindex + if v == nil { + return + } + return *v, true +} + +// ResetIndex resets all changes to the "index" field. +func (m *EventLogMutation) ResetIndex() { + m.index = nil + m.addindex = nil +} + +// SetEvent sets the "event" field. +func (m *EventLogMutation) SetEvent(s string) { + m.event = &s +} + +// Event returns the value of the "event" field in the mutation. +func (m *EventLogMutation) Event() (r string, exists bool) { + v := m.event + if v == nil { + return + } + return *v, true +} + +// OldEvent returns the old "event" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldEvent(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEvent is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEvent requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEvent: %w", err) + } + return oldValue.Event, nil +} + +// ResetEvent resets all changes to the "event" field. +func (m *EventLogMutation) ResetEvent() { + m.event = nil +} + +// SetTransaction sets the "transaction" field. +func (m *EventLogMutation) SetTransaction(b []byte) { + m.transaction = &b +} + +// Transaction returns the value of the "transaction" field in the mutation. +func (m *EventLogMutation) Transaction() (r []byte, exists bool) { + v := m.transaction + if v == nil { + return + } + return *v, true +} + +// OldTransaction returns the old "transaction" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *EventLogMutation) OldTransaction(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTransaction is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTransaction requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTransaction: %w", err) + } + return oldValue.Transaction, nil +} + +// ResetTransaction resets all changes to the "transaction" field. +func (m *EventLogMutation) ResetTransaction() { + m.transaction = nil +} + +// SetArgs sets the "args" field. +func (m *EventLogMutation) SetArgs(dla []datasets.EventLogArg) { + m.args = &dla + m.appendargs = nil +} + +// Args returns the value of the "args" field in the mutation. +func (m *EventLogMutation) Args() (r []datasets.EventLogArg, exists bool) { + v := m.args + if v == nil { + return + } + return *v, true +} + +// OldArgs returns the old "args" field's value of the EventLog entity. +// If the EventLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventLogMutation) OldArgs(ctx context.Context) (v []datasets.EventLogArg, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldArgs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldArgs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldArgs: %w", err) + } + return oldValue.Args, nil +} + +// AppendArgs adds dla to the "args" field. +func (m *EventLogMutation) AppendArgs(dla []datasets.EventLogArg) { + m.appendargs = append(m.appendargs, dla...) +} + +// AppendedArgs returns the list of values that were appended to the "args" field in this mutation. +func (m *EventLogMutation) AppendedArgs() ([]datasets.EventLogArg, bool) { + if len(m.appendargs) == 0 { + return nil, false + } + return m.appendargs, true +} + +// ResetArgs resets all changes to the "args" field. +func (m *EventLogMutation) ResetArgs() { + m.args = nil + m.appendargs = nil +} + +// AddSignerIDs adds the "signers" edge to the Signer entity by ids. +func (m *EventLogMutation) AddSignerIDs(ids ...int) { + if m.signers == nil { + m.signers = make(map[int]struct{}) + } + for i := range ids { + m.signers[ids[i]] = struct{}{} + } +} + +// ClearSigners clears the "signers" edge to the Signer entity. +func (m *EventLogMutation) ClearSigners() { + m.clearedsigners = true +} + +// SignersCleared reports if the "signers" edge to the Signer entity was cleared. +func (m *EventLogMutation) SignersCleared() bool { + return m.clearedsigners +} + +// RemoveSignerIDs removes the "signers" edge to the Signer entity by IDs. +func (m *EventLogMutation) RemoveSignerIDs(ids ...int) { + if m.removedsigners == nil { + m.removedsigners = make(map[int]struct{}) + } + for i := range ids { + delete(m.signers, ids[i]) + m.removedsigners[ids[i]] = struct{}{} + } +} + +// RemovedSigners returns the removed IDs of the "signers" edge to the Signer entity. +func (m *EventLogMutation) RemovedSignersIDs() (ids []int) { + for id := range m.removedsigners { + ids = append(ids, id) + } + return +} + +// SignersIDs returns the "signers" edge IDs in the mutation. 
+func (m *EventLogMutation) SignersIDs() (ids []int) { + for id := range m.signers { + ids = append(ids, id) + } + return +} + +// ResetSigners resets all changes to the "signers" edge. +func (m *EventLogMutation) ResetSigners() { + m.signers = nil + m.clearedsigners = false + m.removedsigners = nil +} + +// Where appends a list predicates to the EventLogMutation builder. +func (m *EventLogMutation) Where(ps ...predicate.EventLog) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the EventLogMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *EventLogMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.EventLog, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *EventLogMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *EventLogMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (EventLog). +func (m *EventLogMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *EventLogMutation) Fields() []string { + fields := make([]string, 0, 9) + if m.block != nil { + fields = append(fields, eventlog.FieldBlock) + } + if m.signersCount != nil { + fields = append(fields, eventlog.FieldSignersCount) + } + if m.signature != nil { + fields = append(fields, eventlog.FieldSignature) + } + if m.address != nil { + fields = append(fields, eventlog.FieldAddress) + } + if m.chain != nil { + fields = append(fields, eventlog.FieldChain) + } + if m.index != nil { + fields = append(fields, eventlog.FieldIndex) + } + if m.event != nil { + fields = append(fields, eventlog.FieldEvent) + } + if m.transaction != nil { + fields = append(fields, eventlog.FieldTransaction) + } + if m.args != nil { + fields = append(fields, eventlog.FieldArgs) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *EventLogMutation) Field(name string) (ent.Value, bool) { + switch name { + case eventlog.FieldBlock: + return m.Block() + case eventlog.FieldSignersCount: + return m.SignersCount() + case eventlog.FieldSignature: + return m.Signature() + case eventlog.FieldAddress: + return m.Address() + case eventlog.FieldChain: + return m.Chain() + case eventlog.FieldIndex: + return m.Index() + case eventlog.FieldEvent: + return m.Event() + case eventlog.FieldTransaction: + return m.Transaction() + case eventlog.FieldArgs: + return m.Args() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *EventLogMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case eventlog.FieldBlock: + return m.OldBlock(ctx) + case eventlog.FieldSignersCount: + return m.OldSignersCount(ctx) + case eventlog.FieldSignature: + return m.OldSignature(ctx) + case eventlog.FieldAddress: + return m.OldAddress(ctx) + case eventlog.FieldChain: + return m.OldChain(ctx) + case eventlog.FieldIndex: + return m.OldIndex(ctx) + case eventlog.FieldEvent: + return m.OldEvent(ctx) + case eventlog.FieldTransaction: + return m.OldTransaction(ctx) + case eventlog.FieldArgs: + return m.OldArgs(ctx) + } + return nil, fmt.Errorf("unknown EventLog field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *EventLogMutation) SetField(name string, value ent.Value) error { + switch name { + case eventlog.FieldBlock: + v, ok := value.(uint64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBlock(v) + return nil + case eventlog.FieldSignersCount: + v, ok := value.(uint64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSignersCount(v) + return nil + case eventlog.FieldSignature: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSignature(v) + return nil + case eventlog.FieldAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAddress(v) + return nil + case eventlog.FieldChain: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetChain(v) + return nil + case eventlog.FieldIndex: + v, ok := value.(uint64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIndex(v) + return nil + case eventlog.FieldEvent: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEvent(v) + return nil + case eventlog.FieldTransaction: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTransaction(v) + return nil + case eventlog.FieldArgs: + v, ok := value.([]datasets.EventLogArg) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetArgs(v) + return nil + } + return fmt.Errorf("unknown EventLog field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *EventLogMutation) AddedFields() []string { + var fields []string + if m.addblock != nil { + fields = append(fields, eventlog.FieldBlock) + } + if m.addsignersCount != nil { + fields = append(fields, eventlog.FieldSignersCount) + } + if m.addindex != nil { + fields = append(fields, eventlog.FieldIndex) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. 
+func (m *EventLogMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case eventlog.FieldBlock: + return m.AddedBlock() + case eventlog.FieldSignersCount: + return m.AddedSignersCount() + case eventlog.FieldIndex: + return m.AddedIndex() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *EventLogMutation) AddField(name string, value ent.Value) error { + switch name { + case eventlog.FieldBlock: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBlock(v) + return nil + case eventlog.FieldSignersCount: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSignersCount(v) + return nil + case eventlog.FieldIndex: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddIndex(v) + return nil + } + return fmt.Errorf("unknown EventLog numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *EventLogMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *EventLogMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *EventLogMutation) ClearField(name string) error { + return fmt.Errorf("unknown EventLog nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *EventLogMutation) ResetField(name string) error { + switch name { + case eventlog.FieldBlock: + m.ResetBlock() + return nil + case eventlog.FieldSignersCount: + m.ResetSignersCount() + return nil + case eventlog.FieldSignature: + m.ResetSignature() + return nil + case eventlog.FieldAddress: + m.ResetAddress() + return nil + case eventlog.FieldChain: + m.ResetChain() + return nil + case eventlog.FieldIndex: + m.ResetIndex() + return nil + case eventlog.FieldEvent: + m.ResetEvent() + return nil + case eventlog.FieldTransaction: + m.ResetTransaction() + return nil + case eventlog.FieldArgs: + m.ResetArgs() + return nil + } + return fmt.Errorf("unknown EventLog field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *EventLogMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.signers != nil { + edges = append(edges, eventlog.EdgeSigners) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *EventLogMutation) AddedIDs(name string) []ent.Value { + switch name { + case eventlog.EdgeSigners: + ids := make([]ent.Value, 0, len(m.signers)) + for id := range m.signers { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *EventLogMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedsigners != nil { + edges = append(edges, eventlog.EdgeSigners) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *EventLogMutation) RemovedIDs(name string) []ent.Value { + switch name { + case eventlog.EdgeSigners: + ids := make([]ent.Value, 0, len(m.removedsigners)) + for id := range m.removedsigners { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *EventLogMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedsigners { + edges = append(edges, eventlog.EdgeSigners) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *EventLogMutation) EdgeCleared(name string) bool { + switch name { + case eventlog.EdgeSigners: + return m.clearedsigners + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *EventLogMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown EventLog unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *EventLogMutation) ResetEdge(name string) error { + switch name { + case eventlog.EdgeSigners: + m.ResetSigners() + return nil + } + return fmt.Errorf("unknown EventLog edge %s", name) +} + // SignerMutation represents an operation that mutates the Signer nodes in the graph. type SignerMutation struct { config @@ -936,6 +1908,9 @@ type SignerMutation struct { assetPrice map[int]struct{} removedassetPrice map[int]struct{} clearedassetPrice bool + eventLogs map[int]struct{} + removedeventLogs map[int]struct{} + clearedeventLogs bool done bool oldValue func(context.Context) (*Signer, error) predicates []predicate.Signer @@ -1257,6 +2232,60 @@ func (m *SignerMutation) ResetAssetPrice() { m.removedassetPrice = nil } +// AddEventLogIDs adds the "eventLogs" edge to the EventLog entity by ids. +func (m *SignerMutation) AddEventLogIDs(ids ...int) { + if m.eventLogs == nil { + m.eventLogs = make(map[int]struct{}) + } + for i := range ids { + m.eventLogs[ids[i]] = struct{}{} + } +} + +// ClearEventLogs clears the "eventLogs" edge to the EventLog entity. +func (m *SignerMutation) ClearEventLogs() { + m.clearedeventLogs = true +} + +// EventLogsCleared reports if the "eventLogs" edge to the EventLog entity was cleared. +func (m *SignerMutation) EventLogsCleared() bool { + return m.clearedeventLogs +} + +// RemoveEventLogIDs removes the "eventLogs" edge to the EventLog entity by IDs. +func (m *SignerMutation) RemoveEventLogIDs(ids ...int) { + if m.removedeventLogs == nil { + m.removedeventLogs = make(map[int]struct{}) + } + for i := range ids { + delete(m.eventLogs, ids[i]) + m.removedeventLogs[ids[i]] = struct{}{} + } +} + +// RemovedEventLogs returns the removed IDs of the "eventLogs" edge to the EventLog entity. +func (m *SignerMutation) RemovedEventLogsIDs() (ids []int) { + for id := range m.removedeventLogs { + ids = append(ids, id) + } + return +} + +// EventLogsIDs returns the "eventLogs" edge IDs in the mutation. 
+func (m *SignerMutation) EventLogsIDs() (ids []int) { + for id := range m.eventLogs { + ids = append(ids, id) + } + return +} + +// ResetEventLogs resets all changes to the "eventLogs" edge. +func (m *SignerMutation) ResetEventLogs() { + m.eventLogs = nil + m.clearedeventLogs = false + m.removedeventLogs = nil +} + // Where appends a list predicates to the SignerMutation builder. func (m *SignerMutation) Where(ps ...predicate.Signer) { m.predicates = append(m.predicates, ps...) @@ -1456,10 +2485,13 @@ func (m *SignerMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *SignerMutation) AddedEdges() []string { - edges := make([]string, 0, 1) + edges := make([]string, 0, 2) if m.assetPrice != nil { edges = append(edges, signer.EdgeAssetPrice) } + if m.eventLogs != nil { + edges = append(edges, signer.EdgeEventLogs) + } return edges } @@ -1473,16 +2505,25 @@ func (m *SignerMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case signer.EdgeEventLogs: + ids := make([]ent.Value, 0, len(m.eventLogs)) + for id := range m.eventLogs { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. func (m *SignerMutation) RemovedEdges() []string { - edges := make([]string, 0, 1) + edges := make([]string, 0, 2) if m.removedassetPrice != nil { edges = append(edges, signer.EdgeAssetPrice) } + if m.removedeventLogs != nil { + edges = append(edges, signer.EdgeEventLogs) + } return edges } @@ -1496,16 +2537,25 @@ func (m *SignerMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case signer.EdgeEventLogs: + ids := make([]ent.Value, 0, len(m.removedeventLogs)) + for id := range m.removedeventLogs { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. func (m *SignerMutation) ClearedEdges() []string { - edges := make([]string, 0, 1) + edges := make([]string, 0, 2) if m.clearedassetPrice { edges = append(edges, signer.EdgeAssetPrice) } + if m.clearedeventLogs { + edges = append(edges, signer.EdgeEventLogs) + } return edges } @@ -1515,6 +2565,8 @@ func (m *SignerMutation) EdgeCleared(name string) bool { switch name { case signer.EdgeAssetPrice: return m.clearedassetPrice + case signer.EdgeEventLogs: + return m.clearedeventLogs } return false } @@ -1534,6 +2586,9 @@ func (m *SignerMutation) ResetEdge(name string) error { case signer.EdgeAssetPrice: m.ResetAssetPrice() return nil + case signer.EdgeEventLogs: + m.ResetEventLogs() + return nil } return fmt.Errorf("unknown Signer edge %s", name) } diff --git a/src/ent/predicate/predicate.go b/src/ent/predicate/predicate.go index 93f5c0ca..610463bb 100644 --- a/src/ent/predicate/predicate.go +++ b/src/ent/predicate/predicate.go @@ -20,5 +20,8 @@ func AssetPriceOrErr(p AssetPrice, err error) AssetPrice { } } +// EventLog is the predicate function for eventlog builders. +type EventLog func(*sql.Selector) + // Signer is the predicate function for signer builders. 
type Signer func(*sql.Selector)
diff --git a/src/ent/runtime.go b/src/ent/runtime.go
index 67cb6417..61104ceb 100644
--- a/src/ent/runtime.go
+++ b/src/ent/runtime.go
@@ -7,6 +7,7 @@ import (
	"entgo.io/ent/schema/field"
	"github.com/KenshiTech/unchained/ent/assetprice"
+	"github.com/KenshiTech/unchained/ent/eventlog"
	"github.com/KenshiTech/unchained/ent/schema"
	"github.com/KenshiTech/unchained/ent/signer"
)
@@ -24,6 +25,16 @@ func init() {
	assetpriceDescSignature := assetpriceFields[3].Descriptor()
	// assetprice.SignatureValidator is a validator for the "signature" field. It is called by the builders before save.
	assetprice.SignatureValidator = assetpriceDescSignature.Validators[0].(func([]byte) error)
+	eventlogFields := schema.EventLog{}.Fields()
+	_ = eventlogFields
+	// eventlogDescSignature is the schema descriptor for signature field.
+	eventlogDescSignature := eventlogFields[2].Descriptor()
+	// eventlog.SignatureValidator is a validator for the "signature" field. It is called by the builders before save.
+	eventlog.SignatureValidator = eventlogDescSignature.Validators[0].(func([]byte) error)
+	// eventlogDescTransaction is the schema descriptor for transaction field.
+	eventlogDescTransaction := eventlogFields[7].Descriptor()
+	// eventlog.TransactionValidator is a validator for the "transaction" field. It is called by the builders before save.
+	eventlog.TransactionValidator = eventlogDescTransaction.Validators[0].(func([]byte) error)
	signerFields := schema.Signer{}.Fields()
	_ = signerFields
	// signerDescName is the schema descriptor for name field.
diff --git a/src/ent/schema/eventlog.go b/src/ent/schema/eventlog.go
new file mode 100644
index 00000000..f51bdfc8
--- /dev/null
+++ b/src/ent/schema/eventlog.go
@@ -0,0 +1,45 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+	"github.com/KenshiTech/unchained/datasets"
+)
+
+// EventLog holds the schema definition for the EventLog entity.
+type EventLog struct {
+	ent.Schema
+}
+
+// Fields of the EventLog.
+func (EventLog) Fields() []ent.Field {
+	return []ent.Field{
+		field.Uint64("block"),
+		field.Uint64("signersCount"),
+		field.Bytes("signature").MaxLen(96),
+		field.String("address"),
+		field.String("chain"),
+		field.Uint64("index"),
+		field.String("event"),
+		field.Bytes("transaction").MaxLen(32),
+		field.JSON("args", []datasets.EventLogArg{}),
+	}
+}
+
+// Edges of the EventLog.
+func (EventLog) Edges() []ent.Edge {
+	return []ent.Edge{
+		// TODO: Make these required on next migrate
+		edge.To("signers", Signer.Type).Required(),
+	}
+}
+
+// Indexes of the EventLog.
+func (EventLog) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("block", "transaction", "index").Unique(),
+		index.Fields("block", "address", "event"),
+	}
+}
diff --git a/src/ent/schema/signer.go b/src/ent/schema/signer.go
index 10933bcb..91b5fb27 100644
--- a/src/ent/schema/signer.go
+++ b/src/ent/schema/signer.go
@@ -26,6 +26,7 @@ func (Signer) Fields() []ent.Field {
func (Signer) Edges() []ent.Edge {
	return []ent.Edge{
		edge.From("assetPrice", AssetPrice.Type).Ref("signers"),
+		edge.From("eventLogs", EventLog.Type).Ref("signers"),
	}
}
diff --git a/src/ent/signer.go b/src/ent/signer.go
index 09825fdc..17191a8c 100644
--- a/src/ent/signer.go
+++ b/src/ent/signer.go
@@ -34,9 +34,11 @@ type Signer struct {
type SignerEdges struct {
	// AssetPrice holds the value of the assetPrice edge.
AssetPrice []*AssetPrice `json:"assetPrice,omitempty"` + // EventLogs holds the value of the eventLogs edge. + EventLogs []*EventLog `json:"eventLogs,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [1]bool + loadedTypes [2]bool } // AssetPriceOrErr returns the AssetPrice value or an error if the edge @@ -48,6 +50,15 @@ func (e SignerEdges) AssetPriceOrErr() ([]*AssetPrice, error) { return nil, &NotLoadedError{edge: "assetPrice"} } +// EventLogsOrErr returns the EventLogs value or an error if the edge +// was not loaded in eager-loading. +func (e SignerEdges) EventLogsOrErr() ([]*EventLog, error) { + if e.loadedTypes[1] { + return e.EventLogs, nil + } + return nil, &NotLoadedError{edge: "eventLogs"} +} + // scanValues returns the types for scanning values from sql.Rows. func (*Signer) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) @@ -122,6 +133,11 @@ func (s *Signer) QueryAssetPrice() *AssetPriceQuery { return NewSignerClient(s.config).QueryAssetPrice(s) } +// QueryEventLogs queries the "eventLogs" edge of the Signer entity. +func (s *Signer) QueryEventLogs() *EventLogQuery { + return NewSignerClient(s.config).QueryEventLogs(s) +} + // Update returns a builder for updating this Signer. // Note that you need to call Signer.Unwrap() before calling this method if this Signer // was returned from a transaction, and the transaction was committed or rolled back. diff --git a/src/ent/signer/signer.go b/src/ent/signer/signer.go index df556c33..bde8ab4b 100644 --- a/src/ent/signer/signer.go +++ b/src/ent/signer/signer.go @@ -22,6 +22,8 @@ const ( FieldPoints = "points" // EdgeAssetPrice holds the string denoting the assetprice edge name in mutations. EdgeAssetPrice = "assetPrice" + // EdgeEventLogs holds the string denoting the eventlogs edge name in mutations. + EdgeEventLogs = "eventLogs" // Table holds the table name of the signer in the database. Table = "signers" // AssetPriceTable is the table that holds the assetPrice relation/edge. The primary key declared below. @@ -29,6 +31,11 @@ const ( // AssetPriceInverseTable is the table name for the AssetPrice entity. // It exists in this package in order to avoid circular dependency with the "assetprice" package. AssetPriceInverseTable = "asset_prices" + // EventLogsTable is the table that holds the eventLogs relation/edge. The primary key declared below. + EventLogsTable = "event_log_signers" + // EventLogsInverseTable is the table name for the EventLog entity. + // It exists in this package in order to avoid circular dependency with the "eventlog" package. + EventLogsInverseTable = "event_logs" ) // Columns holds all SQL columns for signer fields. @@ -44,6 +51,9 @@ var ( // AssetPricePrimaryKey and AssetPriceColumn2 are the table columns denoting the // primary key for the assetPrice relation (M2M). AssetPricePrimaryKey = []string{"asset_price_id", "signer_id"} + // EventLogsPrimaryKey and EventLogsColumn2 are the table columns denoting the + // primary key for the eventLogs relation (M2M). + EventLogsPrimaryKey = []string{"event_log_id", "signer_id"} ) // ValidColumn reports if the column name is valid (part of the table columns). @@ -96,6 +106,20 @@ func ByAssetPrice(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { sqlgraph.OrderByNeighborTerms(s, newAssetPriceStep(), append([]sql.OrderTerm{term}, terms...)...) } } + +// ByEventLogsCount orders the results by eventLogs count. 
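// A typical call site, sketched for illustration only (sql here is
// entgo.io/ent/dialect/sql):
//
//	signers, err := client.Signer.Query().
//	    Order(signer.ByEventLogsCount(sql.OrderDesc())).
//	    All(ctx)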
+func ByEventLogsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEventLogsStep(), opts...) + } +} + +// ByEventLogs orders the results by eventLogs terms. +func ByEventLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEventLogsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} func newAssetPriceStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), @@ -103,3 +127,10 @@ func newAssetPriceStep() *sqlgraph.Step { sqlgraph.Edge(sqlgraph.M2M, true, AssetPriceTable, AssetPricePrimaryKey...), ) } +func newEventLogsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EventLogsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, EventLogsTable, EventLogsPrimaryKey...), + ) +} diff --git a/src/ent/signer/where.go b/src/ent/signer/where.go index f3d5caae..3501c803 100644 --- a/src/ent/signer/where.go +++ b/src/ent/signer/where.go @@ -281,6 +281,29 @@ func HasAssetPriceWith(preds ...predicate.AssetPrice) predicate.Signer { }) } +// HasEventLogs applies the HasEdge predicate on the "eventLogs" edge. +func HasEventLogs() predicate.Signer { + return predicate.Signer(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, EventLogsTable, EventLogsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasEventLogsWith applies the HasEdge predicate on the "eventLogs" edge with a given conditions (other predicates). +func HasEventLogsWith(preds ...predicate.EventLog) predicate.Signer { + return predicate.Signer(func(s *sql.Selector) { + step := newEventLogsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Signer) predicate.Signer { return predicate.Signer(sql.AndPredicates(predicates...)) diff --git a/src/ent/signer_create.go b/src/ent/signer_create.go index d7ecb257..cbd31f87 100644 --- a/src/ent/signer_create.go +++ b/src/ent/signer_create.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/KenshiTech/unchained/ent/assetprice" + "github.com/KenshiTech/unchained/ent/eventlog" "github.com/KenshiTech/unchained/ent/signer" ) @@ -61,6 +62,21 @@ func (sc *SignerCreate) AddAssetPrice(a ...*AssetPrice) *SignerCreate { return sc.AddAssetPriceIDs(ids...) } +// AddEventLogIDs adds the "eventLogs" edge to the EventLog entity by IDs. +func (sc *SignerCreate) AddEventLogIDs(ids ...int) *SignerCreate { + sc.mutation.AddEventLogIDs(ids...) + return sc +} + +// AddEventLogs adds the "eventLogs" edges to the EventLog entity. +func (sc *SignerCreate) AddEventLogs(e ...*EventLog) *SignerCreate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return sc.AddEventLogIDs(ids...) +} + // Mutation returns the SignerMutation object of the builder. 
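// The mutation also tracks the added, removed and cleared state of the new
// "eventLogs" edge. For illustration only (required Signer fields omitted),
// the edge can be populated straight from the create builder:
//
//	_, err := client.Signer.Create().
//	    AddEventLogs(logs...).
//	    Save(ctx)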
func (sc *SignerCreate) Mutation() *SignerMutation { return sc.mutation @@ -181,6 +197,22 @@ func (sc *SignerCreate) createSpec() (*Signer, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } + if nodes := sc.mutation.EventLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } return _node, _spec } diff --git a/src/ent/signer_query.go b/src/ent/signer_query.go index 01692dbf..fbd789e7 100644 --- a/src/ent/signer_query.go +++ b/src/ent/signer_query.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/KenshiTech/unchained/ent/assetprice" + "github.com/KenshiTech/unchained/ent/eventlog" "github.com/KenshiTech/unchained/ent/predicate" "github.com/KenshiTech/unchained/ent/signer" ) @@ -24,6 +25,7 @@ type SignerQuery struct { inters []Interceptor predicates []predicate.Signer withAssetPrice *AssetPriceQuery + withEventLogs *EventLogQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -82,6 +84,28 @@ func (sq *SignerQuery) QueryAssetPrice() *AssetPriceQuery { return query } +// QueryEventLogs chains the current query on the "eventLogs" edge. +func (sq *SignerQuery) QueryEventLogs() *EventLogQuery { + query := (&EventLogClient{config: sq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := sq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := sq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(signer.Table, signer.FieldID, selector), + sqlgraph.To(eventlog.Table, eventlog.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, signer.EventLogsTable, signer.EventLogsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(sq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first Signer entity from the query. // Returns a *NotFoundError when no Signer was found. func (sq *SignerQuery) First(ctx context.Context) (*Signer, error) { @@ -275,6 +299,7 @@ func (sq *SignerQuery) Clone() *SignerQuery { inters: append([]Interceptor{}, sq.inters...), predicates: append([]predicate.Signer{}, sq.predicates...), withAssetPrice: sq.withAssetPrice.Clone(), + withEventLogs: sq.withEventLogs.Clone(), // clone intermediate query. sql: sq.sql.Clone(), path: sq.path, @@ -292,6 +317,17 @@ func (sq *SignerQuery) WithAssetPrice(opts ...func(*AssetPriceQuery)) *SignerQue return sq } +// WithEventLogs tells the query-builder to eager-load the nodes that are connected to +// the "eventLogs" edge. The optional arguments are used to configure the query builder of the edge. +func (sq *SignerQuery) WithEventLogs(opts ...func(*EventLogQuery)) *SignerQuery { + query := (&EventLogClient{config: sq.config}).Query() + for _, opt := range opts { + opt(query) + } + sq.withEventLogs = query + return sq +} + // GroupBy is used to group vertices by one or more fields/columns. // It is often used with aggregate functions, like: count, max, mean, min, sum. 
// @@ -370,8 +406,9 @@ func (sq *SignerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Signe var ( nodes = []*Signer{} _spec = sq.querySpec() - loadedTypes = [1]bool{ + loadedTypes = [2]bool{ sq.withAssetPrice != nil, + sq.withEventLogs != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { @@ -399,6 +436,13 @@ func (sq *SignerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Signe return nil, err } } + if query := sq.withEventLogs; query != nil { + if err := sq.loadEventLogs(ctx, query, nodes, + func(n *Signer) { n.Edges.EventLogs = []*EventLog{} }, + func(n *Signer, e *EventLog) { n.Edges.EventLogs = append(n.Edges.EventLogs, e) }); err != nil { + return nil, err + } + } return nodes, nil } @@ -463,6 +507,67 @@ func (sq *SignerQuery) loadAssetPrice(ctx context.Context, query *AssetPriceQuer } return nil } +func (sq *SignerQuery) loadEventLogs(ctx context.Context, query *EventLogQuery, nodes []*Signer, init func(*Signer), assign func(*Signer, *EventLog)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*Signer) + nids := make(map[int]map[*Signer]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(signer.EventLogsTable) + s.Join(joinT).On(s.C(eventlog.FieldID), joinT.C(signer.EventLogsPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(signer.EventLogsPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(signer.EventLogsPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*Signer]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*EventLog](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "eventLogs" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} func (sq *SignerQuery) sqlCount(ctx context.Context) (int, error) { _spec := sq.querySpec() diff --git a/src/ent/signer_update.go b/src/ent/signer_update.go index 731fedd0..d08f0740 100644 --- a/src/ent/signer_update.go +++ b/src/ent/signer_update.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/KenshiTech/unchained/ent/assetprice" + "github.com/KenshiTech/unchained/ent/eventlog" "github.com/KenshiTech/unchained/ent/predicate" "github.com/KenshiTech/unchained/ent/signer" ) @@ -90,6 +91,21 @@ func (su *SignerUpdate) AddAssetPrice(a ...*AssetPrice) *SignerUpdate { return su.AddAssetPriceIDs(ids...) 
} +// AddEventLogIDs adds the "eventLogs" edge to the EventLog entity by IDs. +func (su *SignerUpdate) AddEventLogIDs(ids ...int) *SignerUpdate { + su.mutation.AddEventLogIDs(ids...) + return su +} + +// AddEventLogs adds the "eventLogs" edges to the EventLog entity. +func (su *SignerUpdate) AddEventLogs(e ...*EventLog) *SignerUpdate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return su.AddEventLogIDs(ids...) +} + // Mutation returns the SignerMutation object of the builder. func (su *SignerUpdate) Mutation() *SignerMutation { return su.mutation @@ -116,6 +132,27 @@ func (su *SignerUpdate) RemoveAssetPrice(a ...*AssetPrice) *SignerUpdate { return su.RemoveAssetPriceIDs(ids...) } +// ClearEventLogs clears all "eventLogs" edges to the EventLog entity. +func (su *SignerUpdate) ClearEventLogs() *SignerUpdate { + su.mutation.ClearEventLogs() + return su +} + +// RemoveEventLogIDs removes the "eventLogs" edge to EventLog entities by IDs. +func (su *SignerUpdate) RemoveEventLogIDs(ids ...int) *SignerUpdate { + su.mutation.RemoveEventLogIDs(ids...) + return su +} + +// RemoveEventLogs removes "eventLogs" edges to EventLog entities. +func (su *SignerUpdate) RemoveEventLogs(e ...*EventLog) *SignerUpdate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return su.RemoveEventLogIDs(ids...) +} + // Save executes the query and returns the number of nodes affected by the update operation. func (su *SignerUpdate) Save(ctx context.Context) (int, error) { return withHooks(ctx, su.sqlSave, su.mutation, su.hooks) @@ -235,6 +272,51 @@ func (su *SignerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if su.mutation.EventLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := su.mutation.RemovedEventLogsIDs(); len(nodes) > 0 && !su.mutation.EventLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := su.mutation.EventLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if n, err = sqlgraph.UpdateNodes(ctx, su.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{signer.Label} @@ -317,6 +399,21 @@ func (suo *SignerUpdateOne) AddAssetPrice(a ...*AssetPrice) *SignerUpdateOne { return suo.AddAssetPriceIDs(ids...) } +// AddEventLogIDs adds the "eventLogs" edge to the EventLog entity by IDs. +func (suo *SignerUpdateOne) AddEventLogIDs(ids ...int) *SignerUpdateOne { + suo.mutation.AddEventLogIDs(ids...) 
+ return suo +} + +// AddEventLogs adds the "eventLogs" edges to the EventLog entity. +func (suo *SignerUpdateOne) AddEventLogs(e ...*EventLog) *SignerUpdateOne { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return suo.AddEventLogIDs(ids...) +} + // Mutation returns the SignerMutation object of the builder. func (suo *SignerUpdateOne) Mutation() *SignerMutation { return suo.mutation @@ -343,6 +440,27 @@ func (suo *SignerUpdateOne) RemoveAssetPrice(a ...*AssetPrice) *SignerUpdateOne return suo.RemoveAssetPriceIDs(ids...) } +// ClearEventLogs clears all "eventLogs" edges to the EventLog entity. +func (suo *SignerUpdateOne) ClearEventLogs() *SignerUpdateOne { + suo.mutation.ClearEventLogs() + return suo +} + +// RemoveEventLogIDs removes the "eventLogs" edge to EventLog entities by IDs. +func (suo *SignerUpdateOne) RemoveEventLogIDs(ids ...int) *SignerUpdateOne { + suo.mutation.RemoveEventLogIDs(ids...) + return suo +} + +// RemoveEventLogs removes "eventLogs" edges to EventLog entities. +func (suo *SignerUpdateOne) RemoveEventLogs(e ...*EventLog) *SignerUpdateOne { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return suo.RemoveEventLogIDs(ids...) +} + // Where appends a list predicates to the SignerUpdate builder. func (suo *SignerUpdateOne) Where(ps ...predicate.Signer) *SignerUpdateOne { suo.mutation.Where(ps...) @@ -492,6 +610,51 @@ func (suo *SignerUpdateOne) sqlSave(ctx context.Context) (_node *Signer, err err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if suo.mutation.EventLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := suo.mutation.RemovedEventLogsIDs(); len(nodes) > 0 && !suo.mutation.EventLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := suo.mutation.EventLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: signer.EventLogsTable, + Columns: signer.EventLogsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(eventlog.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } _node = &Signer{config: suo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/src/ent/tx.go b/src/ent/tx.go index c89db6b8..1ab39fd7 100644 --- a/src/ent/tx.go +++ b/src/ent/tx.go @@ -14,6 +14,8 @@ type Tx struct { config // AssetPrice is the client for interacting with the AssetPrice builders. AssetPrice *AssetPriceClient + // EventLog is the client for interacting with the EventLog builders. + EventLog *EventLogClient // Signer is the client for interacting with the Signer builders. 
Signer *SignerClient @@ -148,6 +150,7 @@ func (tx *Tx) Client() *Client { func (tx *Tx) init() { tx.AssetPrice = NewAssetPriceClient(tx.config) + tx.EventLog = NewEventLogClient(tx.config) tx.Signer = NewSignerClient(tx.config) } diff --git a/src/ethereum/rpc.go b/src/ethereum/rpc.go index 4e24fcd8..bbd19de2 100644 --- a/src/ethereum/rpc.go +++ b/src/ethereum/rpc.go @@ -2,7 +2,6 @@ package ethereum import ( "context" - "log" "reflect" "github.com/KenshiTech/unchained/config" @@ -12,63 +11,76 @@ import ( "github.com/ethereum/go-ethereum/ethclient" ) -var rpcList []string -var rpcIndex int -var Client *ethclient.Client +var rpcList map[string][]string +var rpcIndex map[string]int +var Clients map[string]*ethclient.Client func Start() { - rpcConfig := config.Config.Get("rpc.ethereum") - rpcIndex = 0 + rpcConf := config.Config.Sub("rpc") + networkNames := rpcConf.AllKeys() - switch reflect.TypeOf(rpcConfig).Kind() { - case reflect.String: - rpcList = append(rpcList, rpcConfig.(string)) + for _, name := range networkNames { - case reflect.Slice: - for _, rpc := range rpcConfig.([]interface{}) { - rpcList = append(rpcList, rpc.(string)) + conf := rpcConf.Get(name) + rpcIndex[name] = 0 + + switch reflect.TypeOf(conf).Kind() { + case reflect.String: + rpcList[name] = append(rpcList[name], conf.(string)) + + case reflect.Slice: + for _, rpc := range conf.([]interface{}) { + rpcList[name] = append(rpcList[name], rpc.(string)) + } + default: + panic("RPC List Is Invalid") } - default: - panic("RPC List Is Invalid") - } - RefreshRPC() + RefreshRPC(name) + } } -func refreshRPCWithRetries(retries int) bool { +func refreshRPCWithRetries(network string, retries int) bool { if retries == 0 { - log.Fatal("Cannot connect to any of the provided RPCs") + panic("Cannot connect to any of the provided RPCs") } - if rpcIndex == len(rpcList)-1 { - rpcIndex = 0 + if rpcIndex[network] == len(rpcList[network])-1 { + rpcIndex[network] = 0 } else { - rpcIndex++ + rpcIndex[network]++ } var err error - Client, err = ethclient.Dial(rpcList[rpcIndex]) + index := rpcIndex[network] + Clients[network], err = ethclient.Dial(rpcList[network][index]) if err != nil { - return refreshRPCWithRetries(retries - 1) + return refreshRPCWithRetries(network, retries-1) } return true } -func RefreshRPC() { - refreshRPCWithRetries(len(rpcList)) +func RefreshRPC(network string) { + refreshRPCWithRetries(network, len(rpcList)) } -func GetNewUniV3Contract(address string, refresh bool) (*contracts.UniV3, error) { +func GetNewUniV3Contract(network string, address string, refresh bool) (*contracts.UniV3, error) { if refresh { - RefreshRPC() + RefreshRPC(network) } - return contracts.NewUniV3(common.HexToAddress(address), Client) + return contracts.NewUniV3(common.HexToAddress(address), Clients[network]) +} + +func GetBlockNumber(network string) (uint64, error) { + return Clients[network].BlockNumber(context.Background()) } -func GetBlockNumber() (uint64, error) { - return Client.BlockNumber(context.Background()) +func init() { + rpcList = make(map[string][]string) + rpcIndex = make(map[string]int) + Clients = make(map[string]*ethclient.Client) } diff --git a/src/go.mod b/src/go.mod index 94b5cd05..efe58126 100644 --- a/src/go.mod +++ b/src/go.mod @@ -6,6 +6,7 @@ require ( entgo.io/ent v0.13.0 github.com/btcsuite/btcutil v1.0.2 github.com/consensys/gnark-crypto v0.12.1 + github.com/dgraph-io/badger/v4 v4.2.0 github.com/dustinkirkland/golang-petname v0.0.0-20231002161417-6a283f1aaaf2 
github.com/ethereum/go-ethereum v1.13.12 github.com/go-co-op/gocron/v2 v2.2.4 @@ -30,14 +31,23 @@ require ( github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.1 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/inflect v0.19.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/flatbuffers v1.12.1 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -45,12 +55,14 @@ require ( github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.4.0 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect @@ -66,6 +78,7 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zclconf/go-cty v1.14.2 // indirect + go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect golang.org/x/mod v0.15.0 // indirect @@ -73,6 +86,7 @@ require ( golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/tools v0.18.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect diff --git a/src/go.sum b/src/go.sum index 170490b0..ab48a0d4 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,7 +1,9 @@ ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= entgo.io/ent v0.13.0 h1:DclxWczaCpyiKn6ZWVcJjq1zIKtJ11iNKy+08lNYsJE= entgo.io/ent v0.13.0/go.mod 
h1:+oU8oGna69xy29O+g+NEz+/TM7yJDhQQGJfuOWq1pT8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= @@ -33,10 +35,14 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= @@ -70,8 +76,20 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustinkirkland/golang-petname v0.0.0-20231002161417-6a283f1aaaf2 h1:S6Dco8FtAhEI/qkg/00H6RdEGC+MCy5GPiQ+xweNRFE= github.com/dustinkirkland/golang-petname v0.0.0-20231002161417-6a283f1aaaf2/go.mod 
h1:8AuBTZBRSFqEYBPYULd+NN474/zZBLP+6WeT5S9xlAc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/c-kzg-4844 v0.4.1 h1:ftiEBwhGX3Q08lJiMEfoSmqiUZPyad0exVSmGLjyPuc= github.com/ethereum/c-kzg-4844 v0.4.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.13.12 h1:iDr9UM2JWkngBHGovRJEQn4Kor7mT4gt9rUZqB5M29Y= @@ -102,16 +120,42 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v1.12.1 
h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= @@ -141,6 +185,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= @@ -192,6 +238,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= @@ -235,8 +282,10 @@ 
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -259,45 +308,107 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod 
h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -310,10 +421,13 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXL gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/src/net/client/client.go b/src/net/client/client.go index 458c8f1b..3e92af82 100644 --- a/src/net/client/client.go +++ b/src/net/client/client.go @@ -9,6 +9,7 @@ import ( "github.com/KenshiTech/unchained/bls" "github.com/KenshiTech/unchained/config" "github.com/KenshiTech/unchained/constants" + "github.com/KenshiTech/unchained/constants/opcodes" "github.com/KenshiTech/unchained/kosk" "github.com/KenshiTech/unchained/log" @@ -22,6 +23,10 @@ var Done chan struct{} func StartClient() { + if !config.Config.IsSet("broker.uri") { + return + } + brokerUrl := fmt.Sprintf( "%s/%s", config.Config.GetString("broker.uri"), @@ -55,7 +60,7 @@ func StartClient() { for { _, payload, err := Client.ReadMessage() - if err != nil || payload[0] == 5 { + if err != nil || payload[0] == opcodes.Error { if err != nil { log.Logger. @@ -92,12 +97,12 @@ func StartClient() { switch payload[0] { // TODO: Make a table of call codes - case 2: + case opcodes.Feedback: log.Logger. With("Feedback", string(payload[1:])). Info("Broker") - case 4: + case opcodes.KoskChallenge: // TODO: Refactor into a function // TODO: Check for errors! 
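// Kosk ("knowledge of secret key") handshake, roughly: the broker sends a
// random challenge, the worker signs it with its BLS key and replies with the
// KoskResult opcode, and the broker treats the connection as verified only
// once that proof checks out.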
var challenge kosk.Challenge @@ -110,7 +115,7 @@ func StartClient() { Client.WriteMessage( websocket.BinaryMessage, - append([]byte{3}, koskPayload...), + append([]byte{opcodes.KoskResult}, koskPayload...), ) if err != nil { @@ -119,8 +124,11 @@ func StartClient() { Error("Write error") } - case 7: - Consume(payload[1:]) + case opcodes.PriceReportBroadcast: + ConsumePriceReport(payload[1:]) + + case opcodes.EventLogBroadcast: + ConsumeEventLog(payload[1:]) default: log.Logger. @@ -131,14 +139,22 @@ func StartClient() { } }() - Client.WriteMessage(websocket.BinaryMessage, append([]byte{0}, helloPayload...)) + Client.WriteMessage( + websocket.BinaryMessage, + append([]byte{opcodes.Hello}, helloPayload...)) +} + +func closeConnection() { + if config.Config.IsSet("broker.uri") { + Client.Close() + } } func ClientBlock() { interrupt := make(chan os.Signal, 1) signal.Notify(interrupt, os.Interrupt) - defer Client.Close() + defer closeConnection() for { select { @@ -146,13 +162,15 @@ func ClientBlock() { return case <-interrupt: - err := Client.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if config.Config.IsSet("broker.uri") { + err := Client.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - log.Logger. - With("Error", err). - Error("Connection closed") - return + if err != nil { + log.Logger. + With("Error", err). + Error("Connection closed") + return + } } select { diff --git a/src/net/client/consumer.go b/src/net/client/consumer.go index b536d04f..ecf12e33 100644 --- a/src/net/client/consumer.go +++ b/src/net/client/consumer.go @@ -7,8 +7,8 @@ import ( "github.com/vmihailenco/msgpack/v5" ) -func Consume(message []byte) { - var packet datasets.BroadcastPacket +func ConsumePriceReport(message []byte) { + var packet datasets.BroadcastPricePacket err := msgpack.Unmarshal(message[1:], &packet) if err != nil { panic(err) @@ -21,6 +21,20 @@ func Consume(message []byte) { Info("Attestation") } +func ConsumeEventLog(message []byte) { + var packet datasets.BroadcastEventPacket + err := msgpack.Unmarshal(message[1:], &packet) + if err != nil { + panic(err) + } + log.Logger. + With("Validators", len(packet.Signers)). + With("Chain", packet.Info.Chain). + With("Address", packet.Info.Address). + With("Event", packet.Info.Event). 
+ Info("Attestation") +} + func StartConsumer() { Client.WriteMessage(websocket.BinaryMessage, []byte{6}) } diff --git a/src/net/server.go b/src/net/server.go index 042a48b0..ae2a2715 100644 --- a/src/net/server.go +++ b/src/net/server.go @@ -10,9 +10,11 @@ import ( "github.com/KenshiTech/unchained/bls" "github.com/KenshiTech/unchained/config" "github.com/KenshiTech/unchained/constants" + "github.com/KenshiTech/unchained/constants/opcodes" "github.com/KenshiTech/unchained/datasets" "github.com/KenshiTech/unchained/kosk" "github.com/KenshiTech/unchained/net/repository" + "github.com/KenshiTech/unchained/plugins/logs" "github.com/KenshiTech/unchained/plugins/uniswap" "github.com/gorilla/websocket" @@ -29,18 +31,30 @@ func processKosk(conn *websocket.Conn, messageType int, payload []byte) error { err := msgpack.Unmarshal(payload, &challenge) if err != nil { - err = conn.WriteMessage(messageType, append([]byte{2}, []byte("packet.invalid")...)) + err = conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("packet.invalid")...), + ) + if err != nil { fmt.Println("write:", err) return err } + return nil } signer, ok := signers.Load(conn) if !ok { - conn.WriteMessage(messageType, append([]byte{2}, []byte("hello.missing")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("hello.missing")...), + ) return errors.New("hello.missing") } @@ -51,11 +65,22 @@ func processKosk(conn *websocket.Conn, messageType int, payload []byte) error { ) if err != nil || !challenge.Passed { - conn.WriteMessage(messageType, append([]byte{5}, []byte("kosk.invalid")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Error}, + []byte("kosk.invalid")...), + ) return errors.New("kosk.invalid") } - conn.WriteMessage(messageType, append([]byte{2}, []byte("kosk.ok")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("kosk.ok")...), + ) + challenges.Store(conn, challenge) return nil @@ -68,16 +93,28 @@ func processHello(conn *websocket.Conn, messageType int, payload []byte) error { if err != nil { // TODO: what's the best way of doing this? 
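// A malformed hello packet is answered with a Feedback opcode and the
// connection is left open rather than dropped, so the peer can resend a
// valid handshake.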
- err = conn.WriteMessage(messageType, append([]byte{2}, []byte("packet.invalid")...)) + err = conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("packet.invalid")...), + ) + if err != nil { fmt.Println("write:", err) return err } + return nil } if signer.Name == "" || len(signer.PublicKey) != 96 { - conn.WriteMessage(messageType, append([]byte{5}, []byte("conf.invalid")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Error}, + []byte("conf.invalid")...), + ) return errors.New("conf.invalid") } @@ -89,12 +126,23 @@ func processHello(conn *websocket.Conn, messageType int, payload []byte) error { }) if publicKeyInUse { - conn.WriteMessage(messageType, append([]byte{5}, []byte("key.duplicate")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Error}, + []byte("key.duplicate")...), + ) return errors.New("key.duplicate") } signers.Store(conn, signer) - err = conn.WriteMessage(messageType, append([]byte{2}, []byte("conf.ok")...)) + + err = conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("conf.ok")...), + ) if err != nil { fmt.Println("write:", err) @@ -109,38 +157,74 @@ func processHello(conn *websocket.Conn, messageType int, payload []byte) error { // TODO: Client should hang on error if err != nil { - conn.WriteMessage(messageType, append([]byte{5}, []byte("kosk.error")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Error}, + []byte("kosk.error")...), + ) return err } - err = conn.WriteMessage(messageType, append([]byte{4}, koskPayload...)) + err = conn.WriteMessage( + messageType, + append( + []byte{opcodes.KoskChallenge}, + koskPayload...), + ) if err != nil { - conn.WriteMessage(messageType, append([]byte{5}, []byte("kosk.error")...)) + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Error}, + []byte("kosk.error")...), + ) return err } return nil } -func processPriceReport(conn *websocket.Conn, messageType int, payload []byte) error { - +func checkPublicKey(conn *websocket.Conn, messageType int) (*bls.Signer, error) { challenge, ok := challenges.Load(conn) if !ok || !challenge.Passed { - conn.WriteMessage(messageType, append([]byte{2}, []byte("kosk.missing")...)) - return errors.New("kosk.missing") + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("kosk.missing")...), + ) + return nil, errors.New("kosk.missing") } signer, ok := signers.Load(conn) if !ok { - conn.WriteMessage(messageType, append([]byte{2}, []byte("hello.missing")...)) - return errors.New("hello.missing") + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("hello.missing")...), + ) + return nil, errors.New("hello.missing") + } + + return &signer, nil +} + +// TODO: Can we use any part of this? 
+func processPriceReport(conn *websocket.Conn, messageType int, payload []byte) error { + + signer, err := checkPublicKey(conn, messageType) + + if err != nil { + return err } var report datasets.PriceReport - err := msgpack.Unmarshal(payload, &report) + err = msgpack.Unmarshal(payload, &report) if err != nil { return nil @@ -170,16 +254,98 @@ func processPriceReport(conn *websocket.Conn, messageType int, payload []byte) e return nil } - ok, _ = bls.Verify(signature, hash, pk) + ok, _ := bls.Verify(signature, hash, pk) message := []byte("signature.invalid") if ok { message = []byte("signature.accepted") // TODO: Only Ethereum is supported atm - uniswap.RecordSignature(signature, signer, report.PriceInfo) + uniswap.RecordSignature( + signature, + *signer, + hash, + report.PriceInfo, + true, + false, + ) } - err = conn.WriteMessage(messageType, append([]byte{2}, message...)) + err = conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + message...), + ) + + if err != nil { + fmt.Println("write:", err) + return err + } + + return nil +} + +func processEventLog(conn *websocket.Conn, messageType int, payload []byte) error { + + signer, err := checkPublicKey(conn, messageType) + + if err != nil { + return err + } + + var logReport datasets.EventLogReport + err = msgpack.Unmarshal(payload, &logReport) + + if err != nil { + return nil + } + + toHash, err := msgpack.Marshal(&logReport.EventLog) + + if err != nil { + return nil + } + + hash, err := bls.Hash(toHash) + + if err != nil { + return nil + } + + signature, err := bls.RecoverSignature(logReport.Signature) + + if err != nil { + return nil + } + + pk, err := bls.RecoverPublicKey(signer.PublicKey) + + if err != nil { + return nil + } + + ok, _ := bls.Verify(signature, hash, pk) + + message := []byte("signature.invalid") + if ok { + message = []byte("signature.accepted") + // TODO: Only Ethereum is supported atm + logs.RecordSignature( + signature, + *signer, + hash, + logReport.EventLog, + true, + false, + ) + } + + err = conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + message...), + ) if err != nil { fmt.Println("write:", err) @@ -211,15 +377,15 @@ func handleAtRoot(w http.ResponseWriter, r *http.Request) { switch payload[0] { // TODO: Make a table of call codes - case 0: + case opcodes.Hello: err := processHello(conn, messageType, payload[1:]) if err != nil { fmt.Println("write:", err) } - case 1: - + case opcodes.PriceReport: + // TODO: Maybe this is unnecessary if payload[1] == 0 { err := processPriceReport(conn, messageType, payload[2:]) @@ -230,25 +396,47 @@ func handleAtRoot(w http.ResponseWriter, r *http.Request) { } else { conn.WriteMessage( messageType, - append([]byte{2}, []byte("Dataset not supported")...), + append( + []byte{opcodes.Feedback}, + []byte("Dataset not supported")...), + ) + } + + case opcodes.EventLog: + // TODO: Maybe this is unnecessary + if payload[1] == 0 { + err := processEventLog(conn, messageType, payload[2:]) + + if err != nil { + fmt.Println("write:", err) + } + + } else { + conn.WriteMessage( + messageType, + append( + []byte{opcodes.Feedback}, + []byte("Dataset not supported")...), ) } - case 3: + case opcodes.KoskResult: err := processKosk(conn, messageType, payload[1:]) if err != nil { fmt.Println("write:", err) } - case 6: + case opcodes.RegisterConsumer: // TODO: Consumers must specify what they're subscribing to repository.Consumers.Store(conn, true) default: err = conn.WriteMessage( messageType, - append([]byte{5}, []byte("Instruction not supported")...), 
+ append( + []byte{opcodes.Error}, + []byte("Instruction not supported")...), ) if err != nil { fmt.Println("write:", err) diff --git a/src/persistence/persistence.go b/src/persistence/persistence.go new file mode 100644 index 00000000..052cde75 --- /dev/null +++ b/src/persistence/persistence.go @@ -0,0 +1,49 @@ +package persistence + +import ( + "encoding/binary" + + badger "github.com/dgraph-io/badger/v4" +) + +var DB *badger.DB + +func Start(contextPath string) { + var err error + options := badger.DefaultOptions(contextPath) + options.Logger = nil + DB, err = badger.Open(options) + if err != nil { + panic(err) + } +} + +func ReadUInt64(key string) (uint64, error) { + var value uint64 + + err := DB.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(key)) + + if err == nil { + err = item.Value(func(val []byte) error { + value = binary.LittleEndian.Uint64(val) + return nil + }) + } + + return err + }) + + return value, err +} + +func WriteUint64(key string, value uint64) error { + err := DB.Update(func(txn *badger.Txn) error { + bytes := binary.LittleEndian.AppendUint64([]byte{}, value) + entry := badger.NewEntry([]byte(key), bytes) + err := txn.SetEntry(entry) + return err + }) + + return err +} diff --git a/src/plugins/logs/logs.go b/src/plugins/logs/logs.go index 3d96584c..abec485e 100644 --- a/src/plugins/logs/logs.go +++ b/src/plugins/logs/logs.go @@ -2,41 +2,82 @@ package logs import ( "context" + "fmt" "math/big" "os" "sort" + "sync" "time" + "github.com/KenshiTech/unchained/bls" "github.com/KenshiTech/unchained/config" + "github.com/KenshiTech/unchained/constants/opcodes" + "github.com/KenshiTech/unchained/datasets" + "github.com/KenshiTech/unchained/db" + "github.com/KenshiTech/unchained/ent" + "github.com/KenshiTech/unchained/ent/signer" "github.com/KenshiTech/unchained/ethereum" "github.com/KenshiTech/unchained/log" "github.com/KenshiTech/unchained/net/client" - "golang.org/x/text/cases" - "golang.org/x/text/language" + "github.com/KenshiTech/unchained/net/consumer" + "github.com/KenshiTech/unchained/persistence" + "github.com/KenshiTech/unchained/utils" + "github.com/gorilla/websocket" + bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" + "github.com/dgraph-io/badger/v4" goEthereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/go-co-op/gocron/v2" + lru "github.com/hashicorp/golang-lru/v2" + "github.com/vmihailenco/msgpack/v5" + "golang.org/x/text/cases" + "golang.org/x/text/language" ) -// var lastSynced map[string]uint64 -var abiMap map[string]abi.ABI -var lastBlock uint64 +type EventKey struct { + Chain string + LogIndex uint64 + TxHash [32]byte +} + +type SupportKey struct { + Chain string + Address string + Event string +} + +var consensus *lru.Cache[EventKey, map[bls12381.G1Affine]uint64] +var signatureCache *lru.Cache[bls12381.G1Affine, []bls.Signature] +var aggregateCache *lru.Cache[bls12381.G1Affine, bls12381.G1Affine] +var DebouncedSaveSignatures func(key bls12381.G1Affine, arg SaveSignatureArgs) +var signatureMutex *sync.Mutex +var supportedEvents map[SupportKey]bool type LogConf struct { - Name string `mapstructure:"name"` - Abi string `mapstructure:"abi"` - Event string `mapstructure:"event"` - Address string `mapstructure:"address"` - From uint64 `mapstructure:"from"` + Name 
string `mapstructure:"name"` + Chain string `mapstructure:"chain"` + Abi string `mapstructure:"abi"` + Event string `mapstructure:"event"` + Address string `mapstructure:"address"` + From *uint64 `mapstructure:"from"` + Step uint64 `mapstructure:"step"` + Store bool `mapstructure:"store"` + Send bool `mapstructure:"send"` + Confrimations uint64 `mapstructure:"confirmations"` } -func GetBlockNumber() (*uint64, error) { - blockNumber, err := ethereum.GetBlockNumber() +// var lastSynced map[string]uint64 +var abiMap map[string]abi.ABI +var lastSyncedBlock map[LogConf]uint64 +var caser cases.Caser + +func GetBlockNumber(network string) (*uint64, error) { + blockNumber, err := ethereum.GetBlockNumber(network) if err != nil { - ethereum.RefreshRPC() + ethereum.RefreshRPC(network) return nil, err } @@ -48,8 +89,425 @@ type Event struct { Value [32]byte } +type SaveSignatureArgs struct { + Info datasets.EventLog + Hash bls12381.G1Affine +} + +func RecordSignature( + signature bls12381.G1Affine, + signer bls.Signer, + hash bls12381.G1Affine, + info datasets.EventLog, + debounce bool, + historical bool) { + + signatureMutex.Lock() + defer signatureMutex.Unlock() + + supportKey := SupportKey{ + Chain: info.Chain, + Address: info.Address, + Event: info.Event, + } + + if supported := supportedEvents[supportKey]; !supported { + return + } + + if !historical { + + blockNumber, err := GetBlockNumber(info.Chain) + + if err != nil { + panic(err) + } + + // TODO: this won't work for Arbitrum + // TODO: we disallow syncing historical events here + if *blockNumber-info.Block > 16 { + return // Data too old + } + } + + key := EventKey{ + Chain: info.Chain, + TxHash: info.TxHash, + LogIndex: info.LogIndex, + } + + if !consensus.Contains(key) { + consensus.Add(key, make(map[bls12381.G1Affine]uint64)) + } + + reportedValues, _ := consensus.Get(key) + reportedValues[hash]++ + isMajority := true + count := reportedValues[hash] + + for _, reportCount := range reportedValues { + if reportCount > count { + isMajority = false + break + } + } + + cached, ok := signatureCache.Get(hash) + + packed := bls.Signature{ + Signature: signature, + Signer: signer, + Processed: false, + } + + if !ok { + signatureCache.Add(hash, []bls.Signature{packed}) + // TODO: This should not only write to DB, + // TODO: but also report to "consumers" + if isMajority { + if debounce { + DebouncedSaveSignatures(hash, SaveSignatureArgs{Hash: hash, Info: info}) + } else { + SaveSignatures(SaveSignatureArgs{Hash: hash, Info: info}) + } + } + return + } + + for _, item := range cached { + if item.Signer.PublicKey == signer.PublicKey { + return + } + } + + cached = append(cached, packed) + signatureCache.Add(hash, cached) + + if isMajority { + if debounce { + DebouncedSaveSignatures(hash, SaveSignatureArgs{Hash: hash, Info: info}) + } else { + SaveSignatures(SaveSignatureArgs{Hash: hash, Info: info}) + } + } +} + +func SaveSignatures(args SaveSignatureArgs) { + + dbClient := db.GetClient() + signatures, ok := signatureCache.Get(args.Hash) + + if !ok { + return + } + + ctx := context.Background() + + var newSigners []bls.Signer + var newSignatures []bls12381.G1Affine + var keys [][]byte + + for i := range signatures { + signature := signatures[i] + keys = append(keys, signature.Signer.PublicKey[:]) + if !signature.Processed { + newSignatures = append(newSignatures, signature.Signature) + newSigners = append(newSigners, signature.Signer) + } + } + + // TODO: This part can be a shared library + err := dbClient.Signer.MapCreateBulk(newSigners, func(sc 
*ent.SignerCreate, i int) { + signer := newSigners[i] + sc.SetName(signer.Name). + SetKey(signer.PublicKey[:]). + SetShortkey(signer.ShortPublicKey[:]). + SetPoints(0) + }). + OnConflictColumns("shortkey"). + UpdateName(). + UpdateKey(). + Update(func(su *ent.SignerUpsert) { + su.AddPoints(1) + }). + Exec(ctx) + + if err != nil { + panic(err) + } + + signerIds, err := dbClient.Signer. + Query(). + Where(signer.KeyIn(keys...)). + IDs(ctx) + + if err != nil { + return + } + + var aggregate bls12381.G1Affine + currentAggregate, ok := aggregateCache.Get(args.Hash) + + if ok { + newSignatures = append(newSignatures, currentAggregate) + } + + aggregate, err = bls.AggregateSignatures(newSignatures) + + if err != nil { + return + } + + signatureBytes := aggregate.Bytes() + + packet := datasets.BroadcastEventPacket{ + Info: args.Info, + Signers: keys, + Signature: signatureBytes, + } + + payload, err := msgpack.Marshal(&packet) + + if err != nil { + panic(err) + } + + consumer.Broadcast( + append( + []byte{opcodes.EventLogBroadcast, 0}, + payload...), + ) + + err = dbClient.EventLog. + Create(). + SetBlock(args.Info.Block). + SetChain(args.Info.Chain). + SetAddress(args.Info.Address). + SetEvent(args.Info.Event). + SetIndex(args.Info.LogIndex). + SetTransaction(args.Info.TxHash[:]). + SetSignersCount(uint64(len(signatures))). + SetSignature(signatureBytes[:]). + SetArgs(args.Info.Args). + AddSignerIDs(signerIds...). + OnConflictColumns("block", "transaction", "index"). + UpdateNewValues(). + Exec(ctx) + + if err != nil { + panic(err) + } + + for _, signature := range signatures { + signature.Processed = true + } + + aggregateCache.Add(args.Hash, aggregate) +} + +func createTask(configs []LogConf, chain string) func() { + return func() { + + if client.IsClientSocketClosed { + return + } + + for _, conf := range configs { + + if conf.Chain != chain { + continue + } + + blockNumber, err := GetBlockNumber(chain) + allowedBlock := *blockNumber - conf.Confrimations + + if err != nil { + return + } + + if lastSyncedBlock[conf] == allowedBlock { + return + } + + contractAddress := common.HexToAddress(conf.Address) + contextKey := fmt.Sprintf("plugins.logs.events.%s", conf.Name) + fromBlock := lastSyncedBlock[conf] + + if fromBlock == 0 { + contextBlock, err := persistence.ReadUInt64(contextKey) + + if err != nil && err != badger.ErrKeyNotFound { + panic(err) + } + + if err != badger.ErrKeyNotFound { + fromBlock = contextBlock + } else if conf.From != nil { + fromBlock = *conf.From + } else { + fromBlock = allowedBlock - conf.Step + } + } + + toBlock := allowedBlock + + if fromBlock-toBlock > conf.Step { + toBlock = fromBlock + conf.Step + } + + query := goEthereum.FilterQuery{ + FromBlock: big.NewInt(int64(fromBlock)), + ToBlock: big.NewInt(int64(toBlock)), + Addresses: []common.Address{contractAddress}, + } + + rpcClient := ethereum.Clients[conf.Chain] + logs, err := rpcClient.FilterLogs(context.Background(), query) + + if err != nil { + panic(err) + } + + contractAbi := abiMap[conf.Abi] + + for _, vLog := range logs { + eventSignature := vLog.Topics[0] + eventAbi, err := contractAbi.EventByID(eventSignature) + + if eventAbi.Name != conf.Event { + continue + } + + if err != nil { + panic(err) + } + + eventData := make(map[string]interface{}) + err = contractAbi.UnpackIntoMap(eventData, eventAbi.Name, vLog.Data) + if err != nil { + panic(err) + } + + indexedParams := make([]abi.Argument, 0) + for _, input := range eventAbi.Inputs { + if input.Indexed { + indexedParams = append(indexedParams, input) + } + } 
+ + err = abi.ParseTopicsIntoMap(eventData, indexedParams, vLog.Topics[1:]) + if err != nil { + panic(err) + } + + var keys []string + for k := range eventData { + keys = append(keys, k) + } + + message := log.Logger. + With("Event", conf.Event). + With("Block", vLog.BlockNumber) + + sort.Strings(keys) + for _, key := range keys { + message = message. + With(caser.String(key), eventData[key]) + } + + message.Info(conf.Name) + + args := []datasets.EventLogArg{} + for _, key := range keys { + args = append( + args, + datasets.EventLogArg{Name: key, Value: eventData[key]}, + ) + } + + event := datasets.EventLog{ + LogIndex: uint64(vLog.Index), + Block: vLog.BlockNumber, + Address: vLog.Address.Hex(), + Event: conf.Event, + Chain: conf.Chain, + TxHash: vLog.TxHash, + Args: args, + } + + toHash, err := msgpack.Marshal(&event) + + if err != nil { + panic(err) + } + + signature, hash := bls.Sign(*bls.ClientSecretKey, toHash) + compressedSignature := signature.Bytes() + + priceReport := datasets.EventLogReport{ + EventLog: event, + Signature: compressedSignature, + } + + payload, err := msgpack.Marshal(&priceReport) + + if err != nil { + panic(err) + } + + if conf.Send { + client.Client.WriteMessage( + websocket.BinaryMessage, + append([]byte{opcodes.EventLog, 0}, payload...), + ) + } + + if conf.Store { + RecordSignature( + signature, + bls.ClientSigner, + hash, + event, + false, + true, + ) + } + } + + lastSyncedBlock[conf] = toBlock + persistence.WriteUint64(contextKey, toBlock) + } + + } +} + +func Setup() { + if !config.Config.IsSet("plugins.logs") { + return + } + + var configs []LogConf + if err := config.Config.UnmarshalKey("plugins.logs.events", &configs); err != nil { + panic(err) + } + + for _, conf := range configs { + key := SupportKey{ + Chain: conf.Chain, + Address: conf.Address, + Event: conf.Event, + } + supportedEvents[key] = true + } + +} + func Start() { + if !config.Config.IsSet("plugins.logs") { + return + } + scheduler, err := gocron.NewScheduler() if err != nil { @@ -57,11 +515,16 @@ func Start() { } var configs []LogConf - if err := config.Config.UnmarshalKey("plugins.logs", &configs); err != nil { + if err := config.Config.UnmarshalKey("plugins.logs.events", &configs); err != nil { panic(err) } for _, conf := range configs { + + if _, exists := abiMap[conf.Abi]; exists { + continue + } + file, err := os.Open(conf.Abi) if err != nil { panic(err) @@ -76,113 +539,54 @@ func Start() { file.Close() } - caser := cases.Title(language.English, cases.NoLower) + scheduleConfs := config.Config.Sub("plugins.logs.schedule") + scheduleNames := scheduleConfs.AllKeys() - _, err = scheduler.NewJob( - gocron.DurationJob(5*time.Second), - gocron.NewTask( - func() { + for index := range scheduleNames { + name := scheduleNames[index] + duration := scheduleConfs.GetDuration(name) * time.Millisecond + task := createTask(configs, name) - if client.IsClientSocketClosed { - return - } - - blockNumber, err := GetBlockNumber() + _, err = scheduler.NewJob( + gocron.DurationJob(duration), + gocron.NewTask(task), + gocron.WithSingletonMode(gocron.LimitModeReschedule), + ) - if err != nil { - return - } + if err != nil { + panic(err) + } + } - if lastBlock == *blockNumber { - return - } + scheduler.Start() +} - lastBlock = *blockNumber +func init() { - for _, conf := range configs { + DebouncedSaveSignatures = utils.Debounce[bls12381.G1Affine, SaveSignatureArgs](5*time.Second, SaveSignatures) + signatureMutex = new(sync.Mutex) - contractAddress := common.HexToAddress(conf.Address) + abiMap = 
make(map[string]abi.ABI) + lastSyncedBlock = make(map[LogConf]uint64) + caser = cases.Title(language.English, cases.NoLower) + supportedEvents = make(map[SupportKey]bool) - query := goEthereum.FilterQuery{ - FromBlock: big.NewInt(int64(lastBlock)), - ToBlock: big.NewInt(int64(lastBlock)), - Addresses: []common.Address{contractAddress}, - } + var err error + signatureCache, err = lru.New[bls12381.G1Affine, []bls.Signature](24) - logs, err := ethereum.Client.FilterLogs(context.Background(), query) - if err != nil { - panic(err) - } + if err != nil { + panic(err) + } - contractAbi := abiMap[conf.Abi] - - for _, vLog := range logs { - eventSignature := vLog.Topics[0] - eventAbi, err := contractAbi.EventByID(eventSignature) - - if eventAbi.Name != conf.Event { - continue - } - - if err != nil { - panic(err) - } - - // Unpack the log's data using the event's name - // This gives you a map of the event's arguments - eventData := make(map[string]interface{}) - err = contractAbi.UnpackIntoMap(eventData, eventAbi.Name, vLog.Data) - if err != nil { - panic(err) - } - - for i, arg := range eventAbi.Inputs { - if arg.Indexed { - switch arg.Type.String() { - case "address": - if len(vLog.Topics) > i { - eventData[arg.Name] = common.BytesToAddress(vLog.Topics[i+1].Bytes()).Hex() - } - case "uint256", "uint8", "uint16", "uint32", "uint64": - if len(vLog.Topics) > i { - num := new(big.Int).SetBytes(vLog.Topics[i+1][:]) - eventData[arg.Name] = num - } - } - // TODO: Add support for more types - } - } - - var keys []string - for k := range eventData { - keys = append(keys, k) - } - - message := log.Logger. - With("Event", conf.Event). - With("Block", lastBlock) - - sort.Strings(keys) - for _, key := range keys { - message = message. - With(caser.String(key), eventData[key]) - } - - message.Info(conf.Name) - } - } - }, - ), - ) + consensus, err = lru.New[EventKey, map[bls12381.G1Affine]uint64](24) if err != nil { panic(err) } - scheduler.Start() -} + aggregateCache, err = lru.New[bls12381.G1Affine, bls12381.G1Affine](24) -func init() { - //lastSynced = make(map[string]uint64) - abiMap = make(map[string]abi.ABI) + if err != nil { + panic(err) + } } diff --git a/src/plugins/uniswap/uniswap.go b/src/plugins/uniswap/uniswap.go index 3f1e7c56..ecbdcdfa 100644 --- a/src/plugins/uniswap/uniswap.go +++ b/src/plugins/uniswap/uniswap.go @@ -10,6 +10,7 @@ import ( "github.com/KenshiTech/unchained/bls" "github.com/KenshiTech/unchained/config" + "github.com/KenshiTech/unchained/constants/opcodes" "github.com/KenshiTech/unchained/datasets" "github.com/KenshiTech/unchained/db" "github.com/KenshiTech/unchained/ent" @@ -31,26 +32,12 @@ import ( lru "github.com/hashicorp/golang-lru/v2" ) -var DebouncedSaveSignatures func(key AssetKey, arg datasets.PriceInfo) +var DebouncedSaveSignatures func(key bls12381.G1Affine, arg SaveSignatureArgs) var signatureMutex *sync.Mutex -var priceCache map[string]*lru.Cache[uint64, big.Int] -var signatureCache *lru.Cache[AssetKey, []bls.Signature] -var aggregateCache *lru.Cache[AssetKey, bls12381.G1Affine] - -var twoNinetySix big.Int -var tenEighteen big.Int -var tenEighteenF big.Float -var lastBlock uint64 -var lastPrice big.Int - -type Token struct { - Name string `mapstructure:"name"` - Pair string `mapstructure:"pair"` - Unit string `mapstructure:"unit"` - Symbol string `mapstructure:"symbol"` - Delta int64 `mapstructure:"delta"` - Invert bool `mapstructure:"invert"` +type TokenKey struct { + Pair string + Chain string } type AssetKey 
struct { @@ -60,75 +47,109 @@ type AssetKey struct { Block uint64 } +var priceCache map[string]*lru.Cache[uint64, big.Int] +var consensus *lru.Cache[AssetKey, map[bls12381.G1Affine]uint64] +var signatureCache *lru.Cache[bls12381.G1Affine, []bls.Signature] +var aggregateCache *lru.Cache[bls12381.G1Affine, bls12381.G1Affine] +var supportedTokens map[TokenKey]bool + +var twoOneNineTwo big.Int +var tenEighteen big.Int +var tenEighteenF big.Float +var lastBlock map[string]uint64 +var crossPrices map[string]big.Int +var lastPrice big.Int +var caser cases.Caser + +type Token struct { + Id *string `mapstructure:"id"` + Chain string `mapstructure:"chain"` + Name string `mapstructure:"name"` + Pair string `mapstructure:"pair"` + Unit string `mapstructure:"unit"` + Symbol string `mapstructure:"symbol"` + Delta int64 `mapstructure:"delta"` + Invert bool `mapstructure:"invert"` + Store bool `mapstructure:"store"` + Send bool `mapstructure:"send"` + Cross []string `mapstructure:"cross"` +} + // TODO: This needs to work with different datasets +// TODO: Can we turn this into a library func? func RecordSignature( signature bls12381.G1Affine, signer bls.Signer, - info datasets.PriceInfo) { + hash bls12381.G1Affine, + info datasets.PriceInfo, + debounce bool, + historical bool) { signatureMutex.Lock() defer signatureMutex.Unlock() - lruCache := priceCache[strings.ToLower(info.Pair)] - - if lruCache == nil { + // TODO: Invert makes a difference here + tokenKey := TokenKey{Chain: info.Chain, Pair: info.Pair} + if supported := supportedTokens[tokenKey]; !supported { return } - // TODO: Needs optimization - if !lruCache.Contains(info.Block) { - - var tokens []Token - if err := config.Config.UnmarshalKey("plugins.uniswap.tokens", &tokens); err != nil { - panic(err) - } - - var found Token - - for _, token := range tokens { - if strings.EqualFold(token.Pair, info.Pair) && - strings.EqualFold(token.Name, info.Asset) { - found = token - break - } - } - - if len(found.Pair) == 0 { - return - } + if !historical { - blockNumber, _, err := GetPriceFromPair( - found.Pair, - found.Delta, - found.Invert, - ) + blockNumber, err := GetBlockNumber(info.Chain) if err != nil { - return + panic(err) } - lastBlock = *blockNumber - } - - if lastBlock-info.Block > 16 { - return // Data too old + // TODO: this won't work for Arbitrum + if *blockNumber-info.Block > 16 { + return // Data too old + } } key := AssetKey{ - Block: info.Block, Asset: info.Asset, Chain: info.Chain, Pair: info.Pair, + Block: info.Block, } - cached, ok := signatureCache.Get(key) - packed := bls.Signature{Signature: signature, Signer: signer, Processed: false} + if !consensus.Contains(key) { + consensus.Add(key, make(map[bls12381.G1Affine]uint64)) + } + + reportedValues, _ := consensus.Get(key) + reportedValues[hash]++ + isMajority := true + count := reportedValues[hash] + + for _, reportCount := range reportedValues { + if reportCount > count { + isMajority = false + break + } + } + + cached, ok := signatureCache.Get(hash) + + packed := bls.Signature{ + Signature: signature, + Signer: signer, + Processed: false, + } if !ok { - signatureCache.Add(key, []bls.Signature{packed}) + signatureCache.Add(hash, []bls.Signature{packed}) // TODO: This should not only write to DB, // TODO: but also report to "consumers" - DebouncedSaveSignatures(key, info) + if isMajority { + if debounce { + DebouncedSaveSignatures(hash, SaveSignatureArgs{Hash: hash, Info: info}) + } else { + SaveSignatures(SaveSignatureArgs{Hash: hash, Info: info}) + } + } return } @@ -139,29 +160,26 @@ 
func RecordSignature( } cached = append(cached, packed) - signatureCache.Add(key, cached) + signatureCache.Add(hash, cached) - DebouncedSaveSignatures(key, info) + if isMajority { + if debounce { + DebouncedSaveSignatures(hash, SaveSignatureArgs{Hash: hash, Info: info}) + } else { + SaveSignatures(SaveSignatureArgs{Hash: hash, Info: info}) + } + } } -func SaveSignatures(info datasets.PriceInfo) { - - dbClient := db.GetClient() - lruCache := priceCache[strings.ToLower(info.Pair)] - price, ok := lruCache.Get(info.Block) - - if !ok { - return - } +type SaveSignatureArgs struct { + Info datasets.PriceInfo + Hash bls12381.G1Affine +} - key := AssetKey{ - Block: info.Block, - Asset: info.Asset, - Chain: info.Chain, - Pair: info.Pair, - } +func SaveSignatures(args SaveSignatureArgs) { - signatures, ok := signatureCache.Get(key) + dbClient := db.GetClient() + signatures, ok := signatureCache.Get(args.Hash) if !ok { return @@ -211,7 +229,7 @@ func SaveSignatures(info datasets.PriceInfo) { } var aggregate bls12381.G1Affine - currentAggregate, ok := aggregateCache.Get(key) + currentAggregate, ok := aggregateCache.Get(args.Hash) if ok { newSignatures = append(newSignatures, currentAggregate) @@ -225,8 +243,8 @@ func SaveSignatures(info datasets.PriceInfo) { signatureBytes := aggregate.Bytes() - packet := datasets.BroadcastPacket{ - Info: info, + packet := datasets.BroadcastPricePacket{ + Info: args.Info, Signers: keys, Signature: signatureBytes, } @@ -235,16 +253,20 @@ func SaveSignatures(info datasets.PriceInfo) { if err == nil { // TODO: Handle errors in a proper way - consumer.Broadcast(append([]byte{7, 0}, payload...)) + consumer.Broadcast( + append( + []byte{opcodes.PriceReportBroadcast, 0}, + payload...), + ) } err = dbClient.AssetPrice. Create(). - SetPair(info.Pair). - SetAsset(info.Asset). - SetChain(info.Chain). - SetBlock(info.Block). - SetPrice(&price). + SetPair(args.Info.Pair). + SetAsset(args.Info.Asset). + SetChain(args.Info.Chain). + SetBlock(args.Info.Block). + SetPrice(&args.Info.Price). SetSignersCount(uint64(len(signatures))). SetSignature(signatureBytes[:]). AddSignerIDs(signerIds...). 
@@ -260,7 +282,7 @@ func SaveSignatures(info datasets.PriceInfo) { signature.Processed = true } - aggregateCache.Add(key, aggregate) + aggregateCache.Add(args.Hash, aggregate) } func GetPriceFromCache(block uint64, pair string) (big.Int, bool) { @@ -268,11 +290,11 @@ func GetPriceFromCache(block uint64, pair string) (big.Int, bool) { return lruCache.Get(block) } -func GetBlockNumber() (*uint64, error) { - blockNumber, err := ethereum.GetBlockNumber() +func GetBlockNumber(network string) (*uint64, error) { + blockNumber, err := ethereum.GetBlockNumber(network) if err != nil { - ethereum.RefreshRPC() + ethereum.RefreshRPC(network) return nil, err } @@ -280,15 +302,16 @@ func GetBlockNumber() (*uint64, error) { } func GetPriceAtBlockFromPair( + network string, blockNumber uint64, pairAddr string, decimalDif int64, inverse bool) (*big.Int, error) { - pair, err := ethereum.GetNewUniV3Contract(pairAddr, false) + pair, err := ethereum.GetNewUniV3Contract(network, pairAddr, false) if err != nil { - ethereum.RefreshRPC() + ethereum.RefreshRPC(network) return nil, err } @@ -298,7 +321,7 @@ func GetPriceAtBlockFromPair( }) if err != nil { - ethereum.RefreshRPC() + ethereum.RefreshRPC(network) return nil, err } @@ -310,18 +333,20 @@ func GetPriceAtBlockFromPair( } func GetPriceFromPair( + network string, pairAddr string, decimalDif int64, inverse bool) (*uint64, *big.Int, error) { - blockNumber, err := ethereum.GetBlockNumber() + blockNumber, err := ethereum.GetBlockNumber(network) if err != nil { - ethereum.RefreshRPC() + ethereum.RefreshRPC(network) return nil, nil, err } lastPrice, err := GetPriceAtBlockFromPair( + network, blockNumber, pairAddr, decimalDif, @@ -332,17 +357,17 @@ func GetPriceFromPair( func priceFromSqrtX96(sqrtPriceX96 *big.Int, decimalDif int64, inverse bool) *big.Int { var decimalFix big.Int - var sqrtPrice big.Int + var powerUp big.Int var rawPrice big.Int var price big.Int var factor big.Int decimalFix.Mul(sqrtPriceX96, &tenEighteen) - sqrtPrice.Div(&decimalFix, &twoNinetySix) - rawPrice.Exp(&sqrtPrice, big.NewInt(2), nil) + powerUp.Exp(&decimalFix, big.NewInt(2), nil) + rawPrice.Div(&powerUp, &twoOneNineTwo) if inverse { - factor.Exp(big.NewInt(10), big.NewInt(72-decimalDif), nil) + factor.Exp(big.NewInt(10), big.NewInt(54+decimalDif), nil) price.Div(&factor, &rawPrice) } else { // TODO: needs work @@ -354,6 +379,10 @@ func priceFromSqrtX96(sqrtPriceX96 *big.Int, decimalDif int64, inverse bool) *bi } func Setup() { + if !config.Config.IsSet("plugins.uniswap") { + return + } + var tokens []Token err := config.Config.UnmarshalKey("plugins.uniswap.tokens", &tokens) @@ -365,144 +394,214 @@ func Setup() { for _, token := range tokens { priceCache[strings.ToLower(token.Pair)], err = lru.New[uint64, big.Int](24) + key := TokenKey{Chain: token.Chain, Pair: token.Pair} + supportedTokens[key] = true + if err != nil { panic(err) } } } -func Start() { +func syncBlocks(token Token, latest uint64) { + for block := lastBlock[token.Chain]; block < latest; block++ { - scheduler, err := gocron.NewScheduler() + price, err := GetPriceAtBlockFromPair( + token.Chain, + block, + token.Pair, + token.Delta, + token.Invert, + ) - if err != nil { - panic(err) - } + if err != nil { + panic(err) + } - var tokens []Token - if err := config.Config.UnmarshalKey("plugins.uniswap.tokens", &tokens); err != nil { - panic(err) - } + for _, cross := range token.Cross { + stored := crossPrices[cross] - caser := cases.Title(language.English, cases.NoLower) + if stored.Cmp(big.NewInt(0)) == 0 { + return + } - for _, 
token := range tokens { - priceCache[strings.ToLower(token.Pair)], err = lru.New[uint64, big.Int](24) + price.Mul(price, &stored) + } + + for range token.Cross { + price.Div(price, &tenEighteen) + } + + if token.Id != nil { + crossPrices[*token.Id] = *price + } + + var priceF big.Float + priceF.Quo(new(big.Float).SetInt(price), &tenEighteenF) + priceStr := fmt.Sprintf("%.18f %s", &priceF, token.Unit) + + log.Logger. + With("Block", block). + With("Price", priceStr). + Info(caser.String(token.Name)) + + priceInfo := datasets.PriceInfo{ + Price: *price, + Block: block, + Chain: token.Chain, + Pair: strings.ToLower(token.Pair), + Asset: strings.ToLower(token.Name), + } + + toHash, err := msgpack.Marshal(&priceInfo) if err != nil { panic(err) } - } - - _, err = scheduler.NewJob( - gocron.DurationJob(5*time.Second), - gocron.NewTask( - func() { - if client.IsClientSocketClosed { - return - } + signature, hash := bls.Sign(*bls.ClientSecretKey, toHash) + compressedSignature := signature.Bytes() - blockNumber, err := GetBlockNumber() + priceReport := datasets.PriceReport{ + PriceInfo: priceInfo, + Signature: compressedSignature, + } - if err != nil { - return - } + payload, err := msgpack.Marshal(&priceReport) - if lastBlock == *blockNumber { - return - } + if err != nil { + panic(err) + } - lastBlock = *blockNumber + if token.Send && !client.IsClientSocketClosed { + client.Client.WriteMessage( + websocket.BinaryMessage, + append([]byte{opcodes.PriceReport, 0}, payload...), + ) + } - for _, token := range tokens { + if token.Store { + RecordSignature( + signature, + bls.ClientSigner, + hash, + priceInfo, + false, + true, + ) + } + } +} - price, err := GetPriceAtBlockFromPair( - *blockNumber, - token.Pair, - token.Delta, - token.Invert, - ) +func createTask(tokens []Token, chain string) func() { - if err != nil { - return - } + return func() { - var priceF big.Float - priceF.Quo(new(big.Float).SetInt(price), &tenEighteenF) - priceStr := fmt.Sprintf("%.18f %s", &priceF, token.Unit) + currBlockNumber, err := GetBlockNumber(chain) - log.Logger. - With("Block", *blockNumber). - With("Price", priceStr). 
- Info(caser.String(token.Name)) + if err != nil { + panic(err) + } - priceInfo := datasets.PriceInfo{ - Price: *price, - Block: *blockNumber, - Chain: "ethereum", - Pair: strings.ToLower(token.Pair), - Asset: strings.ToLower(token.Name), - } + if lastBlock[chain] == *currBlockNumber { + return + } - toHash, err := msgpack.Marshal(&priceInfo) + if lastBlock[chain] == 0 { + lastBlock[chain] = *currBlockNumber - 1 + } - if err != nil { - panic(err) - } + for _, token := range tokens { - signature, _ := bls.Sign(*bls.ClientSecretKey, toHash) - compressedSignature := signature.Bytes() + if token.Chain != chain { + continue + } - priceReport := datasets.PriceReport{ - PriceInfo: priceInfo, - Signature: compressedSignature, - } + syncBlocks(token, *currBlockNumber) - payload, err := msgpack.Marshal(&priceReport) + } - if err != nil { - panic(err) - } + // TODO: block numbers should be tracked separately for each token + lastBlock[chain] = *currBlockNumber + } +} - if !client.IsClientSocketClosed { - client.Client.WriteMessage(websocket.BinaryMessage, append([]byte{1, 0}, payload...)) - } +func Start() { - } - }, - ), - ) + scheduler, err := gocron.NewScheduler() if err != nil { panic(err) } + var tokens []Token + if err := config.Config.UnmarshalKey("plugins.uniswap.tokens", &tokens); err != nil { + panic(err) + } + + for _, token := range tokens { + priceCache[strings.ToLower(token.Pair)], err = lru.New[uint64, big.Int](24) + + if err != nil { + panic(err) + } + } + + scheduleConfs := config.Config.Sub("plugins.uniswap.schedule") + scheduleNames := scheduleConfs.AllKeys() + + for index := range scheduleNames { + name := scheduleNames[index] + duration := scheduleConfs.GetDuration(name) * time.Millisecond + task := createTask(tokens, name) + + _, err = scheduler.NewJob( + gocron.DurationJob(duration), + gocron.NewTask(task), + gocron.WithSingletonMode(gocron.LimitModeReschedule), + ) + + if err != nil { + panic(err) + } + } + scheduler.Start() } func init() { - DebouncedSaveSignatures = utils.Debounce[AssetKey, datasets.PriceInfo](5*time.Second, SaveSignatures) + DebouncedSaveSignatures = utils.Debounce[bls12381.G1Affine, SaveSignatureArgs](5*time.Second, SaveSignatures) signatureMutex = new(sync.Mutex) - twoNinetySix.Exp(big.NewInt(2), big.NewInt(96), nil) + twoOneNineTwo.Exp(big.NewInt(2), big.NewInt(192), nil) tenEighteen.Exp(big.NewInt(10), big.NewInt(18), nil) tenEighteenF.SetInt(&tenEighteen) // TODO: Should use AssetKey priceCache = make(map[string]*lru.Cache[uint64, big.Int]) + supportedTokens = make(map[TokenKey]bool) var err error - signatureCache, err = lru.New[AssetKey, []bls.Signature](24) + signatureCache, err = lru.New[bls12381.G1Affine, []bls.Signature](24) if err != nil { panic(err) } - aggregateCache, err = lru.New[AssetKey, bls12381.G1Affine](24) + consensus, err = lru.New[AssetKey, map[bls12381.G1Affine]uint64](24) if err != nil { panic(err) } + + aggregateCache, err = lru.New[bls12381.G1Affine, bls12381.G1Affine](24) + + if err != nil { + panic(err) + } + + lastBlock = make(map[string]uint64) + crossPrices = make(map[string]big.Int) + caser = cases.Title(language.English, cases.NoLower) }
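
The net-layer changes above replace magic numbers with named opcodes: every WebSocket frame begins with a one-byte opcode followed by a msgpack payload, and the PriceReport and EventLog frames carry an extra dataset-id byte (currently always 0) between the opcode and the payload, which is why handleAtRoot checks payload[1] == 0 and unmarshals payload[2:]. The patch does not include the constants/opcodes file itself; the sketch below is reconstructed from the literals it replaces (0 through 7), with the two event-log values and the byte type being assumptions.

// Sketch of constants/opcodes, inferred from the literals this patch replaces.
// Hello through PriceReportBroadcast follow directly from the old code paths;
// EventLog and EventLogBroadcast are assumed values, and declaring the
// constants as byte is also an assumption (untyped constants would equally
// satisfy the call sites, which append them to []byte and compare payload[0]).
package opcodes

const (
	Hello                byte = 0 // worker -> broker handshake
	PriceReport          byte = 1 // signed Uniswap price report
	Feedback             byte = 2 // informational reply ("conf.ok", "kosk.ok", ...)
	KoskResult           byte = 3 // worker's answer to the KOSK challenge
	KoskChallenge        byte = 4 // broker -> worker KOSK challenge
	Error                byte = 5 // fatal reply ("kosk.invalid", "key.duplicate", ...)
	RegisterConsumer     byte = 6 // consumer subscription request
	PriceReportBroadcast byte = 7 // aggregated price attestation, broker -> consumers
	EventLog             byte = 8 // assumed: signed event-log report
	EventLogBroadcast    byte = 9 // assumed: aggregated event-log attestation
)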
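
The new persistence package is a small Badger-backed checkpoint store, and the logs plugin uses it to resume event scanning from the last synced block under a "plugins.logs.events.<name>" key. A minimal caller looks roughly like the sketch below; the import path is written with github.com/ on the assumption that the github.com form in this patch is link-rewriting residue, and the path, event name, and block numbers are illustrative only.

package main

import (
	"fmt"

	badger "github.com/dgraph-io/badger/v4"

	"github.com/KenshiTech/unchained/persistence"
)

func main() {
	// Open (or create) the checkpoint database at an illustrative path.
	persistence.Start("./context")

	// Per-event checkpoint key, matching the format used in createTask;
	// "MyEvent" is a hypothetical event name.
	key := "plugins.logs.events.MyEvent"

	fromBlock, err := persistence.ReadUInt64(key)
	if err == badger.ErrKeyNotFound {
		fromBlock = 19_000_000 // no checkpoint yet: fall back to a configured start block
	} else if err != nil {
		panic(err)
	}

	fmt.Println("resuming from block", fromBlock)

	// ... scan logs up to some toBlock, then persist the new checkpoint.
	toBlock := fromBlock + 8
	if err := persistence.WriteUint64(key, toBlock); err != nil {
		panic(err)
	}
}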
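
The reworked priceFromSqrtX96 swaps the 2^96 divisor for 2^192, which matches the standard Uniswap V3 relation price = (sqrtPriceX96 / 2^96)^2 = sqrtPriceX96^2 / 2^192, with the result scaled to 18 decimals and corrected by the configured delta (the difference in token decimals) before optionally inverting. The sketch below shows that relation in isolation: it mirrors the shape of the function in the patch but is not its exact scaling, and the direction of the delta correction is an assumption.

package main

import (
	"fmt"
	"math/big"
)

// priceFromSqrtX96Sketch is a minimal, illustrative version of the standard
// Uniswap V3 conversion. It is a sketch, not the exact arithmetic used in
// the patch above.
func priceFromSqrtX96Sketch(sqrtPriceX96 *big.Int, decimalDif int64, inverse bool) *big.Int {
	two192 := new(big.Int).Exp(big.NewInt(2), big.NewInt(192), nil) // 2^192
	ten18 := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)  // 10^18

	// raw = sqrtPriceX96^2 * 10^18 / 2^192 (token1 per token0, 18 decimals)
	raw := new(big.Int).Mul(sqrtPriceX96, sqrtPriceX96)
	raw.Mul(raw, ten18)
	raw.Div(raw, two192)

	// Correct for the difference in token decimals (the `delta` config field).
	// Whether this should multiply or divide depends on token ordering; the
	// multiplication here is an assumption for the sketch.
	scale := new(big.Int).Exp(big.NewInt(10), big.NewInt(decimalDif), nil)
	raw.Mul(raw, scale)

	if inverse {
		// Invert while staying at 18 decimals: 10^36 / raw.
		ten36 := new(big.Int).Exp(big.NewInt(10), big.NewInt(36), nil)
		return new(big.Int).Div(ten36, raw)
	}

	return raw
}

func main() {
	// sqrtPriceX96 for a price of exactly 1.0 is 2^96.
	sqrt := new(big.Int).Exp(big.NewInt(2), big.NewInt(96), nil)
	fmt.Println(priceFromSqrtX96Sketch(sqrt, 0, false)) // 1000000000000000000
	fmt.Println(priceFromSqrtX96Sketch(sqrt, 0, true))  // 1000000000000000000
}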