diff --git a/entries/block.go b/entries/block.go index d4bc2a2..5f45c48 100644 --- a/entries/block.go +++ b/entries/block.go @@ -124,6 +124,7 @@ func bulkInsertBlockEntry(entries []*lib.StateChangeEntry, db bun.IDB, operation for _, entry := range uniqueBlocks { block := entry.Encoder.(*lib.MsgDeSoBlock) blockEntry, blockSigners := BlockEncoderToPGStruct(block, entry.KeyBytes, params) + pgBlockEntrySlice = append(pgBlockEntrySlice, blockEntry) pgBlockSignersEntrySlice = append(pgBlockSignersEntrySlice, blockSigners...) for jj, transaction := range block.Txns { diff --git a/entries/bls_pkid_pair.go b/entries/bls_pkid_pair.go index 4fb2d1a..cd38b8d 100644 --- a/entries/bls_pkid_pair.go +++ b/entries/bls_pkid_pair.go @@ -4,6 +4,7 @@ import ( "context" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" + lru "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" "github.com/uptrace/bun" ) @@ -77,15 +78,15 @@ func BLSPublicKeyPKIDPairSnapshotEncoderToPGStruct( // BLSPublicKeyPKIDPairBatchOperation is the entry point for processing a batch of BLSPublicKeyPKIDPair entries. // It determines the appropriate handler based on the operation type and executes it. -func BLSPublicKeyPKIDPairBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { +func BLSPublicKeyPKIDPairBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte]) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. 
operationType := entries[0].OperationType var err error if operationType == lib.DbOperationTypeDelete { - err = bulkDeleteBLSPkidPairEntry(entries, db, operationType) + err = bulkDeleteBLSPkidPairEntry(entries, db, operationType, cachedEntries) } else { - err = bulkInsertBLSPkidPairEntry(entries, db, operationType, params) + err = bulkInsertBLSPkidPairEntry(entries, db, operationType, params, cachedEntries) } if err != nil { return errors.Wrapf(err, "entries.StakeBatchOperation: Problem with operation type %v", operationType) @@ -95,10 +96,14 @@ func BLSPublicKeyPKIDPairBatchOperation(entries []*lib.StateChangeEntry, db bun. // bulkInsertBLSPkidPairEntry inserts a batch of stake entries into the database. func bulkInsertBLSPkidPairEntry( - entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams, + entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte], ) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) + + // Filter out any entries that are already tracked in the cache. + uniqueEntries = consumer.FilterCachedEntries(uniqueEntries, cachedEntries) + uniqueBLSPkidPairEntries := consumer.FilterEntriesByPrefix( uniqueEntries, lib.Prefixes.PrefixValidatorBLSPublicKeyPKIDPairEntry) uniqueBLSPkidPairSnapshotEntries := consumer.FilterEntriesByPrefix( @@ -145,11 +150,16 @@ func bulkInsertBLSPkidPairEntry( } } + // Update the cache with the new entries. + for _, entry := range uniqueEntries { + cachedEntries.Add(string(entry.KeyBytes), entry.EncoderBytes) + } + return nil } // bulkDeleteBLSPkidPairEntry deletes a batch of stake entries from the database. 
-func bulkDeleteBLSPkidPairEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteBLSPkidPairEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, cachedEntries *lru.Cache[string, []byte]) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) @@ -182,5 +192,10 @@ func bulkDeleteBLSPkidPairEntry(entries []*lib.StateChangeEntry, db bun.IDB, ope } } + // Remove the deleted entries from the cache. + for _, key := range keysToDelete { + cachedEntries.Remove(string(key)) + } + return nil } diff --git a/entries/jailed_history.go b/entries/jailed_history.go index 32a342b..89b5877 100644 --- a/entries/jailed_history.go +++ b/entries/jailed_history.go @@ -4,6 +4,7 @@ import ( "context" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" + lru "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" "github.com/uptrace/bun" ) @@ -39,15 +40,15 @@ func UnjailValidatorStateChangeMetadataEncoderToPGStruct( // ValidatorBatchOperation is the entry point for processing a batch of Validator entries. // It determines the appropriate handler based on the operation type and executes it. -func JailedHistoryEventBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { +func JailedHistoryEventBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte]) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. 
operationType := entries[0].OperationType var err error if operationType == lib.DbOperationTypeDelete { - err = bulkDeleteValidatorEntry(entries, db, operationType) + err = bulkDeleteValidatorEntry(entries, db, operationType, cachedEntries) } else { - err = bulkInsertValidatorEntry(entries, db, operationType, params) + err = bulkInsertValidatorEntry(entries, db, operationType, params, cachedEntries) } if err != nil { return errors.Wrapf(err, "entries.ValidatorBatchOperation: Problem with operation type %v", operationType) diff --git a/entries/pkid.go b/entries/pkid.go index f7058d0..1c9fcb6 100644 --- a/entries/pkid.go +++ b/entries/pkid.go @@ -5,6 +5,7 @@ import ( "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" "github.com/golang/glog" + lru "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" "github.com/uptrace/bun" ) @@ -127,15 +128,15 @@ func bulkDeletePkidEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationT return nil } -func PkidBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { +func PkidBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte]) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. 
operationType := entries[0].OperationType var err error if operationType == lib.DbOperationTypeDelete { - err = bulkDeletePkid(entries, db, operationType) + err = bulkDeletePkid(entries, db, operationType, cachedEntries) } else { - err = bulkInsertPkid(entries, db, operationType, params) + err = bulkInsertPkid(entries, db, operationType, params, cachedEntries) } if err != nil { return errors.Wrapf(err, "entries.PostBatchOperation: Problem with operation type %v", operationType) @@ -144,12 +145,16 @@ func PkidBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib } // bulkInsertPkid inserts a batch of PKIDs into the database. -func bulkInsertPkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertPkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte]) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) uniqueLeaderScheduleEntries := consumer.FilterEntriesByPrefix( uniqueEntries, lib.Prefixes.PrefixSnapshotLeaderSchedule) + + // Filter out any entries that are already in the cache. + uniqueLeaderScheduleEntries = consumer.FilterCachedEntries(uniqueLeaderScheduleEntries, cachedEntries) + // NOTE: if we need to support parsing other indexes for PKIDs beyond LeaderSchedule, // we will need to filter the uniqueEntries by the appropriate prefix and then convert // the entries to the appropriate PG struct. @@ -178,11 +183,16 @@ func bulkInsertPkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType l } } + // Update the cached entries with the new entries. + for _, entry := range uniqueLeaderScheduleEntries { + cachedEntries.Add(string(entry.KeyBytes), entry.EncoderBytes) + } + return nil } // bulkDeletePKID deletes a batch of PKIDs from the database. 
-func bulkDeletePkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { +func bulkDeletePkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, cachedEntries *lru.Cache[string, []byte]) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) @@ -201,5 +211,10 @@ func bulkDeletePkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType l } } + // Remove the entries from the cache. + for _, entry := range uniqueEntries { + cachedEntries.Remove(string(entry.KeyBytes)) + } + return nil } diff --git a/entries/validator.go b/entries/validator.go index 2da3524..d3ec887 100644 --- a/entries/validator.go +++ b/entries/validator.go @@ -4,6 +4,7 @@ import ( "context" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" + lru "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" "github.com/uptrace/bun" "github.com/uptrace/bun/extra/bunbig" @@ -98,15 +99,15 @@ func ValidatorEncoderToPGStruct(validatorEntry *lib.ValidatorEntry, keyBytes []b // ValidatorBatchOperation is the entry point for processing a batch of Validator entries. // It determines the appropriate handler based on the operation type and executes it. -func ValidatorBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { +func ValidatorBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte]) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. 
operationType := entries[0].OperationType var err error if operationType == lib.DbOperationTypeDelete { - err = bulkDeleteValidatorEntry(entries, db, operationType) + err = bulkDeleteValidatorEntry(entries, db, operationType, cachedEntries) } else { - err = bulkInsertValidatorEntry(entries, db, operationType, params) + err = bulkInsertValidatorEntry(entries, db, operationType, params, cachedEntries) } if err != nil { return errors.Wrapf(err, "entries.ValidatorBatchOperation: Problem with operation type %v", operationType) @@ -115,9 +116,13 @@ func ValidatorBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params } // bulkInsertValidatorEntry inserts a batch of validator entries into the database. -func bulkInsertValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams, cachedEntries *lru.Cache[string, []byte]) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) + + // Filter out any entries that are already tracked in the cache. + uniqueEntries = consumer.FilterCachedEntries(uniqueEntries, cachedEntries) + uniqueValidatorEntries := consumer.FilterEntriesByPrefix(uniqueEntries, lib.Prefixes.PrefixValidatorByPKID) uniqueSnapshotValidatorEntries := consumer.FilterEntriesByPrefix(uniqueEntries, lib.Prefixes.PrefixSnapshotValidatorSetByPKID) // Create a new array to hold the bun struct. @@ -156,11 +161,17 @@ func bulkInsertValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, opera return errors.Wrapf(err, "entries.bulkInsertValidatorEntry: Error inserting snapshot validator entries") } } + + // Add any new entries to the cache. 
+ for _, entry := range uniqueEntries { + cachedEntries.Add(string(entry.KeyBytes), entry.EncoderBytes) + } + return nil } // bulkDeleteValidatorEntry deletes a batch of validator entries from the database. -func bulkDeleteValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, cachedEntries *lru.Cache[string, []byte]) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) uniqueKeys := consumer.KeysToDelete(uniqueEntries) @@ -182,6 +193,12 @@ func bulkDeleteValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, opera Exec(context.Background()); err != nil { return errors.Wrapf(err, "entries.bulkDeleteValidatorEntry: Error deleting entries") } + + // Delete cached validator entries. + for _, key := range validatorKeysToDelete { + keyStr := string(key) + cachedEntries.Remove(keyStr) + } } // Execute the delete query for snapshot validator entries. 
diff --git a/go.mod b/go.mod index 8c744c8..addb2e5 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/deso-protocol/state-consumer v1.0.3 github.com/golang/glog v1.2.2 github.com/google/uuid v1.6.0 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/pkg/errors v0.9.1 github.com/spf13/viper v1.19.0 github.com/uptrace/bun v1.2.3 @@ -58,7 +59,7 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/btcsuite/btcd/btcutil v1.1.6 // indirect github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect - github.com/btcsuite/btclog v0.0.0-20241003133417-09c4e92e319c // indirect + github.com/btcsuite/btclog v0.0.0-20241017175713-3428138b75c7 // indirect github.com/bwesterb/go-ristretto v1.2.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -79,7 +80,7 @@ require ( github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect github.com/ebitengine/purego v0.8.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/ethereum/go-ethereum v1.14.11 // indirect github.com/fatih/color v1.17.0 // indirect @@ -131,7 +132,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/montanaflynn/stats v0.7.1 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/nyaruka/phonenumbers v1.4.0 // indirect + github.com/nyaruka/phonenumbers v1.4.1 // indirect github.com/oleiade/lane v1.0.1 // indirect github.com/onflow/crypto v0.25.2 // indirect github.com/outcaste-io/ristretto v0.2.3 // indirect @@ -142,7 +143,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/puzpuzpuz/xsync/v3 v3.4.0 // indirect github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // 
indirect - github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 // indirect + github.com/robinjoseph08/go-pg-migrations/v3 v3.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sagikazarmark/locafero v0.6.0 // indirect @@ -204,7 +205,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/grpc v1.67.1 // indirect - google.golang.org/grpc/stats/opentelemetry v0.0.0-20241016173057-569c8eb0af32 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017163036-56df169480cd // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 8f4ff64..cba3024 100644 --- a/go.sum +++ b/go.sum @@ -61,7 +61,6 @@ github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSC github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= @@ -88,8 +87,8 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtyd github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod 
h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btclog v0.0.0-20241003133417-09c4e92e319c h1:4HxD1lBUGUddhzgaNgrCPsFWd7cGYNpeFUgd9ZIgyM0= -github.com/btcsuite/btclog v0.0.0-20241003133417-09c4e92e319c/go.mod h1:w7xnGOhwT3lmrS4H3b/D1XAXxvh+tbhUm8xeHN2y3TQ= +github.com/btcsuite/btclog v0.0.0-20241017175713-3428138b75c7 h1:Sy/7AwD/XuTsfXHMvcmjF8ZvAX0qR2TMcDbBANuMTR4= +github.com/btcsuite/btclog v0.0.0-20241017175713-3428138b75c7/go.mod h1:w7xnGOhwT3lmrS4H3b/D1XAXxvh+tbhUm8xeHN2y3TQ= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= @@ -116,7 +115,6 @@ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8E github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -161,14 +159,13 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY= github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -185,7 +182,6 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gernest/mention v2.0.0+incompatible h1:pTXnujBC6tqlw5awDkLojq92TXbt0F+4+8FBlQC+di8= github.com/gernest/mention v2.0.0+incompatible/go.mod h1:/z3Hb+4gaPF+vL8og/lj6Au5j8hh5EfU7/EknmDUuO4= -github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373/go.mod h1:Dcsy1kii/xFyNad5JqY/d0GO5mu91sungp5xotbm3Yk= github.com/git-chglog/git-chglog 
v0.15.4 h1:BwPDj7AghQTfpXO+UxG4mZM5MUTe9wfDuenF3jpyNf0= github.com/git-chglog/git-chglog v0.15.4/go.mod h1:BmWdTpqBVzPjKNrBTZGcQCrQV9zq6gFKurhWNnJbYDA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -193,7 +189,6 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-pg/pg/v10 v10.5.0/go.mod h1:BfgPoQnD2wXNd986RYEHzikqv9iE875PrFaZ9vXvtNM= github.com/go-pg/pg/v10 v10.13.0 h1:xMagDE57VP8Y2KvIf9PvrsOAIjX62XqaKmfEzB0c5eU= github.com/go-pg/pg/v10 v10.13.0/go.mod h1:IXp9Ok9JNNW9yWedbQxxvKUv84XhoH5+tGd+68y+zDs= github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU= @@ -215,9 +210,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -274,9 +267,10 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= @@ -284,7 +278,6 @@ github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/log15 v3.0.0-testing.5+incompatible/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= @@ -321,17 +314,13 @@ github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/goveralls v0.0.6/go.mod h1:h8b4ow6FxSPMQHF6o2ve3qsclnffZjYTNEKmLesRwqw= github.com/mattn/goveralls v0.0.12 h1:PEEeF0k1SsTjOBQ8FOmrOAoCu4ytuMaWCnWe94zxbCg= github.com/mattn/goveralls v0.0.12/go.mod h1:44ImGEUfmqH8bBtaMrYKsM65LXfNLWmwaxFGjZwgMSQ= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -347,12 +336,11 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nyaruka/phonenumbers v1.4.0 h1:ddhWiHnHCIX3n6ETDA58Zq5dkxkjlvgrDWM2OHHPCzU= -github.com/nyaruka/phonenumbers v1.4.0/go.mod h1:gv+CtldaFz+G3vHHnasBSirAi3O2XLqZzVWz4V1pl2E= +github.com/nyaruka/phonenumbers v1.4.1 h1:dNsiYGirahC2lMRz3p2dxmmyLbzD3arCgmj/hPEVRPY= +github.com/nyaruka/phonenumbers v1.4.1/go.mod h1:gv+CtldaFz+G3vHHnasBSirAi3O2XLqZzVWz4V1pl2E= github.com/oleiade/lane v1.0.1 h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns= @@ -361,14 +349,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod 
h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -395,11 +381,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY= github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI= -github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 h1:0/H63lDsoNYVn5YmP6VLDEnnKkoVYiHx7udTWCK4BUI= -github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0/go.mod h1:nOkSFfwwDUBFnDDQqMRC2p4PDE7GZb/KSVqILVB3bmw= +github.com/robinjoseph08/go-pg-migrations/v3 v3.1.0 h1:EjexnDlSIZoK/gMfQmKIqB7tYsI+SS5hqxmXd63RLb4= +github.com/robinjoseph08/go-pg-migrations/v3 v3.1.0/go.mod h1:9yEG60N97UVFGD/UKQUXoGVZh/t8KXx3JxEpxhKFlKY= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= @@ -418,7 +403,6 @@ github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0 h1:Xuk8ma/ibJ1 github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0/go.mod h1:7AwjWCpdPhkSmNAgUv5C7EJ4AbmjEB3r047r3DXWu3Y= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= 
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -441,7 +425,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -484,17 +467,12 @@ github.com/uptrace/bun/extra/bunbig v1.2.3 h1:S0Nd2u/tNk1Nax8GNyF43vJOCtLpeWDpdp github.com/uptrace/bun/extra/bunbig v1.2.3/go.mod h1:1+LVar7Ras4JMvULZ0tLO8TNx1W/5LxrK9cS6g57F20= github.com/uptrace/bun/extra/bundebug v1.2.3 h1:2QBykz9/u4SkN9dnraImDcbrMk2fUhuq2gL6hkh9qSc= github.com/uptrace/bun/extra/bundebug v1.2.3/go.mod h1:bihsYJxXxWZXwc1R3qALTHvp+npE0ElgaCvcjzyPPdw= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94= github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ= -github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod 
h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.8/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= @@ -515,7 +493,6 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= @@ -534,13 +511,9 @@ go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= @@ -567,16 +540,11 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -598,7 +566,6 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -612,8 +579,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201026173827-119d4633e4d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -656,7 +621,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -677,8 +641,6 @@ google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -695,8 +657,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20241016173057-569c8eb0af32 h1:jRUP0CSxgWbqgFsIjPx94M/QSGMbiu4rWEmGIgCbpjA= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20241016173057-569c8eb0af32/go.mod h1:xwT0YrcBcgR1ZSSLJtUgCjF5QlvTOhiwA/I9TcYf3Gg= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017163036-56df169480cd h1:02ssfqxi4Gh7gq74zPkpugKBpSew71uIxoc+lBwF+KI= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017163036-56df169480cd/go.mod h1:xwT0YrcBcgR1ZSSLJtUgCjF5QlvTOhiwA/I9TcYf3Gg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -710,18 +672,15 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/AlecAivazis/survey.v1 v1.8.7/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 h1:zSY6DDsFRMQDNQYKWCv/AEwJXoPpDf1FfMyw7I1B7M8= gopkg.in/DataDog/dd-trace-go.v1 v1.69.0/go.mod h1:U9AOeBHNAL95JXcd/SPf4a7O5GNeF/yD13sJtli/yaU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/kyokomi/emoji.v1 v1.5.1/go.mod h1:N9AZ6hi1jHOPn34PsbpufQZUcKftSD7WgS2pgpmH4Lg= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -738,7 +697,6 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= mellium.im/sasl v0.3.2 h1:PT6Xp7ccn9XaXAnJ03FcEjmAn7kK1x7aoXV6F+Vmrl0= 
mellium.im/sasl v0.3.2/go.mod h1:NKXDi1zkr+BlMHLQjY3ofYuU4KSPFxknb8mfEu6SveY= modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= diff --git a/handler/data_handler.go b/handler/data_handler.go index 58c3d9e..77b3bcf 100644 --- a/handler/data_handler.go +++ b/handler/data_handler.go @@ -8,13 +8,34 @@ import ( "fmt" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/postgres-data-handler/entries" + "github.com/deso-protocol/postgres-data-handler/migrations/explorer_migrations" + "github.com/deso-protocol/postgres-data-handler/migrations/explorer_view_migrations" + "github.com/deso-protocol/postgres-data-handler/migrations/initial_migrations" "github.com/deso-protocol/postgres-data-handler/migrations/post_sync_migrations" "github.com/deso-protocol/state-consumer/consumer" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" "github.com/uptrace/bun" + "strings" ) +const ( + // The name of the publication to use for the subscribed database. + SubscribedPublicationName = "pdh_publication" + // The name of the subscription to use for the subscribed database. + SubscribedSubscriptionName = "pdh_subscription" +) + +type PostgresDataHandlerConfig struct { + // Config for the main database. + DbConfig *DBConfig + // Config for the secondary database. + SubDbConfig *DBConfig + // Whether to calculate explorer stats. + CalculateExplorerStats bool +} + // PostgresDataHandler is a struct that implements the StateSyncerDataHandler interface. It is used by the // consumer to insert/delete entries into the postgres database. type PostgresDataHandler struct { @@ -25,6 +46,12 @@ type PostgresDataHandler struct { // Params is a struct containing the current blockchain parameters. // It is used to determine which prefix to use for public keys. Params *lib.DeSoParams + // A secondary database used for high-throughput operations. + SubscribedDB *bun.DB + // The config for the data handler. 
+ Config *PostgresDataHandlerConfig + + CachedEntries *lru.Cache[string, []byte] } // HandleEntryBatch performs a bulk operation for a batch of entries, based on the encoder type. @@ -92,7 +119,7 @@ func (postgresDataHandler *PostgresDataHandler) HandleEntryBatch(batchedEntries case lib.EncoderTypeStakeEntry: err = entries.StakeBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeValidatorEntry: - err = entries.ValidatorBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + err = entries.ValidatorBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params, postgresDataHandler.CachedEntries) case lib.EncoderTypeLockedStakeEntry: err = entries.LockedStakeBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeLockedBalanceEntry: @@ -102,11 +129,11 @@ func (postgresDataHandler *PostgresDataHandler) HandleEntryBatch(batchedEntries case lib.EncoderTypeEpochEntry: err = entries.EpochEntryBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypePKID: - err = entries.PkidBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + err = entries.PkidBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params, postgresDataHandler.CachedEntries) case lib.EncoderTypeGlobalParamsEntry: err = entries.GlobalParamsBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeBLSPublicKeyPKIDPairEntry: - err = entries.BLSPublicKeyPKIDPairBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + err = entries.BLSPublicKeyPKIDPairBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params, postgresDataHandler.CachedEntries) case lib.EncoderTypeBlockNode: err = entries.BlockNodeOperation(batchedEntries, dbHandle, postgresDataHandler.Params) } @@ -152,18 +179,68 @@ func (postgresDataHandler *PostgresDataHandler) HandleSyncEvent(syncEvent consum } } - if err := RunMigrations(postgresDataHandler.DB, 
false, MigrationTypePostHypersync); err != nil { + ctx := CreateMigrationContext(context.Background(), postgresDataHandler.Config.DbConfig) + + if err := RunMigrations(postgresDataHandler.DB, post_sync_migrations.Migrations, ctx); err != nil { return fmt.Errorf("failed to run migrations: %w", err) } - fmt.Printf("Starting to refresh explorer statistics\n") - go post_sync_migrations.RefreshExplorerStatistics(postgresDataHandler.DB) - // Begin a new transaction, if one was being tracked previously. - if commitTxn { - err := postgresDataHandler.InitiateTransaction() - if err != nil { - return errors.Wrapf(err, "PostgresDataHandler.HandleSyncEvent: Error initiating transaction") + explorerDb := postgresDataHandler.DB + + // Setup the explorer views as well if those are enabled. + // If we have a subscribed database, run migrations on that as well. + if postgresDataHandler.SubscribedDB != nil { + if err := RunMigrations(postgresDataHandler.SubscribedDB, post_sync_migrations.Migrations, ctx); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) + } + + // If we are calculating explorer stats, run the explorer migrations. + if postgresDataHandler.Config.CalculateExplorerStats { + if err := RunMigrations(postgresDataHandler.SubscribedDB, explorer_migrations.Migrations, ctx); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) + } + + explorer_view_migrations.SetDBConfig(postgresDataHandler.Config.SubDbConfig.DBHost, postgresDataHandler.Config.SubDbConfig.DBPort, postgresDataHandler.Config.SubDbConfig.DBUsername, postgresDataHandler.Config.SubDbConfig.DBPassword, postgresDataHandler.Config.SubDbConfig.DBName) + if err := RunMigrations(postgresDataHandler.DB, explorer_view_migrations.Migrations, ctx); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) + } + explorerDb = postgresDataHandler.SubscribedDB + } + + // Create the publication on the main db. 
+ if err := CreatePublication(postgresDataHandler.DB, SubscribedPublicationName, []string{"transaction_type"}); err != nil { + return fmt.Errorf("failed to create publication: %w", err) } + + connectionString := fmt.Sprintf("host=%s port=%s dbname=%s user=%s password=%s", postgresDataHandler.Config.DbConfig.DBHost, postgresDataHandler.Config.DbConfig.DBPort, postgresDataHandler.Config.DbConfig.DBName, postgresDataHandler.Config.DbConfig.DBUsername, postgresDataHandler.Config.DbConfig.DBPassword) + // Create the subscription on the subscribed db. + if err := CreateSubscription(postgresDataHandler.SubscribedDB, SubscribedPublicationName, SubscribedSubscriptionName, connectionString, false); err != nil { + if strings.Contains(err.Error(), "already exists") { + err = RefreshSubscription(postgresDataHandler.SubscribedDB, SubscribedSubscriptionName) + if err != nil { + return fmt.Errorf("failed to refresh subscription: %v", err) + } + } else { + return fmt.Errorf("failed to create subscription: %v", err) + } + } + + // If we are running the explorer stats, but don't have a subscribed db, run the explorer migrations on the main db. + } else if postgresDataHandler.Config.CalculateExplorerStats { + if err := RunMigrations(postgresDataHandler.DB, explorer_migrations.Migrations, ctx); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) + } + } + + if postgresDataHandler.Config.CalculateExplorerStats { + fmt.Printf("Starting to refresh explorer statistics\n") + go post_sync_migrations.RefreshExplorerStatistics(explorerDb, postgresDataHandler.SubscribedDB) + } + + // Begin a new transaction. + err := postgresDataHandler.InitiateTransaction() + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.HandleSyncEvent: Error initiating transaction") } // After hypersync, we don't need to maintain so many idle open connections. 
@@ -174,16 +251,33 @@ func (postgresDataHandler *PostgresDataHandler) HandleSyncEvent(syncEvent consum } func (postgresDataHandler *PostgresDataHandler) ResetAndMigrateDatabase() error { + if postgresDataHandler.Config.SubDbConfig.DBHost != "" { + // Drop the subscription to allow schema reset + if _, err := postgresDataHandler.SubscribedDB.Exec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s;", SubscribedSubscriptionName)); err != nil { + return fmt.Errorf("failed to drop subscription: %w", err) + } + + if _, err := postgresDataHandler.SubscribedDB.Exec("DROP SCHEMA public CASCADE; CREATE SCHEMA public;"); err != nil { + return fmt.Errorf("failed to reset schema: %w", err) + } + + ctx := CreateMigrationContext(context.Background(), postgresDataHandler.Config.SubDbConfig) + // Run migrations. + if err := RunMigrations(postgresDataHandler.SubscribedDB, initial_migrations.Migrations, ctx); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) + } + } + // Drop and recreate the schema - essentially nuke the entire db. if _, err := postgresDataHandler.DB.Exec("DROP SCHEMA public CASCADE; CREATE SCHEMA public;"); err != nil { return fmt.Errorf("failed to reset schema: %w", err) } + ctx := CreateMigrationContext(context.Background(), postgresDataHandler.Config.DbConfig) // Run migrations. - if err := RunMigrations(postgresDataHandler.DB, false, MigrationTypeInitial); err != nil { + if err := RunMigrations(postgresDataHandler.DB, initial_migrations.Migrations, ctx); err != nil { return fmt.Errorf("failed to run migrations: %w", err) } - return nil } @@ -325,3 +419,130 @@ func generateSavepointName() (string, error) { // Convert the byte slice to a hexadecimal string return "savepoint_" + hex.EncodeToString(randomBytes), nil } + +// CreatePublication creates a publication with the given name. 
+func CreatePublication(db *bun.DB, publicationName string, excludeTables []string) error { + // Define tables to exclude by default + defaultExclusions := []string{"bun_migrations", "bun_migration_locks"} + excludeTables = append(excludeTables, defaultExclusions...) + + // Convert excludeTables to a format suitable for SQL query + exclusionList := "'" + strings.Join(excludeTables, "', '") + "'" + + // Query to get tables that are not in the exclusion list + var tables []string + query := fmt.Sprintf(` + SELECT table_name + FROM information_schema.tables + WHERE table_schema = 'public' + AND table_type = 'BASE TABLE' + AND table_name NOT IN (%s);`, exclusionList) + + if err := db.NewRaw(query).Scan(context.Background(), &tables); err != nil { + return errors.Wrap(err, "CreatePublication: Error retrieving tables") + } + + _, err := db.Exec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s;", publicationName)) + if err != nil { + return errors.Wrapf(err, "CreatePublication: Error dropping publication") + } + + // Construct the CREATE PUBLICATION command with the filtered table list + createPubQuery := fmt.Sprintf("CREATE PUBLICATION %s FOR TABLE %s;", publicationName, strings.Join(tables, ", ")) + _, err = db.Exec(createPubQuery) + if err != nil { + return errors.Wrapf(err, "CreatePublication: Error creating publication") + } + + return nil +} + +func CreateSubscription(db *bun.DB, publicationName string, subscriptionName string, connectionString string, reuseSlot bool) error { + var query string + if reuseSlot { + query = fmt.Sprintf("CREATE SUBSCRIPTION %s CONNECTION '%s' PUBLICATION %s WITH (slot_name = '%s', create_slot = false);", + subscriptionName, connectionString, publicationName, subscriptionName) + } else { + query = fmt.Sprintf("CREATE SUBSCRIPTION %s CONNECTION '%s' PUBLICATION %s;", + subscriptionName, connectionString, publicationName) + } + + _, err := db.Exec(query) + if err != nil { + return errors.Wrapf(err, "CreateSubscription: Error creating 
subscription") + } + + return nil +} + +func RefreshSubscription(db *bun.DB, subscriptionName string) error { + _, err := db.Exec(fmt.Sprintf("ALTER SUBSCRIPTION %s REFRESH PUBLICATION;", subscriptionName)) + if err != nil { + return errors.Wrapf(err, "RefreshSubscription: Error refreshing subscription") + } + return nil +} + +func SyncPublicationSubscription(publisherDB *bun.DB, subscriberDB *bun.DB, publicationName string, subscriptionName string, connectionString string, subDbConfig *DBConfig) error { + // Check if the publication exists on the publisher database + var publicationExists bool + query := "SELECT EXISTS (SELECT 1 FROM pg_publication WHERE pubname = ?);" + err := publisherDB.NewRaw(query, publicationName).Scan(context.Background(), &publicationExists) + if err != nil { + return errors.Wrapf(err, "Error checking publication %s", publicationName) + } + + if !publicationExists { + return nil + } + + // Run migrations on the subscriber db. + ctx := CreateMigrationContext(context.Background(), subDbConfig) + // Run post sync migrations immediately after setting up the sub db. 
+ err = RunMigrations(subscriberDB, initial_migrations.Migrations, ctx) + if err != nil { + return errors.Wrapf(err, "Error running migrations on subscriber database") + } + err = RunMigrations(subscriberDB, post_sync_migrations.Migrations, ctx) + if err != nil { + return errors.Wrapf(err, "Error running post sync migrations on subscriber database") + } + + // Check if the subscription exists on the subscriber database + var subscriptionExists bool + query = "SELECT EXISTS (SELECT 1 FROM pg_subscription WHERE subname = ?);" + err = subscriberDB.NewRaw(query, subscriptionName).Scan(context.Background(), &subscriptionExists) + if err != nil { + return errors.Wrapf(err, "Error checking subscription %s", subscriptionName) + } + + // If the subscription exists, refresh it + if subscriptionExists { + err = RefreshSubscription(subscriberDB, subscriptionName) + if err != nil { + return errors.Wrapf(err, "Error refreshing subscription %s", subscriptionName) + } + fmt.Printf("Subscription %s refreshed successfully\n", subscriptionName) + } else { + // Check if the replication slot already exists on the publisher database + var replicationSlotExists bool + query = "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = ?);" + err = publisherDB.NewRaw(query, subscriptionName).Scan(context.Background(), &replicationSlotExists) + if err != nil { + return errors.Wrapf(err, "Error checking replication slot %s", subscriptionName) + } + + if replicationSlotExists { + fmt.Printf("Replication slot %s already exists on the publisher, reusing it\n", subscriptionName) + } + + // Create the subscription, even if the replication slot exists, it should reuse it + err = CreateSubscription(subscriberDB, publicationName, subscriptionName, connectionString, replicationSlotExists) + if err != nil { + return errors.Wrapf(err, "Error creating subscription %s", subscriptionName) + } + fmt.Printf("Subscription %s created successfully\n", subscriptionName) + } + + return nil +} diff 
--git a/handler/db_utils.go b/handler/db_utils.go index 4ee8ed3..3f8ea96 100644 --- a/handler/db_utils.go +++ b/handler/db_utils.go @@ -2,34 +2,35 @@ package handler import ( "context" + "database/sql" + "fmt" "github.com/deso-protocol/postgres-data-handler/migrations/initial_migrations" "github.com/deso-protocol/postgres-data-handler/migrations/post_sync_migrations" "github.com/golang/glog" "github.com/uptrace/bun" + "github.com/uptrace/bun/dialect/pgdialect" + "github.com/uptrace/bun/driver/pgdriver" + "github.com/uptrace/bun/extra/bundebug" "github.com/uptrace/bun/migrate" ) type MigrationType uint8 const ( - // We intentionally skip zero as otherwise that would be the default value. - MigrationTypeInitial MigrationType = 0 - MigrationTypePostHypersync MigrationType = 1 + MigrationContextKey = "migration_context" +) +const ( + EntryCacheSize uint = 1000000 // 1M entries ) -// TODO: Make this a method on the PostgresDataHandler struct. -func RunMigrations(db *bun.DB, reset bool, migrationType MigrationType) error { - ctx := context.Background() + +func RunMigrations(db *bun.DB, migrations *migrate.Migrations, ctx context.Context) error { var migrator *migrate.Migrator - initialMigrator := migrate.NewMigrator(db, initial_migrations.Migrations) - postSyncMigrator := migrate.NewMigrator(db, post_sync_migrations.Migrations) + // Make sure we don't mark a migration as successful if it fails. + migrationOpt := migrate.WithMarkAppliedOnSuccess(true) + migrator = migrate.NewMigrator(db, migrations, migrationOpt) - if migrationType == MigrationTypeInitial { - migrator = initialMigrator - } else if migrationType == MigrationTypePostHypersync { - migrator = postSyncMigrator - } if err := AcquireAdvisoryLock(db); err != nil { return err } @@ -42,17 +43,6 @@ func RunMigrations(db *bun.DB, reset bool, migrationType MigrationType) error { glog.Fatal(err) } - // If resetting, revert all migrations, starting with the most recently applied. 
- if reset { - if err := RollbackAllMigrations(postSyncMigrator, ctx); err != nil { - return err - } - - if err := RollbackAllMigrations(initialMigrator, ctx); err != nil { - return err - } - } - group, err := migrator.Migrate(ctx) if err != nil { return err @@ -76,3 +66,56 @@ func RollbackAllMigrations(migrator *migrate.Migrator, ctx context.Context) erro } return nil } + +type DBConfig struct { + DBHost string + DBPort string + DBUsername string + DBPassword string + DBName string +} + +func SetupDb(dbConfig *DBConfig, threadLimit int, logQueries bool, readonlyUserPassword string, calculateExplorerStatistics bool) (*bun.DB, error) { + pgURI := PGUriFromDbConfig(dbConfig) + // Open a PostgreSQL database. + pgdb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(pgURI))) + if pgdb == nil { + glog.Fatalf("Error connecting to postgres db at URI: %v", pgURI) + } + + // Create a Bun db on top of postgres for querying. + db := bun.NewDB(pgdb, pgdialect.New()) + + db.SetConnMaxLifetime(0) + + db.SetMaxIdleConns(threadLimit * 2) + + //Print all queries to stdout for debugging. + if logQueries { + db.AddQueryHook(bundebug.NewQueryHook(bundebug.WithVerbose(true))) + } + + // Set the readonly user password for the initial migrations. + initial_migrations.SetQueryUserPassword(readonlyUserPassword) + + post_sync_migrations.SetCalculateExplorerStatistics(calculateExplorerStatistics) + + ctx := CreateMigrationContext(context.Background(), dbConfig) + // Apply db migrations. 
+ err := RunMigrations(db, initial_migrations.Migrations, ctx) + if err != nil { + return nil, err + } + return db, nil +} + +func CreateMigrationContext(ctx context.Context, config *DBConfig) context.Context { + if config != nil { + ctx = context.WithValue(ctx, MigrationContextKey, config) + } + return ctx +} + +func PGUriFromDbConfig(config *DBConfig) string { + return fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable&timeout=18000s", config.DBUsername, config.DBPassword, config.DBHost, config.DBPort, config.DBName) +} diff --git a/main.go b/main.go index 6d5d1b3..f830689 100644 --- a/main.go +++ b/main.go @@ -1,20 +1,15 @@ package main import ( - "database/sql" "flag" "fmt" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/postgres-data-handler/handler" - "github.com/deso-protocol/postgres-data-handler/migrations/initial_migrations" - "github.com/deso-protocol/postgres-data-handler/migrations/post_sync_migrations" "github.com/deso-protocol/state-consumer/consumer" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/spf13/viper" "github.com/uptrace/bun" - "github.com/uptrace/bun/dialect/pgdialect" - "github.com/uptrace/bun/driver/pgdriver" - "github.com/uptrace/bun/extra/bundebug" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "gopkg.in/DataDog/dd-trace-go.v1/profiler" ) @@ -22,8 +17,8 @@ import ( func main() { // Initialize flags and get config values. 
setupFlags() - pgURI, stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readOnlyUserPassword, - explorerStatistics, datadogProfiler, isTestnet, isRegtest, isAcceleratedRegtest, syncMempool := getConfigValues() + stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readOnlyUserPassword, + explorerStatistics, datadogProfiler, isTestnet, isRegtest, isAcceleratedRegtest, syncMempool, dbConfig, subDbConfig := getConfigValues() // Print all the config values in a single printf call broken up // with newlines and make it look pretty both printed out and in code @@ -47,11 +42,20 @@ func main() { logQueries, explorerStatistics, datadogProfiler, isTestnet) // Initialize the DB. - db, err := setupDb(pgURI, threadLimit, logQueries, readOnlyUserPassword, explorerStatistics) + db, err := handler.SetupDb(dbConfig, threadLimit, logQueries, readOnlyUserPassword, explorerStatistics) if err != nil { glog.Fatalf("Error setting up DB: %v", err) } + var subDb *bun.DB + + if subDbConfig != nil { + subDb, err = handler.SetupDb(subDbConfig, threadLimit, logQueries, readOnlyUserPassword, explorerStatistics) + if err != nil { + glog.Fatalf("Error setting up DB: %v", err) + } + } + // Setup profiler if enabled. 
if datadogProfiler { tracer.Start() @@ -70,6 +74,26 @@ func main() { } lib.GlobalDeSoParams = *params + pdhConfig := &handler.PostgresDataHandlerConfig{ + DbConfig: dbConfig, + SubDbConfig: subDbConfig, + CalculateExplorerStats: explorerStatistics, + } + + cachedEntries, err := lru.New[string, []byte](int(handler.EntryCacheSize)) + if err != nil { + glog.Fatalf("Error creating LRU cache: %v", err) + } + + if subDbConfig != nil { + connectionString := fmt.Sprintf("host=%s port=%s dbname=%s user=%s password=%s", pdhConfig.DbConfig.DBHost, pdhConfig.DbConfig.DBPort, pdhConfig.DbConfig.DBName, pdhConfig.DbConfig.DBUsername, pdhConfig.DbConfig.DBPassword) + + err = handler.SyncPublicationSubscription(db, subDb, handler.SubscribedPublicationName, handler.SubscribedSubscriptionName, connectionString, subDbConfig) + if err != nil { + glog.Fatalf("Error syncing publication and subscription: %v", err) + } + } + // Initialize and run a state syncer consumer. stateSyncerConsumer := &consumer.StateSyncerConsumer{} err = stateSyncerConsumer.InitializeAndRun( @@ -79,8 +103,11 @@ func main() { threadLimit, syncMempool, &handler.PostgresDataHandler{ - DB: db, - Params: params, + DB: db, + SubscribedDB: subDb, + Params: params, + Config: pdhConfig, + CachedEntries: cachedEntries, }, ) if err != nil { @@ -101,14 +128,35 @@ func setupFlags() { viper.AutomaticEnv() } -func getConfigValues() (pgURI string, stateChangeDir string, consumerProgressDir string, batchBytes uint64, threadLimit int, logQueries bool, readonlyUserPassword string, explorerStatistics bool, datadogProfiler bool, isTestnet bool, isRegtest bool, isAcceleratedRegtest bool, syncMempool bool) { +func getConfigValues() (stateChangeDir string, consumerProgressDir string, batchBytes uint64, threadLimit int, logQueries bool, readonlyUserPassword string, explorerStatistics bool, datadogProfiler bool, isTestnet bool, isRegtest bool, isAcceleratedRegtest bool, syncMempool bool, dbConfig *handler.DBConfig, subDbConfig 
*handler.DBConfig) { dbHost := viper.GetString("DB_HOST") dbPort := viper.GetString("DB_PORT") dbUsername := viper.GetString("DB_USERNAME") dbPassword := viper.GetString("DB_PASSWORD") - pgURI = fmt.Sprintf("postgres://%s:%s@%s:%s/postgres?sslmode=disable&timeout=18000s", dbUsername, dbPassword, dbHost, dbPort) + dbConfig = &handler.DBConfig{ + DBHost: dbHost, + DBPort: dbPort, + DBUsername: dbUsername, + DBPassword: dbPassword, + DBName: "postgres", + } + + subDbHost := viper.GetString("SUB_DB_HOST") + subDbPort := viper.GetString("SUB_DB_PORT") + subDbUsername := viper.GetString("SUB_DB_USERNAME") + subDbPassword := viper.GetString("SUB_DB_PASSWORD") + + if subDbHost != "" { + subDbConfig = &handler.DBConfig{ + DBHost: subDbHost, + DBPort: subDbPort, + DBUsername: subDbUsername, + DBPassword: subDbPassword, + DBName: "postgres", + } + } stateChangeDir = viper.GetString("STATE_CHANGE_DIR") if stateChangeDir == "" { @@ -142,37 +190,5 @@ func getConfigValues() (pgURI string, stateChangeDir string, consumerProgressDir isRegtest = viper.GetBool("REGTEST") isAcceleratedRegtest = viper.GetBool("ACCELERATED_REGTEST") - return pgURI, stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readonlyUserPassword, explorerStatistics, datadogProfiler, isTestnet, isRegtest, isAcceleratedRegtest, syncMempool -} - -func setupDb(pgURI string, threadLimit int, logQueries bool, readonlyUserPassword string, calculateExplorerStatistics bool) (*bun.DB, error) { - // Open a PostgreSQL database. - pgdb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(pgURI))) - if pgdb == nil { - glog.Fatalf("Error connecting to postgres db at URI: %v", pgURI) - } - - // Create a Bun db on top of postgres for querying. - db := bun.NewDB(pgdb, pgdialect.New()) - - db.SetConnMaxLifetime(0) - - db.SetMaxIdleConns(threadLimit * 2) - - //Print all queries to stdout for debugging. 
- if logQueries { - db.AddQueryHook(bundebug.NewQueryHook(bundebug.WithVerbose(true))) - } - - // Set the readonly user password for the initial migrations. - initial_migrations.SetQueryUserPassword(readonlyUserPassword) - - post_sync_migrations.SetCalculateExplorerStatistics(calculateExplorerStatistics) - - // Apply db migrations. - err := handler.RunMigrations(db, false, handler.MigrationTypeInitial) - if err != nil { - return nil, err - } - return db, nil + return stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readonlyUserPassword, explorerStatistics, datadogProfiler, isTestnet, isRegtest, isAcceleratedRegtest, syncMempool, dbConfig, subDbConfig } diff --git a/migrations/post_sync_migrations/20230713000002_create_statistic_views.go b/migrations/explorer_migrations/20230713000002_create_statistic_views.go similarity index 99% rename from migrations/post_sync_migrations/20230713000002_create_statistic_views.go rename to migrations/explorer_migrations/20230713000002_create_statistic_views.go index 6f60b35..f40e681 100644 --- a/migrations/post_sync_migrations/20230713000002_create_statistic_views.go +++ b/migrations/explorer_migrations/20230713000002_create_statistic_views.go @@ -1,17 +1,12 @@ -package post_sync_migrations +package explorer_migrations import ( "context" "github.com/uptrace/bun" ) -// TODO: revisit access group relationships when we refactor the messaging app to use the graphql API. 
func init() { Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } - err := RunMigrationWithRetries(db, ` CREATE TABLE public_key_first_transaction ( public_key VARCHAR PRIMARY KEY , @@ -68,7 +63,7 @@ func init() { count_value bigint; padded_transaction_type varchar; BEGIN - IF transaction_type < 1 OR transaction_type > 33 THEN + IF transaction_type < 1 OR transaction_type > 44 THEN RAISE EXCEPTION '% is not a valid transaction type', transaction_type; END IF; @@ -1241,9 +1236,6 @@ func init() { return nil }, func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` DROP FUNCTION IF EXISTS refresh_statistic_views; DROP VIEW IF EXISTS statistic_dashboard; diff --git a/migrations/explorer_migrations/20230714000011_create_postgraphile_fk_comments.go b/migrations/explorer_migrations/20230714000011_create_postgraphile_fk_comments.go new file mode 100644 index 0000000..6675c7d --- /dev/null +++ b/migrations/explorer_migrations/20230714000011_create_postgraphile_fk_comments.go @@ -0,0 +1,155 @@ +package explorer_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on materialized view statistic_txn_count_all is E'@omit'; + comment on materialized view statistic_txn_count_30_d is E'@omit'; + comment on materialized view statistic_wallet_count_all is E'@omit'; + comment on materialized view statistic_new_wallet_count_30_d is E'@omit'; + comment on materialized view statistic_active_wallet_count_30_d is E'@omit'; + comment on materialized view statistic_block_height_current is E'@omit'; + comment on materialized view statistic_txn_count_pending is E'@omit'; + comment on materialized view statistic_txn_fee_1_d is E'@omit'; + comment on materialized view statistic_total_supply is E'@omit'; + comment on materialized 
view statistic_post_count is E'@omit'; + comment on materialized view statistic_post_longform_count is E'@omit'; + comment on materialized view statistic_comment_count is E'@omit'; + comment on materialized view statistic_repost_count is E'@omit'; + comment on materialized view statistic_txn_count_creator_coin is E'@omit'; + comment on materialized view statistic_txn_count_nft is E'@omit'; + comment on materialized view statistic_txn_count_dex is E'@omit'; + comment on materialized view statistic_txn_count_social is E'@omit'; + comment on materialized view statistic_follow_count is E'@omit'; + comment on materialized view statistic_message_count is E'@omit'; + comment on materialized view statistic_social_leaderboard_likes is E'@omit'; + comment on materialized view statistic_social_leaderboard_reactions is E'@omit'; + comment on materialized view statistic_social_leaderboard_diamonds is E'@omit'; + comment on materialized view statistic_social_leaderboard_reposts is E'@omit'; + comment on materialized view statistic_social_leaderboard_comments is E'@omit'; + comment on table public_key_first_transaction IS E'@omit'; + comment on function get_transaction_count is E'@omit'; + comment on function refresh_public_key_first_transaction is E'@omit'; + comment on view statistic_dashboard is E'@name dashboardStat'; + comment on materialized view statistic_social_leaderboard is E'@name socialLeaderboardStat'; + comment on materialized view statistic_nft_leaderboard is E'@name nftLeaderboardStat'; + comment on materialized view statistic_defi_leaderboard is E'@name defiLeaderboardStat'; + comment on materialized view statistic_txn_count_monthly is E'@name monthlyTxnCountStat'; + comment on materialized view statistic_wallet_count_monthly is E'@name monthlyNewWalletCountStat'; + comment on materialized view statistic_txn_count_daily is E'@name dailyTxnCountStat'; + comment on materialized view statistic_new_wallet_count_daily is E'@name dailyNewWalletCountStat'; + comment on 
materialized view statistic_active_wallet_count_daily is E'@name dailyActiveWalletCountStat'; + comment on materialized view statistic_profile_transactions is E'@name profileTransactionStat\n@unique public_key\n@foreignKey (public_key) references account (public_key)|@foreignFieldName transactionStats|@fieldName account'; + comment on materialized view statistic_profile_top_nft_owners is E'@name profileNftTopOwners'; + comment on function hex_to_numeric is E'@omit'; + comment on function int_to_bytea is E'@omit'; + comment on function cc_nanos_total_sell_value is E'@omit'; + comment on view dao_coin_limit_order_max_bids is E'@omit'; + comment on view dao_coin_limit_order_min_asks is E'@omit'; + comment on view dao_coin_limit_order_bid_asks is E'@unique selling_creator_pkid,buying_creator_pkid\n@foreignKey (selling_creator_pkid) references account (pkid)|@foreignFieldName bidAskAsSellingToken|@fieldName sellingTokenAccount\n@foreignKey (buying_creator_pkid) references account (pkid)|@foreignFieldName bidAskAsBuyingToken|@fieldName buyingTokenAccount\n@name deso_token_limit_order_bid_asks'; + comment on materialized view statistic_cc_balance_totals is E'@omit'; + comment on materialized view statistic_nft_balance_totals is E'@omit'; + comment on materialized view statistic_deso_token_balance_totals is E'@omit'; + comment on materialized view statistic_portfolio_value is E'@name profilePortfolioValueStat\n@unique public_key\n@omit all'; + comment on materialized view statistic_profile_cc_royalties is E'@omit'; + comment on materialized view statistic_profile_diamond_earnings is E'@omit'; + comment on materialized view statistic_profile_nft_bid_royalty_earnings is E'@omit'; + comment on materialized view statistic_profile_nft_buy_now_royalty_earnings is E'@omit'; + comment on materialized view statistic_profile_earnings is E'@name profileEarningsStats\n@unique public_key\n@omit all'; + comment on materialized view statistic_profile_deso_token_buy_orders is E'@omit'; + 
comment on materialized view statistic_profile_deso_token_sell_orders is E'@omit'; + comment on materialized view statistic_profile_diamonds_given is E'@omit'; + comment on materialized view statistic_profile_diamonds_received is E'@omit'; + comment on materialized view statistic_profile_cc_buyers is E'@omit'; + comment on materialized view statistic_profile_cc_sellers is E'@omit'; + comment on materialized view statistic_profile_nft_bid_buys is E'@omit'; + comment on materialized view statistic_profile_nft_bid_sales is E'@omit'; + comment on materialized view statistic_profile_nft_buy_now_buys is E'@omit'; + comment on materialized view statistic_profile_nft_buy_now_sales is E'@omit'; + comment on materialized view statistic_profile_deso_token_buy_orders is E'@omit'; + comment on materialized view statistic_profile_deso_token_sell_orders is E'@omit'; + comment on materialized view statistic_profile_earnings_breakdown_counts is E'@name profileEarningsBreakdownStats\n@unique public_key\n@omit all'; + comment on function jsonb_to_bytea is E'@omit'; + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on materialized view statistic_txn_count_all is NULL; + comment on materialized view statistic_txn_count_30_d is NULL; + comment on materialized view statistic_wallet_count_all is NULL; + comment on materialized view statistic_new_wallet_count_30_d is NULL; + comment on materialized view statistic_active_wallet_count_30_d is NULL; + comment on materialized view statistic_block_height_current is NULL; + comment on materialized view statistic_txn_count_pending is NULL; + comment on materialized view statistic_txn_fee_1_d is NULL; + comment on materialized view statistic_total_supply is NULL; + comment on materialized view statistic_post_count is NULL; + comment on materialized view statistic_post_longform_count is NULL; + comment on materialized view statistic_comment_count is NULL; + 
comment on materialized view statistic_repost_count is NULL; + comment on materialized view statistic_txn_count_creator_coin is NULL; + comment on materialized view statistic_txn_count_nft is NULL; + comment on materialized view statistic_txn_count_dex is NULL; + comment on materialized view statistic_txn_count_social is NULL; + comment on materialized view statistic_follow_count is NULL; + comment on materialized view statistic_message_count is NULL; + comment on materialized view statistic_social_leaderboard_likes is NULL; + comment on materialized view statistic_social_leaderboard_reactions is NULL; + comment on materialized view statistic_social_leaderboard_diamonds is NULL; + comment on materialized view statistic_social_leaderboard_reposts is NULL; + comment on materialized view statistic_social_leaderboard_comments is NULL; + comment on table public_key_first_transaction IS NULL; + comment on function get_transaction_count is NULL; + comment on function refresh_public_key_first_transaction is NULL; + comment on view statistic_dashboard is NULL; + comment on materialized view statistic_social_leaderboard is NULL; + comment on materialized view statistic_nft_leaderboard is NULL; + comment on materialized view statistic_defi_leaderboard is NULL; + comment on materialized view statistic_txn_count_monthly is NULL; + comment on materialized view statistic_wallet_count_monthly is NULL; + comment on materialized view statistic_wallet_count_monthly is NULL; + comment on materialized view statistic_txn_count_daily is NULL; + comment on materialized view statistic_new_wallet_count_daily is NULL; + comment on materialized view statistic_active_wallet_count_daily is NULL; + comment on materialized view statistic_profile_transactions is NULL; + comment on materialized view statistic_profile_top_nft_owners is NULL; + comment on function cc_nanos_total_sell_value is NULL; + comment on view dao_coin_limit_order_max_bids is NULL; + comment on view 
dao_coin_limit_order_min_asks is NULL; + comment on view dao_coin_limit_order_bid_asks is NULL; + comment on materialized view statistic_cc_balance_totals is NULL; + comment on materialized view statistic_nft_balance_totals is NULL; + comment on materialized view statistic_deso_token_balance_totals is NULL; + comment on materialized view statistic_portfolio_value is NULL; + comment on materialized view statistic_profile_deso_token_buy_orders is NULL; + comment on materialized view statistic_profile_deso_token_sell_orders is NULL; + comment on materialized view statistic_profile_diamonds_given is NULL; + comment on materialized view statistic_profile_diamonds_received is NULL; + comment on materialized view statistic_profile_cc_buyers is NULL; + comment on materialized view statistic_profile_cc_sellers is NULL; + comment on materialized view statistic_profile_nft_bid_buys is NULL; + comment on materialized view statistic_profile_nft_bid_sales is NULL; + comment on materialized view statistic_profile_nft_buy_now_buys is NULL; + comment on materialized view statistic_profile_nft_buy_now_sales is NULL; + comment on materialized view statistic_profile_deso_token_buy_orders is NULL; + comment on materialized view statistic_profile_deso_token_sell_orders is NULL; + comment on materialized view statistic_profile_earnings_breakdown_counts is NULL; + comment on function jsonb_to_bytea is NULL; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/explorer_migrations/20240260000022_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go b/migrations/explorer_migrations/20240260000022_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go new file mode 100644 index 0000000..c7d12be --- /dev/null +++ b/migrations/explorer_migrations/20240260000022_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go @@ -0,0 +1,90 @@ +package explorer_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + 
Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` +CREATE MATERIALIZED VIEW my_stake_summary as +select coalesce(total_stake_rewards.staker_pkid, total_stake_amount.staker_pkid) as staker_pkid, + total_stake_rewards.total_rewards as total_stake_rewards, + total_stake_amount.total_stake as total_stake +from (select staker_pkid, sum(reward_nanos) total_rewards + from stake_reward + group by staker_pkid) total_stake_rewards + full outer join + (select staker_pkid, sum(stake_amount_nanos) total_stake + from stake_entry + group by staker_pkid) total_stake_amount + on total_stake_amount.staker_pkid = total_stake_rewards.staker_pkid; + +CREATE UNIQUE INDEX my_stake_summary_unique_index ON my_stake_summary (staker_pkid); + +CREATE MATERIALIZED VIEW staking_summary as +select * +from (select sum(total_stake_amount_nanos) as global_stake_amount_nanos, + count(distinct validator_pkid) as num_validators + from validator_entry) validator_summary, + (select max(epoch_number) current_epoch_number from epoch_entry) current_epoch, + (select count(distinct snapshot_at_epoch_number) num_epochs_in_leader_schedule + from leader_schedule_entry) num_epochs_in_leader_schedule, + (select count(distinct staker_pkid) as num_stakers from stake_entry) staker_summary; + +CREATE UNIQUE INDEX staking_summary_unique_index ON staking_summary (global_stake_amount_nanos, num_validators, current_epoch_number, num_epochs_in_leader_schedule); + +CREATE MATERIALIZED VIEW validator_stats as +select validator_entry.validator_pkid, + rank() OVER ( order by validator_entry.total_stake_amount_nanos) as validator_rank, + validator_entry.total_stake_amount_nanos::float / + staking_summary.global_stake_amount_nanos::float as percent_total_stake, + coalesce(time_in_jail, 0) + + (case + when jailed_at_epoch_number = 0 then 0 + else (staking_summary.current_epoch_number - jailed_at_epoch_number) END) epochs_in_jail, + 
coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0) num_epochs_in_leader_schedule, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0)::float / + staking_summary.num_epochs_in_leader_schedule::float as percent_epochs_in_leader_schedule, + coalesce(total_rewards, 0) as total_stake_reward_nanos +from staking_summary, + validator_entry + left join (select validator_pkid, sum(jhe.unjailed_at_epoch_number - jhe.jailed_at_epoch_number) time_in_jail + from jailed_history_event jhe + group by validator_pkid) jhe + on jhe.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, count(*) as num_epochs_in_leader_schedule + from leader_schedule_entry + group by validator_pkid) leader_schedule_summary + on leader_schedule_summary.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, sum(reward_nanos) as total_rewards + from stake_reward + group by validator_pkid) as total_stake_rewards + on total_stake_rewards.validator_pkid = validator_entry.validator_pkid; + +CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_pkid); + + comment on materialized view validator_stats is E'@unique validator_pkid\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName validatorStats|@fieldName validatorEntry'; + comment on materialized view my_stake_summary is E'@unique staker_pkid\n@foreignKey (staker_pkid) references account (pkid)|@foreignFieldName myStakeSummary|@fieldName staker'; + +`) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP MATERIALIZED VIEW IF EXISTS validator_stats CASCADE; + DROP MATERIALIZED VIEW IF EXISTS staking_summary CASCADE; + DROP MATERIALIZED VIEW IF EXISTS my_stake_summary CASCADE; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240301000001_update_validator_stats_view.go 
b/migrations/explorer_migrations/20240301000001_update_validator_stats_view.go similarity index 97% rename from migrations/post_sync_migrations/20240301000001_update_validator_stats_view.go rename to migrations/explorer_migrations/20240301000001_update_validator_stats_view.go index 80737ee..1cb0a52 100644 --- a/migrations/post_sync_migrations/20240301000001_update_validator_stats_view.go +++ b/migrations/explorer_migrations/20240301000001_update_validator_stats_view.go @@ -1,4 +1,4 @@ -package post_sync_migrations +package explorer_migrations import ( "context" @@ -8,9 +8,6 @@ import ( func init() { Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` DROP MATERIALIZED VIEW IF EXISTS validator_stats; create materialized view validator_stats as @@ -52,9 +49,6 @@ CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_p return nil }, func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` DROP MATERIALIZED VIEW IF EXISTS validator_stats CASCADE; CREATE MATERIALIZED VIEW validator_stats as diff --git a/migrations/post_sync_migrations/20240419000001_update_statistc_txn_count_all.go b/migrations/explorer_migrations/20240419000001_update_statistc_txn_count_all.go similarity index 97% rename from migrations/post_sync_migrations/20240419000001_update_statistc_txn_count_all.go rename to migrations/explorer_migrations/20240419000001_update_statistc_txn_count_all.go index 98d3974..859b7d1 100644 --- a/migrations/post_sync_migrations/20240419000001_update_statistc_txn_count_all.go +++ b/migrations/explorer_migrations/20240419000001_update_statistc_txn_count_all.go @@ -1,4 +1,4 @@ -package post_sync_migrations +package explorer_migrations import ( "context" @@ -9,10 +9,6 @@ import ( func init() { Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - 
return nil - } - err := RunMigrationWithRetries(db, fmt.Sprintf(` DROP MATERIALIZED VIEW IF EXISTS statistic_txn_count_all CASCADE; CREATE MATERIALIZED VIEW statistic_txn_count_all AS diff --git a/migrations/post_sync_migrations/20240809000000_create_stake_rewards_by_validator_and_staker_view.go b/migrations/explorer_migrations/20240809000000_create_stake_rewards_by_validator_and_staker_view.go similarity index 89% rename from migrations/post_sync_migrations/20240809000000_create_stake_rewards_by_validator_and_staker_view.go rename to migrations/explorer_migrations/20240809000000_create_stake_rewards_by_validator_and_staker_view.go index cde2128..602dff4 100644 --- a/migrations/post_sync_migrations/20240809000000_create_stake_rewards_by_validator_and_staker_view.go +++ b/migrations/explorer_migrations/20240809000000_create_stake_rewards_by_validator_and_staker_view.go @@ -1,4 +1,4 @@ -package post_sync_migrations +package explorer_migrations import ( "context" @@ -8,9 +8,6 @@ import ( func init() { Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` CREATE OR REPLACE VIEW stake_rewards_by_validator_and_staker AS SELECT @@ -27,9 +24,6 @@ func init() { return nil }, func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` DROP VIEW IF EXISTS stake_rewards_by_validator_and_staker CASCADE; `) diff --git a/migrations/post_sync_migrations/20240811000000_update_staking_summary_unique_idx.go b/migrations/explorer_migrations/20240811000000_update_staking_summary_unique_idx.go similarity index 98% rename from migrations/post_sync_migrations/20240811000000_update_staking_summary_unique_idx.go rename to migrations/explorer_migrations/20240811000000_update_staking_summary_unique_idx.go index dd339d9..82cafa5 100644 --- a/migrations/post_sync_migrations/20240811000000_update_staking_summary_unique_idx.go +++ 
b/migrations/explorer_migrations/20240811000000_update_staking_summary_unique_idx.go @@ -1,4 +1,4 @@ -package post_sync_migrations +package explorer_migrations import ( "context" @@ -8,9 +8,6 @@ import ( func init() { Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` drop materialized view validator_stats; drop materialized view staking_summary; @@ -86,9 +83,6 @@ func init() { return nil }, func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` drop materialized view validator_stats; drop materialized view staking_summary; diff --git a/migrations/post_sync_migrations/20240917000000_profile_transactions_idx.go b/migrations/explorer_migrations/20240917000000_profile_transactions_idx.go similarity index 80% rename from migrations/post_sync_migrations/20240917000000_profile_transactions_idx.go rename to migrations/explorer_migrations/20240917000000_profile_transactions_idx.go index 2607bf3..96c28d2 100644 --- a/migrations/post_sync_migrations/20240917000000_profile_transactions_idx.go +++ b/migrations/explorer_migrations/20240917000000_profile_transactions_idx.go @@ -1,4 +1,4 @@ -package post_sync_migrations +package explorer_migrations import ( "context" @@ -8,9 +8,6 @@ import ( func init() { Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` create index if not exists statistic_profile_transactions_latest_idx on statistic_profile_transactions (latest_transaction_timestamp desc); `) @@ -20,9 +17,6 @@ func init() { return nil }, func(ctx context.Context, db *bun.DB) error { - if !calculateExplorerStatistics { - return nil - } _, err := db.Exec(` drop index if exists statistic_profile_transactions_latest_idx; `) diff --git a/migrations/explorer_migrations/init.go b/migrations/explorer_migrations/init.go new file mode 100644 
index 0000000..5f2cccf --- /dev/null +++ b/migrations/explorer_migrations/init.go @@ -0,0 +1,40 @@ +package explorer_migrations + +import ( + "context" + "fmt" + "github.com/pkg/errors" + "github.com/uptrace/bun" + "github.com/uptrace/bun/migrate" + "math" + "time" +) + +var ( + Migrations = migrate.NewMigrations() +) + +func init() { + if err := Migrations.DiscoverCaller(); err != nil { + panic(err) + } +} + +const ( + retryLimit = 10 +) + +func RunMigrationWithRetries(db *bun.DB, migrationQuery string) error { + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Minute) + defer cancel() + for ii := 0; ii < retryLimit; ii++ { + _, err := db.ExecContext(ctx, migrationQuery) + if err == nil { + return nil + } + waitTime := 5 * time.Duration(math.Pow(2, float64(ii))) * time.Second + fmt.Printf("Failed to migrate, retrying in %v. err: %v. Query: %v\n", waitTime, err, migrationQuery) + time.Sleep(waitTime) + } + return errors.Errorf("Failed to migrate after %d attempts", retryLimit) +} diff --git a/migrations/explorer_view_migrations/20241001000000_remote_views.go b/migrations/explorer_view_migrations/20241001000000_remote_views.go new file mode 100644 index 0000000..b6a4680 --- /dev/null +++ b/migrations/explorer_view_migrations/20241001000000_remote_views.go @@ -0,0 +1,139 @@ +package explorer_view_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + userPassword := DbPassword + userName := DbUsername + dbName := DbName + host := DbHost + port := DbPort + + if _, err := db.Exec(` + CREATE EXTENSION IF NOT EXISTS postgres_fdw; + + -- Create a foreign server + CREATE SERVER IF NOT EXISTS subscriber_server + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host ?, port ?, dbname ?); + + -- Create a user mapping + CREATE USER MAPPING IF NOT EXISTS FOR current_user + SERVER subscriber_server + OPTIONS (user ?, password ?); + + IMPORT FOREIGN SCHEMA public + LIMIT TO ( 
statistic_dashboard, + statistic_social_leaderboard, + statistic_nft_leaderboard, + statistic_defi_leaderboard, + statistic_txn_count_monthly, + statistic_wallet_count_monthly, + statistic_txn_count_daily, + statistic_new_wallet_count_daily, + statistic_active_wallet_count_daily, + statistic_profile_transactions, + statistic_profile_top_nft_owners, + dao_coin_limit_order_bid_asks, + statistic_portfolio_value, + statistic_profile_earnings, + statistic_profile_earnings_breakdown_counts + ) + FROM SERVER subscriber_server + INTO public; + + CREATE VIEW statistic_dashboard_remote_view AS + SELECT * FROM statistic_dashboard; + COMMENT ON VIEW statistic_dashboard_remote_view IS E'@name dashboardStat'; + COMMENT ON FOREIGN TABLE statistic_dashboard IS E'@omit'; + + CREATE VIEW statistic_social_leaderboard_remote_view AS + SELECT * FROM statistic_social_leaderboard; + COMMENT ON VIEW statistic_social_leaderboard_remote_view IS E'@name socialLeaderboardStat'; + COMMENT ON FOREIGN TABLE statistic_social_leaderboard IS E'@omit'; + + CREATE VIEW statistic_nft_leaderboard_remote_view AS + SELECT * FROM statistic_nft_leaderboard; + COMMENT ON VIEW statistic_nft_leaderboard_remote_view IS E'@name nftLeaderboardStat'; + COMMENT ON FOREIGN TABLE statistic_nft_leaderboard IS E'@omit'; + + CREATE VIEW statistic_defi_leaderboard_remote_view AS + SELECT * FROM statistic_defi_leaderboard; + COMMENT ON VIEW statistic_defi_leaderboard_remote_view IS E'@name defiLeaderboardStat'; + COMMENT ON FOREIGN TABLE statistic_defi_leaderboard IS E'@omit'; + + CREATE VIEW statistic_txn_count_monthly_remote_view AS + SELECT * FROM statistic_txn_count_monthly; + COMMENT ON VIEW statistic_txn_count_monthly_remote_view IS E'@name monthlyTxnCountStat'; + COMMENT ON FOREIGN TABLE statistic_txn_count_monthly IS E'@omit'; + + CREATE VIEW statistic_wallet_count_monthly_remote_view AS + SELECT * FROM statistic_wallet_count_monthly; + COMMENT ON VIEW statistic_wallet_count_monthly_remote_view IS E'@name 
monthlyNewWalletCountStat'; + COMMENT ON FOREIGN TABLE statistic_wallet_count_monthly IS E'@omit'; + + CREATE VIEW statistic_txn_count_daily_remote_view AS + SELECT * FROM statistic_txn_count_daily; + COMMENT ON VIEW statistic_txn_count_daily_remote_view IS E'@name dailyTxnCountStat'; + COMMENT ON FOREIGN TABLE statistic_txn_count_daily IS E'@omit'; + + CREATE VIEW statistic_new_wallet_count_daily_remote_view AS + SELECT * FROM statistic_new_wallet_count_daily; + COMMENT ON VIEW statistic_new_wallet_count_daily_remote_view IS E'@name dailyNewWalletCountStat'; + COMMENT ON FOREIGN TABLE statistic_new_wallet_count_daily IS E'@omit'; + + CREATE VIEW statistic_active_wallet_count_daily_remote_view AS + SELECT * FROM statistic_active_wallet_count_daily; + COMMENT ON VIEW statistic_active_wallet_count_daily_remote_view IS E'@name dailyActiveWalletCountStat'; + COMMENT ON FOREIGN TABLE statistic_active_wallet_count_daily IS E'@omit'; + + CREATE VIEW statistic_profile_transactions_remote_view AS + SELECT * FROM statistic_profile_transactions; + COMMENT ON VIEW statistic_profile_transactions_remote_view IS E'@name profileTransactionStat\n@unique public_key\n@foreignKey (public_key) references account (public_key)|@foreignFieldName transactionStats|@fieldName account'; + COMMENT ON FOREIGN TABLE statistic_profile_transactions IS E'@omit'; + + CREATE VIEW statistic_profile_top_nft_owners_remote_view AS + SELECT * FROM statistic_profile_top_nft_owners; + COMMENT ON VIEW statistic_profile_top_nft_owners_remote_view IS E'@name profileNftTopOwners'; + COMMENT ON FOREIGN TABLE statistic_profile_top_nft_owners IS E'@omit'; + + CREATE VIEW dao_coin_limit_order_bid_asks_remote_view AS + SELECT * FROM dao_coin_limit_order_bid_asks; + COMMENT ON VIEW dao_coin_limit_order_bid_asks_remote_view IS E'@unique selling_creator_pkid,buying_creator_pkid\n@foreignKey (selling_creator_pkid) references account (pkid)|@foreignFieldName bidAskAsSellingToken|@fieldName 
sellingTokenAccount\n@foreignKey (buying_creator_pkid) references account (pkid)|@foreignFieldName bidAskAsBuyingToken|@fieldName buyingTokenAccount\n@name deso_token_limit_order_bid_asks'; + COMMENT ON FOREIGN TABLE dao_coin_limit_order_bid_asks IS E'@omit'; + + CREATE VIEW statistic_portfolio_value_remote_view AS + SELECT * FROM statistic_portfolio_value; + COMMENT ON VIEW statistic_portfolio_value_remote_view IS E'@name profilePortfolioValueStat\n@unique public_key\n@omit all'; + COMMENT ON FOREIGN TABLE statistic_portfolio_value IS E'@omit'; + + CREATE VIEW statistic_profile_earnings_remote_view AS + SELECT * FROM statistic_profile_earnings; + COMMENT ON VIEW statistic_profile_earnings_remote_view IS E'@name profileEarningsStats\n@unique public_key\n@omit all'; + COMMENT ON FOREIGN TABLE statistic_profile_earnings IS E'@omit'; + + CREATE VIEW statistic_profile_earnings_breakdown_counts_remote_view AS + SELECT * FROM statistic_profile_earnings_breakdown_counts; + COMMENT ON VIEW statistic_profile_earnings_breakdown_counts_remote_view IS E'@name profileEarningsBreakdownStats\n@unique public_key\n@omit all'; + COMMENT ON FOREIGN TABLE statistic_profile_earnings_breakdown_counts IS E'@omit'; + `, host, port, dbName, userName, userPassword); err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + drop index if exists statistic_profile_transactions_latest_idx; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/explorer_view_migrations/init.go b/migrations/explorer_view_migrations/init.go new file mode 100644 index 0000000..888e1ca --- /dev/null +++ b/migrations/explorer_view_migrations/init.go @@ -0,0 +1,29 @@ +package explorer_view_migrations + +import ( + "github.com/uptrace/bun/migrate" +) + +var Migrations = migrate.NewMigrations() + +var ( + DbHost string + DbPort string + DbUsername string + DbPassword string + DbName string +) + +func SetDBConfig(host string, 
port string, username string, password string, dbname string) { + DbHost = host + DbPort = port + DbUsername = username + DbPassword = password + DbName = dbname +} + +func init() { + if err := Migrations.DiscoverCaller(); err != nil { + panic(err) + } +} diff --git a/migrations/initial_migrations/20230405000000_create_profile_entry_table.go b/migrations/initial_migrations/20230405000000_create_profile_entry_table.go index bc36c4c..7b5e6dc 100644 --- a/migrations/initial_migrations/20230405000000_create_profile_entry_table.go +++ b/migrations/initial_migrations/20230405000000_create_profile_entry_table.go @@ -30,9 +30,9 @@ func createProfileEntryTable(db *bun.DB, tableName string) error { CASE WHEN cc_coins_in_circulation_nanos = 0 THEN 0 ELSE - ( + (ROUND(( deso_locked_nanos::NUMERIC / (cc_coins_in_circulation_nanos::NUMERIC * 0.33333) * 1e9 - )::NUMERIC + )::NUMERIC, 0))::NUMERIC END ) STORED ); diff --git a/migrations/initial_migrations/20230512000001_create_utxo_operation_entry_table.go b/migrations/initial_migrations/20230512000001_create_utxo_operation_entry_table.go index f23f5fb..d13ef9d 100644 --- a/migrations/initial_migrations/20230512000001_create_utxo_operation_entry_table.go +++ b/migrations/initial_migrations/20230512000001_create_utxo_operation_entry_table.go @@ -36,7 +36,7 @@ func init() { transaction_index INTEGER NOT NULL, utxo_op_index INTEGER NOT NULL, utxo_op_bytes BYTEA NOT NULL, - badger_key BYTEA NOT NULL + badger_key BYTEA NOT NULL PRIMARY KEY ); CREATE UNIQUE INDEX utxo_operation_block_hash_transaction_utxo_op_idx ON utxo_operation (block_hash, transaction_index desc, utxo_op_index desc); CREATE INDEX utxo_entry_type_block_hash_transaction_utxo_op_idx ON utxo_operation (operation_type, block_hash, transaction_index desc, utxo_op_index desc); diff --git a/migrations/initial_migrations/20241023000000_base_58_pg_fn.go b/migrations/initial_migrations/20241023000000_base_58_pg_fn.go new file mode 100644 index 0000000..691d856 --- /dev/null +++ 
b/migrations/initial_migrations/20241023000000_base_58_pg_fn.go @@ -0,0 +1,41 @@ +package initial_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + CREATE OR REPLACE FUNCTION base58_check_encode_with_prefix(input bytea) RETURNS TEXT AS $$ + DECLARE + prefix bytea := E'\\xcd1400'::bytea; + b bytea; + big_val NUMERIC; + BEGIN + b := prefix || input || checksum(prefix || input); + + -- Convert bytea to a big numeric for Base58 encoding + SELECT INTO big_val bytes_to_bigint(b); + RETURN base58_encode(big_val); + END; + $$ LANGUAGE plpgsql IMMUTABLE; + `) + if err != nil { + return err + } + return nil + + }, func(ctx context.Context, db *bun.DB) error { + // Lastly, delete the noaccess role. + _, err := db.Exec(` + DROP FUNCTION base58_check_encode_with_prefix(bytea); +`) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20230714000001_create_postgraphile_fk_comments.go b/migrations/post_sync_migrations/20230714000001_create_postgraphile_fk_comments.go index 8a394bc..b66c3c6 100644 --- a/migrations/post_sync_migrations/20230714000001_create_postgraphile_fk_comments.go +++ b/migrations/post_sync_migrations/20230714000001_create_postgraphile_fk_comments.go @@ -103,84 +103,6 @@ func init() { return err } - // Only annotate the explorer statistics views if the env var is set to enable them. 
- if !calculateExplorerStatistics { - return nil - } - - _, err = db.Exec(` - comment on materialized view statistic_txn_count_all is E'@omit'; - comment on materialized view statistic_txn_count_30_d is E'@omit'; - comment on materialized view statistic_wallet_count_all is E'@omit'; - comment on materialized view statistic_new_wallet_count_30_d is E'@omit'; - comment on materialized view statistic_active_wallet_count_30_d is E'@omit'; - comment on materialized view statistic_block_height_current is E'@omit'; - comment on materialized view statistic_txn_count_pending is E'@omit'; - comment on materialized view statistic_txn_fee_1_d is E'@omit'; - comment on materialized view statistic_total_supply is E'@omit'; - comment on materialized view statistic_post_count is E'@omit'; - comment on materialized view statistic_post_longform_count is E'@omit'; - comment on materialized view statistic_comment_count is E'@omit'; - comment on materialized view statistic_repost_count is E'@omit'; - comment on materialized view statistic_txn_count_creator_coin is E'@omit'; - comment on materialized view statistic_txn_count_nft is E'@omit'; - comment on materialized view statistic_txn_count_dex is E'@omit'; - comment on materialized view statistic_txn_count_social is E'@omit'; - comment on materialized view statistic_follow_count is E'@omit'; - comment on materialized view statistic_message_count is E'@omit'; - comment on materialized view statistic_social_leaderboard_likes is E'@omit'; - comment on materialized view statistic_social_leaderboard_reactions is E'@omit'; - comment on materialized view statistic_social_leaderboard_diamonds is E'@omit'; - comment on materialized view statistic_social_leaderboard_reposts is E'@omit'; - comment on materialized view statistic_social_leaderboard_comments is E'@omit'; - comment on table public_key_first_transaction IS E'@omit'; - comment on function get_transaction_count is E'@omit'; - comment on function refresh_public_key_first_transaction is 
E'@omit'; - comment on view statistic_dashboard is E'@name dashboardStat'; - comment on materialized view statistic_social_leaderboard is E'@name socialLeaderboardStat'; - comment on materialized view statistic_nft_leaderboard is E'@name nftLeaderboardStat'; - comment on materialized view statistic_defi_leaderboard is E'@name defiLeaderboardStat'; - comment on materialized view statistic_txn_count_monthly is E'@name monthlyTxnCountStat'; - comment on materialized view statistic_wallet_count_monthly is E'@name monthlyNewWalletCountStat'; - comment on materialized view statistic_txn_count_daily is E'@name dailyTxnCountStat'; - comment on materialized view statistic_new_wallet_count_daily is E'@name dailyNewWalletCountStat'; - comment on materialized view statistic_active_wallet_count_daily is E'@name dailyActiveWalletCountStat'; - comment on materialized view statistic_profile_transactions is E'@name profileTransactionStat\n@unique public_key\n@foreignKey (public_key) references account (public_key)|@foreignFieldName transactionStats|@fieldName account'; - comment on materialized view statistic_profile_top_nft_owners is E'@name profileNftTopOwners'; - comment on function hex_to_numeric is E'@omit'; - comment on function int_to_bytea is E'@omit'; - comment on function cc_nanos_total_sell_value is E'@omit'; - comment on view dao_coin_limit_order_max_bids is E'@omit'; - comment on view dao_coin_limit_order_min_asks is E'@omit'; - comment on view dao_coin_limit_order_bid_asks is E'@unique selling_creator_pkid,buying_creator_pkid\n@foreignKey (selling_creator_pkid) references account (pkid)|@foreignFieldName bidAskAsSellingToken|@fieldName sellingTokenAccount\n@foreignKey (buying_creator_pkid) references account (pkid)|@foreignFieldName bidAskAsBuyingToken|@fieldName buyingTokenAccount\n@name deso_token_limit_order_bid_asks'; - comment on materialized view statistic_cc_balance_totals is E'@omit'; - comment on materialized view statistic_nft_balance_totals is E'@omit'; - 
comment on materialized view statistic_deso_token_balance_totals is E'@omit'; - comment on materialized view statistic_portfolio_value is E'@name profilePortfolioValueStat\n@unique public_key\n@omit all'; - comment on materialized view statistic_profile_cc_royalties is E'@omit'; - comment on materialized view statistic_profile_diamond_earnings is E'@omit'; - comment on materialized view statistic_profile_nft_bid_royalty_earnings is E'@omit'; - comment on materialized view statistic_profile_nft_buy_now_royalty_earnings is E'@omit'; - comment on materialized view statistic_profile_earnings is E'@name profileEarningsStats\n@unique public_key\n@omit all'; - comment on materialized view statistic_profile_deso_token_buy_orders is E'@omit'; - comment on materialized view statistic_profile_deso_token_sell_orders is E'@omit'; - comment on materialized view statistic_profile_diamonds_given is E'@omit'; - comment on materialized view statistic_profile_diamonds_received is E'@omit'; - comment on materialized view statistic_profile_cc_buyers is E'@omit'; - comment on materialized view statistic_profile_cc_sellers is E'@omit'; - comment on materialized view statistic_profile_nft_bid_buys is E'@omit'; - comment on materialized view statistic_profile_nft_bid_sales is E'@omit'; - comment on materialized view statistic_profile_nft_buy_now_buys is E'@omit'; - comment on materialized view statistic_profile_nft_buy_now_sales is E'@omit'; - comment on materialized view statistic_profile_deso_token_buy_orders is E'@omit'; - comment on materialized view statistic_profile_deso_token_sell_orders is E'@omit'; - comment on materialized view statistic_profile_earnings_breakdown_counts is E'@name profileEarningsBreakdownStats\n@unique public_key\n@omit all'; - comment on function jsonb_to_bytea is E'@omit'; - `) - if err != nil { - return err - } - return nil }, func(ctx context.Context, db *bun.DB) error { _, err := db.Exec(` @@ -274,78 +196,6 @@ func init() { return err } - // Only revert the 
explorer statistics views if the env var is set to enable them. - if !calculateExplorerStatistics { - return nil - } - - _, err = db.Exec(` - comment on materialized view statistic_txn_count_all is NULL; - comment on materialized view statistic_txn_count_30_d is NULL; - comment on materialized view statistic_wallet_count_all is NULL; - comment on materialized view statistic_new_wallet_count_30_d is NULL; - comment on materialized view statistic_active_wallet_count_30_d is NULL; - comment on materialized view statistic_block_height_current is NULL; - comment on materialized view statistic_txn_count_pending is NULL; - comment on materialized view statistic_txn_fee_1_d is NULL; - comment on materialized view statistic_total_supply is NULL; - comment on materialized view statistic_post_count is NULL; - comment on materialized view statistic_post_longform_count is NULL; - comment on materialized view statistic_comment_count is NULL; - comment on materialized view statistic_repost_count is NULL; - comment on materialized view statistic_txn_count_creator_coin is NULL; - comment on materialized view statistic_txn_count_nft is NULL; - comment on materialized view statistic_txn_count_dex is NULL; - comment on materialized view statistic_txn_count_social is NULL; - comment on materialized view statistic_follow_count is NULL; - comment on materialized view statistic_message_count is NULL; - comment on materialized view statistic_social_leaderboard_likes is NULL; - comment on materialized view statistic_social_leaderboard_reactions is NULL; - comment on materialized view statistic_social_leaderboard_diamonds is NULL; - comment on materialized view statistic_social_leaderboard_reposts is NULL; - comment on materialized view statistic_social_leaderboard_comments is NULL; - comment on table public_key_first_transaction IS NULL; - comment on function get_transaction_count is NULL; - comment on function refresh_public_key_first_transaction is NULL; - comment on view 
statistic_dashboard is NULL; - comment on materialized view statistic_social_leaderboard is NULL; - comment on materialized view statistic_nft_leaderboard is NULL; - comment on materialized view statistic_defi_leaderboard is NULL; - comment on materialized view statistic_txn_count_monthly is NULL; - comment on materialized view statistic_wallet_count_monthly is NULL; - comment on materialized view statistic_wallet_count_monthly is NULL; - comment on materialized view statistic_txn_count_daily is NULL; - comment on materialized view statistic_new_wallet_count_daily is NULL; - comment on materialized view statistic_active_wallet_count_daily is NULL; - comment on materialized view statistic_profile_transactions is NULL; - comment on materialized view statistic_profile_top_nft_owners is NULL; - comment on function cc_nanos_total_sell_value is NULL; - comment on view dao_coin_limit_order_max_bids is NULL; - comment on view dao_coin_limit_order_min_asks is NULL; - comment on view dao_coin_limit_order_bid_asks is NULL; - comment on materialized view statistic_cc_balance_totals is NULL; - comment on materialized view statistic_nft_balance_totals is NULL; - comment on materialized view statistic_deso_token_balance_totals is NULL; - comment on materialized view statistic_portfolio_value is NULL; - comment on materialized view statistic_profile_deso_token_buy_orders is NULL; - comment on materialized view statistic_profile_deso_token_sell_orders is NULL; - comment on materialized view statistic_profile_diamonds_given is NULL; - comment on materialized view statistic_profile_diamonds_received is NULL; - comment on materialized view statistic_profile_cc_buyers is NULL; - comment on materialized view statistic_profile_cc_sellers is NULL; - comment on materialized view statistic_profile_nft_bid_buys is NULL; - comment on materialized view statistic_profile_nft_bid_sales is NULL; - comment on materialized view statistic_profile_nft_buy_now_buys is NULL; - comment on materialized 
view statistic_profile_nft_buy_now_sales is NULL; - comment on materialized view statistic_profile_deso_token_buy_orders is NULL; - comment on materialized view statistic_profile_deso_token_sell_orders is NULL; - comment on materialized view statistic_profile_earnings_breakdown_counts is NULL; - comment on function jsonb_to_bytea is NULL; - `) - if err != nil { - return err - } - return nil }) } diff --git a/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go b/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go index 1c310f3..e040ec2 100644 --- a/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go +++ b/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go @@ -23,74 +23,6 @@ from block comment on view epoch_details_for_block is E'@unique block_hash\n@unique epoch_number\n@foreignKey (block_hash) references block (block_hash)|@foreignFieldName epochDetailForBlock|@fieldName block\n@foreignKey (epoch_number) references epoch_entry (epoch_number)|@foreignFieldName blockHashesInEpoch|@fieldName epochEntry\n@foreignKey (proposer_pkid) references account (pkid)|@foreignFieldName proposedBlockHashes|@fieldName proposer'; comment on table bls_public_key_pkid_pair_snapshot_entry is E'@foreignKey (pkid) references account (pkid)|@foreignFieldName blsPublicKeyPkidPairSnapshotEntries|@fieldName account\n@foreignKey (snapshot_at_epoch_number) references epoch_entry (snapshot_at_epoch_number)|@foreignFieldName blsPublicKeyPkidPairSnapshotEntries|@fieldName epochEntry'; comment on column bls_public_key_pkid_pair_snapshot_entry.badger_key is E'@omit'; -`) - if err != nil { - return err - } - if !calculateExplorerStatistics { - return nil - } - _, err = db.Exec(` -CREATE MATERIALIZED VIEW my_stake_summary as -select coalesce(total_stake_rewards.staker_pkid, 
total_stake_amount.staker_pkid) as staker_pkid, - total_stake_rewards.total_rewards as total_stake_rewards, - total_stake_amount.total_stake as total_stake -from (select staker_pkid, sum(reward_nanos) total_rewards - from stake_reward - group by staker_pkid) total_stake_rewards - full outer join - (select staker_pkid, sum(stake_amount_nanos) total_stake - from stake_entry - group by staker_pkid) total_stake_amount - on total_stake_amount.staker_pkid = total_stake_rewards.staker_pkid; - -CREATE UNIQUE INDEX my_stake_summary_unique_index ON my_stake_summary (staker_pkid); - -CREATE MATERIALIZED VIEW staking_summary as -select * -from (select sum(total_stake_amount_nanos) as global_stake_amount_nanos, - count(distinct validator_pkid) as num_validators - from validator_entry) validator_summary, - (select max(epoch_number) current_epoch_number from epoch_entry) current_epoch, - (select count(distinct snapshot_at_epoch_number) num_epochs_in_leader_schedule - from leader_schedule_entry) num_epochs_in_leader_schedule, - (select count(distinct staker_pkid) as num_stakers from stake_entry) staker_summary; - -CREATE UNIQUE INDEX staking_summary_unique_index ON staking_summary (global_stake_amount_nanos, num_validators, current_epoch_number, num_epochs_in_leader_schedule); - -CREATE MATERIALIZED VIEW validator_stats as -select validator_entry.validator_pkid, - rank() OVER ( order by validator_entry.total_stake_amount_nanos) as validator_rank, - validator_entry.total_stake_amount_nanos::float / - staking_summary.global_stake_amount_nanos::float as percent_total_stake, - coalesce(time_in_jail, 0) + - (case - when jailed_at_epoch_number = 0 then 0 - else (staking_summary.current_epoch_number - jailed_at_epoch_number) END) epochs_in_jail, - coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0) num_epochs_in_leader_schedule, - coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0)::float / - staking_summary.num_epochs_in_leader_schedule::float as 
percent_epochs_in_leader_schedule, - coalesce(total_rewards, 0) as total_stake_reward_nanos -from staking_summary, - validator_entry - left join (select validator_pkid, sum(jhe.unjailed_at_epoch_number - jhe.jailed_at_epoch_number) time_in_jail - from jailed_history_event jhe - group by validator_pkid) jhe - on jhe.validator_pkid = validator_entry.validator_pkid - left join (select validator_pkid, count(*) as num_epochs_in_leader_schedule - from leader_schedule_entry - group by validator_pkid) leader_schedule_summary - on leader_schedule_summary.validator_pkid = validator_entry.validator_pkid - left join (select validator_pkid, sum(reward_nanos) as total_rewards - from stake_reward - group by validator_pkid) as total_stake_rewards - on total_stake_rewards.validator_pkid = validator_entry.validator_pkid; - -CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_pkid); - - comment on materialized view validator_stats is E'@unique validator_pkid\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName validatorStats|@fieldName validatorEntry'; - comment on materialized view my_stake_summary is E'@unique staker_pkid\n@foreignKey (staker_pkid) references account (pkid)|@foreignFieldName myStakeSummary|@fieldName staker'; - `) if err != nil { return err @@ -106,17 +38,6 @@ CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_p if err != nil { return err } - if !calculateExplorerStatistics { - return nil - } - _, err = db.Exec(` - DROP MATERIALIZED VIEW IF EXISTS validator_stats CASCADE; - DROP MATERIALIZED VIEW IF EXISTS staking_summary CASCADE; - DROP MATERIALIZED VIEW IF EXISTS my_stake_summary CASCADE; - `) - if err != nil { - return err - } return nil }) diff --git a/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go b/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go index 1fce957..6faca00 100644 --- 
a/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go +++ b/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go @@ -41,7 +41,7 @@ func init() { count_value bigint; padded_transaction_type varchar; BEGIN - IF transaction_type < 1 OR transaction_type > 33 THEN + IF transaction_type < 1 OR transaction_type > 44 THEN RAISE EXCEPTION '% is not a valid transaction type', transaction_type; END IF; diff --git a/migrations/post_sync_migrations/migration_helpers.go b/migrations/post_sync_migrations/migration_helpers.go index d08f51f..d7c3b7d 100644 --- a/migrations/post_sync_migrations/migration_helpers.go +++ b/migrations/post_sync_migrations/migration_helpers.go @@ -17,65 +17,66 @@ var ( commands = []struct { Query string Ticker *time.Ticker + SubDb bool }{ - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_all", Ticker: time.NewTicker(1 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_wallet_count_all", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_new_wallet_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_active_wallet_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_block_height_current", Ticker: time.NewTicker(2 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_pending", Ticker: time.NewTicker(2 * time.Second)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_fee_1_d", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_total_supply", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_post_count", 
Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_comment_count", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_repost_count", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_creator_coin", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_nft", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_dex", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_social", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_follow_count", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_message_count", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "SELECT refresh_public_key_first_transaction()", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_likes", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_reactions", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_diamonds", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_reposts", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_comments", Ticker: time.NewTicker(15 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard", Ticker: time.NewTicker(1 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_nft_leaderboard", Ticker: time.NewTicker(1 * 
time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_defi_leaderboard", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_monthly", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_wallet_count_monthly", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_daily", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_new_wallet_count_daily", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_active_wallet_count_daily", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_transactions", Ticker: time.NewTicker(1 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_top_nft_owners", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_cc_balance_totals", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_nft_balance_totals", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_deso_token_balance_totals", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_portfolio_value", Ticker: time.NewTicker(3 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_cc_royalties", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_diamond_earnings", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_bid_royalty_earnings", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY 
statistic_profile_nft_buy_now_royalty_earnings", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_buy_orders", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_sell_orders", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_diamonds_given", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_diamonds_received", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_cc_buyers", Ticker: time.NewTicker(3 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_cc_sellers", Ticker: time.NewTicker(3 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_bid_buys", Ticker: time.NewTicker(1 * time.Hour)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_bid_sales", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_buy_now_buys", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_buy_now_sales", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_buy_orders", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_sell_orders", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_earnings_breakdown_counts", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY staking_summary", Ticker: time.NewTicker(1 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY my_stake_summary", Ticker: time.NewTicker(1 * 
time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY validator_stats", Ticker: time.NewTicker(1 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_all", Ticker: time.NewTicker(1 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_wallet_count_all", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_new_wallet_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_active_wallet_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_block_height_current", Ticker: time.NewTicker(2 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_pending", Ticker: time.NewTicker(2 * time.Second)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_fee_1_d", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_total_supply", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_post_count", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_comment_count", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_repost_count", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_creator_coin", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_nft", Ticker: time.NewTicker(15 * 
time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_dex", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_social", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_follow_count", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_message_count", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: false, Query: "SELECT refresh_public_key_first_transaction()", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_likes", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_reactions", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_diamonds", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_reposts", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard_comments", Ticker: time.NewTicker(15 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_social_leaderboard", Ticker: time.NewTicker(1 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_nft_leaderboard", Ticker: time.NewTicker(1 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_defi_leaderboard", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_monthly", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY 
statistic_wallet_count_monthly", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_daily", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_new_wallet_count_daily", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_active_wallet_count_daily", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_transactions", Ticker: time.NewTicker(1 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_top_nft_owners", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_cc_balance_totals", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_nft_balance_totals", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_deso_token_balance_totals", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_portfolio_value", Ticker: time.NewTicker(3 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_cc_royalties", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_diamond_earnings", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_bid_royalty_earnings", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_buy_now_royalty_earnings", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY 
statistic_profile_deso_token_buy_orders", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_sell_orders", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_diamonds_given", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_diamonds_received", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_cc_buyers", Ticker: time.NewTicker(3 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_cc_sellers", Ticker: time.NewTicker(3 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_bid_buys", Ticker: time.NewTicker(1 * time.Hour)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_bid_sales", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_buy_now_buys", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_nft_buy_now_sales", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: true, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_earnings_breakdown_counts", Ticker: time.NewTicker(30 * time.Minute)}, + {SubDb: false, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY staking_summary", Ticker: time.NewTicker(1 * time.Minute)}, + {SubDb: false, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY 
my_stake_summary", Ticker: time.NewTicker(1 * time.Minute)}, + {SubDb: false, Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY validator_stats", Ticker: time.NewTicker(1 * time.Minute)}, } ) @@ -94,7 +95,7 @@ func RunMigrationWithRetries(db *bun.DB, migrationQuery string) error { return errors.New("Failed to migrate after 5 attempts") } -func RefreshExplorerStatistics(db *bun.DB) { +func RefreshExplorerStatistics(db *bun.DB, subDb *bun.DB) { // Only run if explorer statistics are enabled. if !calculateExplorerStatistics { return @@ -105,6 +106,7 @@ func RefreshExplorerStatistics(db *bun.DB) { go func(command struct { Query string Ticker *time.Ticker + SubDb bool }) { // Create a channel to ensure only one command is running at a time. running := make(chan bool, 1) @@ -116,7 +118,14 @@ func RefreshExplorerStatistics(db *bun.DB) { running <- true go func() { - err := executeQuery(db, command.Query) + var execDb *bun.DB + if command.SubDb { + execDb = subDb + } else { + execDb = db + } + + err := executeQuery(execDb, command.Query) if err != nil { fmt.Printf("Error executing explorer refresh query: %s: %v\n", command.Query, err) }