diff --git a/Dockerfile b/Dockerfile index fa5c931..b4ba447 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,42 +1,53 @@ -FROM alpine:latest AS daodao +FROM alpine:latest AS postgres-data-handler RUN apk update RUN apk upgrade -RUN apk add --update go gcc g++ vips-dev +RUN apk add --update bash go cmake g++ gcc git make vips-dev WORKDIR /postgres-data-handler/src COPY postgres-data-handler/go.mod postgres-data-handler/ COPY postgres-data-handler/go.sum postgres-data-handler/ -COPY core/go.mod core/ -COPY core/go.sum core/ +COPY core/go.mod core/ +COPY core/go.sum core/ +COPY backend/go.mod backend/ +COPY backend/go.sum backend/ +COPY state-consumer/go.mod state-consumer/ +COPY state-consumer/go.sum state-consumer/ WORKDIR /postgres-data-handler/src/postgres-data-handler RUN go mod download # include postgres data handler src -COPY postgres-data-handler/entries entries -COPY postgres-data-handler/migrations migrations +COPY postgres-data-handler/entries entries +COPY postgres-data-handler/migrations migrations COPY postgres-data-handler/handler handler -COPY postgres-data-handler/main.go . +COPY postgres-data-handler/main.go . 
# include core src -COPY core/desohash ../core/desohash -COPY core/cmd ../core/cmd -COPY core/lib ../core/lib -COPY core/migrate ../core/migrate +COPY core/desohash ../core/desohash +COPY core/consensus ../core/consensus +COPY core/collections ../core/collections +COPY core/bls ../core/bls +COPY core/cmd ../core/cmd +COPY core/lib ../core/lib +COPY core/migrate ../core/migrate + +# include backend src +COPY backend/apis ../backend/apis +COPY backend/config ../backend/config +COPY backend/cmd ../backend/cmd +COPY backend/miner ../backend/miner +COPY backend/routes ../backend/routes +COPY backend/countries ../backend/countries + +# include state-consumer src +COPY state-consumer/consumer ../state-consumer/consumer RUN go mod tidy ## build postgres data handler backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/postgres-data-handler main.go -# -## create tiny image -#FROM alpine:latest -## -#RUN apk add --update vips-dev -## -#COPY --from=daodao /daodao/src/daodao-backend/bin/daodao-backend /daodao/bin/daodao-backend -#ENTRYPOINT ["/daodao/bin/daodao-backend"] + ENTRYPOINT ["/postgres-data-handler/src/postgres-data-handler/bin/postgres-data-handler"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..89e6719 --- /dev/null +++ b/Makefile @@ -0,0 +1,8 @@ +dev: + go run . + +dev-env: + docker compose -f local.docker-compose.yml build && docker compose -f local.docker-compose.yml up + +dev-env-down: + docker compose -f local.docker-compose.yml down --volumes \ No newline at end of file diff --git a/entries/access_group.go b/entries/access_group.go index fe43564..2051d2f 100644 --- a/entries/access_group.go +++ b/entries/access_group.go @@ -52,7 +52,7 @@ func AccessGroupEncoderToPGStruct(accessGroupEntry *lib.AccessGroupEntry, keyByt // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. 
-func AccessGroupBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func AccessGroupBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -69,7 +69,7 @@ func AccessGroupBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, para } // bulkInsertAccessGroupEntry inserts a batch of access_group entries into the database. -func bulkInsertAccessGroupEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertAccessGroupEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -94,7 +94,7 @@ func bulkInsertAccessGroupEntry(entries []*lib.StateChangeEntry, db *bun.DB, ope } // bulkDeletePostEntry deletes a batch of access_group entries from the database. -func bulkDeleteAccessGroupEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteAccessGroupEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/access_group_member.go b/entries/access_group_member.go index 78f8338..1722f14 100644 --- a/entries/access_group_member.go +++ b/entries/access_group_member.go @@ -57,7 +57,7 @@ func AccessGroupMemberEncoderToPGStruct(accessGroupMemberEntry *lib.AccessGroupM // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func AccessGroupMemberBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func AccessGroupMemberBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -74,7 +74,7 @@ func AccessGroupMemberBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB } // bulkInsertAccessGroupMemberEntry inserts a batch of access_group_member entries into the database. -func bulkInsertAccessGroupMemberEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertAccessGroupMemberEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -99,7 +99,7 @@ func bulkInsertAccessGroupMemberEntry(entries []*lib.StateChangeEntry, db *bun.D } // bulkDeletePostEntry deletes a batch of access_group_member entries from the database. 
-func bulkDeleteAccessGroupMemberEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteAccessGroupMemberEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/balance.go b/entries/balance.go index 4f8c2a0..6c6de0a 100644 --- a/entries/balance.go +++ b/entries/balance.go @@ -46,7 +46,7 @@ func BalanceEntryEncoderToPGStruct(balanceEntry *lib.BalanceEntry, keyBytes []by // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func BalanceBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func BalanceBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -63,7 +63,7 @@ func BalanceBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params * } // bulkInsertBalanceEntry inserts a batch of balance entries into the database. -func bulkInsertBalanceEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertBalanceEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. 
@@ -88,7 +88,7 @@ func bulkInsertBalanceEntry(entries []*lib.StateChangeEntry, db *bun.DB, operati } // bulkDeletePostEntry deletes a batch of balance entries from the database. -func bulkDeleteBalanceEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteBalanceEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/block.go b/entries/block.go index 2b28d35..7429306 100644 --- a/entries/block.go +++ b/entries/block.go @@ -3,22 +3,31 @@ package entries import ( "context" "encoding/hex" + "reflect" + "time" + "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" "github.com/pkg/errors" "github.com/uptrace/bun" - "time" ) type BlockEntry struct { - BlockHash string `pg:",pk,use_zero"` - PrevBlockHash string - TxnMerkleRoot string - Timestamp time.Time - Height uint64 - Nonce uint64 - ExtraNonce uint64 - BadgerKey []byte `pg:",use_zero"` + BlockHash string `pg:",pk,use_zero"` + PrevBlockHash string + TxnMerkleRoot string + Timestamp time.Time + Height uint64 + Nonce uint64 + ExtraNonce uint64 + BlockVersion uint32 + ProposerVotingPublicKey string `pg:",use_zero"` + ProposerRandomSeedSignature string `pg:",use_zero"` + ProposedInView uint64 + ProposerVotePartialSignature string `pg:",use_zero"` + // TODO: Quorum Certificates. Separate entry. + + BadgerKey []byte `pg:",use_zero"` } type PGBlockEntry struct { @@ -26,26 +35,62 @@ type PGBlockEntry struct { BlockEntry } +type BlockSigner struct { + BlockHash string + SignerIndex uint64 +} + +type PGBlockSigner struct { + bun.BaseModel `bun:"table:block_signer"` + BlockSigner +} + // Convert the UserAssociation DeSo encoder to the PG struct used by bun. 
-func BlockEncoderToPGStruct(block *lib.MsgDeSoBlock, keyBytes []byte) *PGBlockEntry { +func BlockEncoderToPGStruct(block *lib.MsgDeSoBlock, keyBytes []byte, params *lib.DeSoParams) (*PGBlockEntry, []*PGBlockSigner) { blockHash, _ := block.Hash() + blockHashHex := hex.EncodeToString(blockHash[:]) + qc := block.Header.GetQC() + blockSigners := []*PGBlockSigner{} + if !isInterfaceNil(qc) { + aggSig := qc.GetAggregatedSignature() + if !isInterfaceNil(aggSig) { + signersList := aggSig.GetSignersList() + for ii := 0; ii < signersList.Size(); ii++ { + // Skip signers that didn't sign. + if !signersList.Get(ii) { + continue + } + blockSigners = append(blockSigners, &PGBlockSigner{ + BlockSigner: BlockSigner{ + BlockHash: blockHashHex, + SignerIndex: uint64(ii), + }, + }) + } + } + } return &PGBlockEntry{ BlockEntry: BlockEntry{ - BlockHash: hex.EncodeToString(blockHash[:]), - PrevBlockHash: hex.EncodeToString(block.Header.PrevBlockHash[:]), - TxnMerkleRoot: hex.EncodeToString(block.Header.TransactionMerkleRoot[:]), - Timestamp: consumer.UnixNanoToTime(block.Header.TstampSecs * 1e9), - Height: block.Header.Height, - Nonce: block.Header.Nonce, - ExtraNonce: block.Header.ExtraNonce, - BadgerKey: keyBytes, + BlockHash: blockHashHex, + PrevBlockHash: hex.EncodeToString(block.Header.PrevBlockHash[:]), + TxnMerkleRoot: hex.EncodeToString(block.Header.TransactionMerkleRoot[:]), + Timestamp: consumer.UnixNanoToTime(uint64(block.Header.TstampNanoSecs)), + Height: block.Header.Height, + Nonce: block.Header.Nonce, + ExtraNonce: block.Header.ExtraNonce, + BlockVersion: block.Header.Version, + ProposerVotingPublicKey: block.Header.ProposerVotingPublicKey.ToString(), + ProposerRandomSeedSignature: block.Header.ProposerRandomSeedSignature.ToString(), + ProposedInView: block.Header.ProposedInView, + ProposerVotePartialSignature: block.Header.ProposerVotePartialSignature.ToString(), + BadgerKey: keyBytes, }, - } + }, blockSigners } // PostBatchOperation is the entry point for processing a 
batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func BlockBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func BlockBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -62,7 +107,7 @@ func BlockBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *li } // bulkInsertUtxoOperationsEntry inserts a batch of user_association entries into the database. -func bulkInsertBlockEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertBlockEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // If this block is a part of the initial sync, skip it - it will be handled by the utxo operations. if operationType == lib.DbOperationTypeInsert { return nil @@ -70,20 +115,74 @@ func bulkInsertBlockEntry(entries []*lib.StateChangeEntry, db *bun.DB, operation // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueBlocks := consumer.UniqueEntries(entries) + + // We need to check if this block is replacing an existing block at the same height. + // If it is, we need to delete the existing block and all transactions associated with it. + // Get all block with matching heights and different hashes. 
+ heights := make([]uint64, len(entries)) + hashes := make([]string, len(entries)) + for ii, entry := range uniqueBlocks { + heights[ii] = entry.Encoder.(*lib.MsgDeSoBlock).Header.Height + hash, err := entry.Encoder.(*lib.MsgDeSoBlock).Hash() + if err != nil { + return errors.Wrapf(err, "entries.bulkInsertBlockEntry: Error getting block hash") + } + hashes[ii] = hex.EncodeToString(hash[:]) + } + blocks := []*PGBlockEntry{} + err := db.NewSelect(). + Model(&blocks). + Where("height IN (?)", bun.In(heights)). + Where("block_hash NOT IN (?)", bun.In(hashes)). + Scan(context.Background()) + if err != nil { + return errors.Wrapf(err, "entries.bulkInsertBlockEntry: Error getting blocks") + } + // If we have blocks at the same height, delete them and their transactions. + if len(blocks) > 0 { + keysToDelete := make([][]byte, len(blocks)) + for ii, block := range blocks { + keysToDelete[ii] = block.BadgerKey + } + if err = bulkDeleteBlockEntriesFromKeysToDelete(db, keysToDelete); err != nil { + return errors.Wrapf(err, "entries.bulkInsertBlockEntry: Error deleting blocks") + } + } + // Create a new array to hold the bun struct. pgBlockEntrySlice := make([]*PGBlockEntry, 0) pgTransactionEntrySlice := make([]*PGTransactionEntry, 0) + pgBlockSignersEntrySlice := make([]*PGBlockSigner, 0) for _, entry := range uniqueBlocks { block := entry.Encoder.(*lib.MsgDeSoBlock) - blockEntry := BlockEncoderToPGStruct(block, entry.KeyBytes) + blockEntry, blockSigners := BlockEncoderToPGStruct(block, entry.KeyBytes, params) pgBlockEntrySlice = append(pgBlockEntrySlice, blockEntry) + pgBlockSignersEntrySlice = append(pgBlockSignersEntrySlice, blockSigners...) 
for jj, transaction := range block.Txns { - pgTransactionEntry, err := TransactionEncoderToPGStruct(transaction, uint64(jj), blockEntry.BlockHash, blockEntry.Height, blockEntry.Timestamp, params) + indexInBlock := uint64(jj) + pgTransactionEntry, err := TransactionEncoderToPGStruct( + transaction, + &indexInBlock, + blockEntry.BlockHash, + blockEntry.Height, + blockEntry.Timestamp, + nil, + nil, + params, + ) if err != nil { return errors.Wrapf(err, "entries.bulkInsertBlockEntry: Problem converting transaction to PG struct") } pgTransactionEntrySlice = append(pgTransactionEntrySlice, pgTransactionEntry) + if transaction.TxnMeta.GetTxnType() != lib.TxnTypeAtomicTxnsWrapper { + continue + } + innerTxns, err := parseInnerTxnsFromAtomicTxn(pgTransactionEntry, params) + if err != nil { + return errors.Wrapf(err, "entries.bulkInsertBlockEntry: Problem parsing inner txns from atomic txn") + } + pgTransactionEntrySlice = append(pgTransactionEntrySlice, innerTxns...) } } @@ -97,22 +196,40 @@ func bulkInsertBlockEntry(entries []*lib.StateChangeEntry, db *bun.DB, operation return errors.Wrapf(err, "entries.bulkInsertBlock: Error inserting entries") } - err := bulkInsertTransactionEntry(pgTransactionEntrySlice, db, operationType) - if err != nil { + if err = bulkInsertTransactionEntry(pgTransactionEntrySlice, db, operationType); err != nil { return errors.Wrapf(err, "entries.bulkInsertBlock: Error inserting transaction entries") } + if len(pgBlockSignersEntrySlice) > 0 { + // Execute the insert query. + query := db.NewInsert().Model(&pgBlockSignersEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (block_hash, signer_index) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertBlockEntry: Error inserting block signers") + } + } + return nil } // bulkDeleteBlockEntry deletes a batch of block entries from the database. 
-func bulkDeleteBlockEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteBlockEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Transform the entries into a list of keys to delete. keysToDelete := consumer.KeysToDelete(uniqueEntries) + return bulkDeleteBlockEntriesFromKeysToDelete(db, keysToDelete) +} + +// bulkDeleteBlockEntriesFromKeysToDelete deletes a batch of block entries from the database. +// It also deletes any transactions and utxo operations associated with the block. +func bulkDeleteBlockEntriesFromKeysToDelete(db bun.IDB, keysToDelete [][]byte) error { // Execute the delete query on the blocks table. if _, err := db.NewDelete(). Model(&PGBlockEntry{}). @@ -121,7 +238,6 @@ func bulkDeleteBlockEntry(entries []*lib.StateChangeEntry, db *bun.DB, operation Exec(context.Background()); err != nil { return errors.Wrapf(err, "entries.bulkDeleteBlockEntry: Error deleting entries") } - // Get block hashes from keys to delete. blockHashHexesToDelete := make([]string, len(keysToDelete)) for ii, keyToDelete := range keysToDelete { @@ -130,12 +246,41 @@ func bulkDeleteBlockEntry(entries []*lib.StateChangeEntry, db *bun.DB, operation // Delete any transactions associated with the block. if _, err := db.NewDelete(). - Model(&PGBlockEntry{}). + Model(&PGTransactionEntry{}). Where("block_hash IN (?)", bun.In(blockHashHexesToDelete)). Returning(""). Exec(context.Background()); err != nil { return errors.Wrapf(err, "entries.bulkDeleteBlockEntry: Error deleting transaction entries") } + // Delete any utxo operations associated with the block. + if _, err := db.NewDelete(). + Model(&PGUtxoOperationEntry{}). + Where("block_hash IN (?)", bun.In(blockHashHexesToDelete)). + Returning(""). 
+ Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteBlockEntry: Error deleting utxo operation entries") + } + + // Delete any signers associated with the block. + if _, err := db.NewDelete(). + Model(&PGBlockSigner{}). + Where("block_hash IN (?)", bun.In(blockHashHexesToDelete)). + Returning(""). + Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteBlockEntry: Error deleting block signers") + } return nil } + +// golang interface types are stored as a tuple of (type, value). A single i==nil check is not enough to +// determine if a pointer that implements an interface is nil. This function checks if the interface is nil +// by checking if the pointer itself is nil. +func isInterfaceNil(i interface{}) bool { + if i == nil { + return true + } + + value := reflect.ValueOf(i) + return value.Kind() == reflect.Ptr && value.IsNil() +} diff --git a/entries/bls_pkid_pair.go b/entries/bls_pkid_pair.go new file mode 100644 index 0000000..4fb2d1a --- /dev/null +++ b/entries/bls_pkid_pair.go @@ -0,0 +1,186 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" +) + +// TODO: when to use nullzero vs use_zero? 
+type BLSPublicKeyPKIDPairEntry struct { + PKID string `bun:",nullzero"` + BLSPublicKey string `bun:",nullzero"` + + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGBLSPkidPairEntry struct { + bun.BaseModel `bun:"table:bls_public_key_pkid_pair_entry"` + BLSPublicKeyPKIDPairEntry +} + +type BLSPublicKeyPKIDPairSnapshotEntry struct { + PKID string `bun:",nullzero"` + BLSPublicKey string `bun:",nullzero"` + SnapshotAtEpochNumber uint64 `pg:",use_zero"` + + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGBLSPublicKeyPKIDPairSnapshotEntry struct { + bun.BaseModel `bun:"table:bls_public_key_pkid_pair_snapshot_entry"` + BLSPublicKeyPKIDPairSnapshotEntry +} + +// Convert the BLSPublicKeyPKIDPairEntry DeSo encoder to the PGBLSPkidPairEntry struct used by bun. +func BLSPublicKeyPKIDPairEncoderToPGStruct(blsPublicKeyPKIDPairEntry *lib.BLSPublicKeyPKIDPairEntry, keyBytes []byte, params *lib.DeSoParams) BLSPublicKeyPKIDPairEntry { + pgBLSPkidPairEntry := BLSPublicKeyPKIDPairEntry{ + BadgerKey: keyBytes, + } + + if blsPublicKeyPKIDPairEntry.PKID != nil { + pgBLSPkidPairEntry.PKID = consumer.PublicKeyBytesToBase58Check((*blsPublicKeyPKIDPairEntry.PKID)[:], params) + } + + if !blsPublicKeyPKIDPairEntry.BLSPublicKey.IsEmpty() { + pgBLSPkidPairEntry.BLSPublicKey = blsPublicKeyPKIDPairEntry.BLSPublicKey.ToString() + } + + return pgBLSPkidPairEntry +} + +// BLSPublicKeyPKIDPairSnapshotEncoderToPGStruct converts the BLSPublicKeyPKIDPairSnapshotEntry DeSo encoder to the +// PGBLSPublicKeyPKIDPairSnapshotEntry struct used by bun. 
+func BLSPublicKeyPKIDPairSnapshotEncoderToPGStruct( + blsPublicKeyPKIDPairEntry *lib.BLSPublicKeyPKIDPairEntry, keyBytes []byte, params *lib.DeSoParams, +) BLSPublicKeyPKIDPairSnapshotEntry { + prefixRemovedKeyBytes := keyBytes[1:] + epochNumber := lib.DecodeUint64(prefixRemovedKeyBytes[:8]) + + pgBLSPkidPairSnapshotEntry := BLSPublicKeyPKIDPairSnapshotEntry{ + SnapshotAtEpochNumber: epochNumber, + BadgerKey: keyBytes, + } + + if blsPublicKeyPKIDPairEntry.PKID != nil { + pgBLSPkidPairSnapshotEntry.PKID = consumer.PublicKeyBytesToBase58Check((*blsPublicKeyPKIDPairEntry.PKID)[:], params) + } + + if !blsPublicKeyPKIDPairEntry.BLSPublicKey.IsEmpty() { + pgBLSPkidPairSnapshotEntry.BLSPublicKey = blsPublicKeyPKIDPairEntry.BLSPublicKey.ToString() + } + + return pgBLSPkidPairSnapshotEntry +} + +// BLSPublicKeyPKIDPairBatchOperation is the entry point for processing a batch of BLSPublicKeyPKIDPair entries. +// It determines the appropriate handler based on the operation type and executes it. +func BLSPublicKeyPKIDPairBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { + // We check before we call this function that there is at least one operation type. + // We also ensure before this that all entries have the same operation type. + operationType := entries[0].OperationType + var err error + if operationType == lib.DbOperationTypeDelete { + err = bulkDeleteBLSPkidPairEntry(entries, db, operationType) + } else { + err = bulkInsertBLSPkidPairEntry(entries, db, operationType, params) + } + if err != nil { + return errors.Wrapf(err, "entries.StakeBatchOperation: Problem with operation type %v", operationType) + } + return nil +} + +// bulkInsertBLSPkidPairEntry inserts a batch of stake entries into the database. 
+func bulkInsertBLSPkidPairEntry( + entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams, +) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + uniqueBLSPkidPairEntries := consumer.FilterEntriesByPrefix( + uniqueEntries, lib.Prefixes.PrefixValidatorBLSPublicKeyPKIDPairEntry) + uniqueBLSPkidPairSnapshotEntries := consumer.FilterEntriesByPrefix( + uniqueEntries, lib.Prefixes.PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry) + // Create a new array to hold the bun struct. + pgBLSPkidPairEntrySlice := make([]*PGBLSPkidPairEntry, len(uniqueBLSPkidPairEntries)) + pgBLSPkidPairSnapshotEntrySlice := make([]*PGBLSPublicKeyPKIDPairSnapshotEntry, len(uniqueBLSPkidPairSnapshotEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueBLSPkidPairEntries { + pgBLSPkidPairEntrySlice[ii] = &PGBLSPkidPairEntry{BLSPublicKeyPKIDPairEntry: BLSPublicKeyPKIDPairEncoderToPGStruct( + entry.Encoder.(*lib.BLSPublicKeyPKIDPairEntry), entry.KeyBytes, params)} + } + + for ii, entry := range uniqueBLSPkidPairSnapshotEntries { + pgBLSPkidPairSnapshotEntrySlice[ii] = &PGBLSPublicKeyPKIDPairSnapshotEntry{ + BLSPublicKeyPKIDPairSnapshotEntry: BLSPublicKeyPKIDPairSnapshotEncoderToPGStruct( + entry.Encoder.(*lib.BLSPublicKeyPKIDPairEntry), entry.KeyBytes, params)} + } + + if len(pgBLSPkidPairEntrySlice) > 0 { + // Execute the insert query. + query := db.NewInsert().Model(&pgBLSPkidPairEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertBLSPkidPairEntry: Error inserting entries") + } + } + + if len(pgBLSPkidPairSnapshotEntrySlice) > 0 { + // Execute query for snapshot entries. 
+ query := db.NewInsert().Model(&pgBLSPkidPairSnapshotEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertBLSPkidPairEntry: Error inserting snapshot entries") + } + } + + return nil +} + +// bulkDeleteBLSPkidPairEntry deletes a batch of stake entries from the database. +func bulkDeleteBLSPkidPairEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + + // Transform the entries into a list of keys to delete. + keysToDelete := consumer.KeysToDelete(uniqueEntries) + blsPKIDPairEntryKeysToDelete := consumer.FilterKeysByPrefix(keysToDelete, + lib.Prefixes.PrefixValidatorBLSPublicKeyPKIDPairEntry) + blsPKIDPairSnapshotEntryKeysToDelete := consumer.FilterKeysByPrefix(keysToDelete, + lib.Prefixes.PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry) + + // Execute the delete query. + if len(blsPKIDPairEntryKeysToDelete) > 0 { + if _, err := db.NewDelete(). + Model(&PGBLSPkidPairEntry{}). + Where("badger_key IN (?)", bun.In(blsPKIDPairEntryKeysToDelete)). + Returning(""). + Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteBLSPkidPairEntry: Error deleting entries") + } + } + + // Execute the delete query for snapshot entries. + if len(blsPKIDPairSnapshotEntryKeysToDelete) > 0 { + if _, err := db.NewDelete(). + Model(&PGBLSPublicKeyPKIDPairSnapshotEntry{}). + Where("badger_key IN (?)", bun.In(blsPKIDPairSnapshotEntryKeysToDelete)). + Returning(""). 
+ Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteBLSPkidPairEntry: Error deleting snapshot entries") + } + } + + return nil +} diff --git a/entries/dao_coin_limit_order.go b/entries/dao_coin_limit_order.go index eb77740..7d9902b 100644 --- a/entries/dao_coin_limit_order.go +++ b/entries/dao_coin_limit_order.go @@ -19,7 +19,7 @@ type DaoCoinLimitOrderEntry struct { OperationType uint8 `bun:",nullzero"` FillType uint8 `bun:",nullzero"` BlockHeight uint32 `bun:",nullzero"` - IsDaoCoinConst bool `bun:",nullzero"` + IsDaoCoinConst bool BadgerKey []byte `pg:",pk,use_zero"` } @@ -35,8 +35,8 @@ func DaoCoinLimitOrderEncoderToPGStruct(daoCoinLimitOrder *lib.DAOCoinLimitOrder TransactorPkid: consumer.PublicKeyBytesToBase58Check(daoCoinLimitOrder.TransactorPKID[:], params), BuyingDaoCoinCreatorPkid: consumer.PublicKeyBytesToBase58Check(daoCoinLimitOrder.BuyingDAOCoinCreatorPKID[:], params), SellingDaoCoinCreatorPkid: consumer.PublicKeyBytesToBase58Check(daoCoinLimitOrder.SellingDAOCoinCreatorPKID[:], params), - ScaledExchangeRateCoinsToSellPerCoinToBuyHex: daoCoinLimitOrder.ScaledExchangeRateCoinsToSellPerCoinToBuy.String(), - QuantityToFillInBaseUnitsHex: daoCoinLimitOrder.QuantityToFillInBaseUnits.String(), + ScaledExchangeRateCoinsToSellPerCoinToBuyHex: daoCoinLimitOrder.ScaledExchangeRateCoinsToSellPerCoinToBuy.Hex(), + QuantityToFillInBaseUnitsHex: daoCoinLimitOrder.QuantityToFillInBaseUnits.Hex(), OperationType: uint8(daoCoinLimitOrder.OperationType), FillType: uint8(daoCoinLimitOrder.FillType), BlockHeight: daoCoinLimitOrder.BlockHeight, @@ -48,7 +48,7 @@ func DaoCoinLimitOrderEncoderToPGStruct(daoCoinLimitOrder *lib.DAOCoinLimitOrder // DaoCoinLimitOrderBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. 
-func DaoCoinLimitOrderBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func DaoCoinLimitOrderBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -65,7 +65,7 @@ func DaoCoinLimitOrderBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB } // bulkInsertDaoCoinLimitOrderEntry inserts a batch of post_association entries into the database. -func bulkInsertDaoCoinLimitOrderEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertDaoCoinLimitOrderEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -89,7 +89,7 @@ func bulkInsertDaoCoinLimitOrderEntry(entries []*lib.StateChangeEntry, db *bun.D } // bulkDeletePostEntry deletes a batch of post_association entries from the database. -func bulkDeleteDaoCoinLimitOrderEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteDaoCoinLimitOrderEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/derived_key.go b/entries/derived_key.go index b14a7f7..2af1204 100644 --- a/entries/derived_key.go +++ b/entries/derived_key.go @@ -56,7 +56,7 @@ func DerivedKeyEncoderToPGStruct(derivedKeyEntry *lib.DerivedKeyEntry, keyBytes // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func DerivedKeyBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func DerivedKeyBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -73,7 +73,7 @@ func DerivedKeyBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, param } // bulkInsertDerivedKeyEntry inserts a batch of derived_key entries into the database. -func bulkInsertDerivedKeyEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertDerivedKeyEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -101,7 +101,7 @@ func bulkInsertDerivedKeyEntry(entries []*lib.StateChangeEntry, db *bun.DB, oper } // bulkDeletePostEntry deletes a batch of derived_key entries from the database. 
-func bulkDeleteDerivedKeyEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteDerivedKeyEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/deso_balance.go b/entries/deso_balance.go index 3eebff2..2621a00 100644 --- a/entries/deso_balance.go +++ b/entries/deso_balance.go @@ -36,7 +36,7 @@ func DesoBalanceEncoderToPGStruct(desoBalanceEntry *lib.DeSoBalanceEntry, keyByt // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func DesoBalanceBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func DesoBalanceBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -53,7 +53,7 @@ func DesoBalanceBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, para } // bulkInsertDiamondEntry inserts a batch of diamond entries into the database. -func bulkInsertDesoBalanceEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertDesoBalanceEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. 
@@ -77,7 +77,7 @@ func bulkInsertDesoBalanceEntry(entries []*lib.StateChangeEntry, db *bun.DB, ope } // bulkDeletePostEntry deletes a batch of diamond entries from the database. -func bulkDeleteDesoBalanceEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteDesoBalanceEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/diamond.go b/entries/diamond.go index dd07bf3..15ac3f8 100644 --- a/entries/diamond.go +++ b/entries/diamond.go @@ -41,7 +41,7 @@ func DiamondEncoderToPGStruct(diamondEntry *lib.DiamondEntry, keyBytes []byte, p // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func DiamondBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func DiamondBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -58,7 +58,7 @@ func DiamondBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params * } // bulkInsertDiamondEntry inserts a batch of diamond entries into the database. -func bulkInsertDiamondEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertDiamondEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -82,7 +82,7 @@ func bulkInsertDiamondEntry(entries []*lib.StateChangeEntry, db *bun.DB, operati } // bulkDeletePostEntry deletes a batch of diamond entries from the database. -func bulkDeleteDiamondEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteDiamondEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/epoch.go b/entries/epoch.go new file mode 100644 index 0000000..25901dc --- /dev/null +++ b/entries/epoch.go @@ -0,0 +1,98 @@ +package entries + +import ( + "context" + + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" +) + +// TODO: when to use nullzero vs use_zero? +type EpochEntry struct { + EpochNumber uint64 + InitialBlockHeight uint64 + InitialView uint64 + FinalBlockHeight uint64 + InitialLeaderIndexOffset uint64 + CreatedAtBlockTimestampNanoSecs int64 + SnapshotAtEpochNumber uint64 +} + +type PGEpochEntry struct { + bun.BaseModel `bun:"table:epoch_entry"` + EpochEntry +} + +// TODO: Do I need this? +type PGEpochUtxoOps struct { + bun.BaseModel `bun:"table:epoch_entry_utxo_ops"` + EpochEntry + UtxoOperation +} + +// Convert the EpochEntry DeSo encoder to the PGEpochEntry struct used by bun. +func EpochEntryEncoderToPGStruct(epochEntry *lib.EpochEntry, keyBytes []byte, params *lib.DeSoParams) EpochEntry { + + var snapshotAtEpochNumber uint64 + // Epochs use data snapshotted from two epochs ago. Epochs 0 and 1 use data from epoch 0. 
+	if epochEntry.EpochNumber >= 2 {
+		snapshotAtEpochNumber = epochEntry.EpochNumber - 2
+	}
+	return EpochEntry{
+		EpochNumber:                     epochEntry.EpochNumber,
+		InitialBlockHeight:              epochEntry.InitialBlockHeight,
+		InitialView:                     epochEntry.InitialView,
+		FinalBlockHeight:                epochEntry.FinalBlockHeight,
+		InitialLeaderIndexOffset:        epochEntry.InitialLeaderIndexOffset,
+		CreatedAtBlockTimestampNanoSecs: epochEntry.CreatedAtBlockTimestampNanoSecs,
+		SnapshotAtEpochNumber:           snapshotAtEpochNumber,
+	}
+}
+
+// EpochEntryBatchOperation is the entry point for processing a batch of Epoch entries.
+// It determines the appropriate handler based on the operation type and executes it.
+func EpochEntryBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error {
+	// We check before we call this function that there is at least one operation type.
+	// We also ensure before this that all entries have the same operation type.
+	operationType := entries[0].OperationType
+	var err error
+	// Core only tracks the current epoch entry and never deletes them.
+	// In order to track all historical epoch entries, we don't use the badger
+	// key to uniquely identify them, but rather the epoch number.
+	if operationType == lib.DbOperationTypeDelete {
+		return errors.New("entries.EpochEntryBatchOperation: Delete operation type not supported")
+	} else {
+		err = bulkInsertEpochEntry(entries, db, operationType, params)
+	}
+	if err != nil {
+		return errors.Wrapf(err, "entries.EpochEntryBatchOperation: Problem with operation type %v", operationType)
+	}
+	return nil
+}
+
+// bulkInsertEpochEntry inserts a batch of epoch entries into the database.
+func bulkInsertEpochEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error {
+	// Track the unique entries we've inserted so we don't insert the same entry twice.
+ uniqueEntries := consumer.UniqueEntries(entries) + // Create a new array to hold the bun struct. + pgEntrySlice := make([]*PGEpochEntry, len(uniqueEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueEntries { + pgEntrySlice[ii] = &PGEpochEntry{EpochEntry: EpochEntryEncoderToPGStruct(entry.Encoder.(*lib.EpochEntry), entry.KeyBytes, params)} + } + + // Execute the insert query. + query := db.NewInsert().Model(&pgEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (epoch_number) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertEpochEntry: Error inserting entries") + } + return nil +} diff --git a/entries/follow.go b/entries/follow.go index e808d9b..6179353 100644 --- a/entries/follow.go +++ b/entries/follow.go @@ -36,7 +36,7 @@ func FollowEncoderToPGStruct(followEntry *lib.FollowEntry, keyBytes []byte, para // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func FollowBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func FollowBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -53,7 +53,7 @@ func FollowBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *l } // bulkInsertFollowEntry inserts a batch of follow entries into the database. 
-func bulkInsertFollowEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertFollowEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -78,7 +78,7 @@ func bulkInsertFollowEntry(entries []*lib.StateChangeEntry, db *bun.DB, operatio } // bulkDeletePostEntry deletes a batch of follow entries from the database. -func bulkDeleteFollowEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteFollowEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/global_params.go b/entries/global_params.go new file mode 100644 index 0000000..fc64f01 --- /dev/null +++ b/entries/global_params.go @@ -0,0 +1,142 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" +) + +type GlobalParamsEntry struct { + USDCentsPerBitcoin uint64 + CreateProfileFeeNanos uint64 + CreateNFTFeeNanos uint64 + MaxCopiesPerNFT uint64 + MinimumNetworkFeeNanosPerKB uint64 + MaxNonceExpirationBlockHeightOffset uint64 + StakeLockupEpochDuration uint64 + ValidatorJailEpochDuration uint64 + LeaderScheduleMaxNumValidators uint64 + ValidatorSetMaxNumValidators uint64 + StakingRewardsMaxNumStakes uint64 + StakingRewardsAPYBasisPoints uint64 + EpochDurationNumBlocks uint64 + JailInactiveValidatorGracePeriodEpochs uint64 + MaximumVestedIntersectionsPerLockupTransaction int + 
FeeBucketGrowthRateBasisPoints uint64 + BlockTimestampDriftNanoSecs int64 + MempoolMaxSizeBytes uint64 + MempoolFeeEstimatorNumMempoolBlocks uint64 + MempoolFeeEstimatorNumPastBlocks uint64 + MaxBlockSizeBytesPoS uint64 + SoftMaxBlockSizeBytesPoS uint64 + MaxTxnSizeBytesPoS uint64 + BlockProductionIntervalMillisecondsPoS uint64 + TimeoutIntervalMillisecondsPoS uint64 + + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGGlobalParamsEntry struct { + bun.BaseModel `bun:"table:global_params_entry"` + GlobalParamsEntry +} + +// Convert the GlobalParams DeSo encoder to the PGGlobalParamsEntry struct used by bun. +func GlobalParamsEncoderToPGStruct(globalParamsEntry *lib.GlobalParamsEntry, keyBytes []byte, params *lib.DeSoParams) GlobalParamsEntry { + mergedGlobalParamsEntry := lib.MergeGlobalParamEntryDefaults(globalParamsEntry, params) + + return GlobalParamsEntry{ + USDCentsPerBitcoin: mergedGlobalParamsEntry.USDCentsPerBitcoin, + CreateProfileFeeNanos: mergedGlobalParamsEntry.CreateProfileFeeNanos, + CreateNFTFeeNanos: mergedGlobalParamsEntry.CreateNFTFeeNanos, + MaxCopiesPerNFT: mergedGlobalParamsEntry.MaxCopiesPerNFT, + MinimumNetworkFeeNanosPerKB: mergedGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, + MaxNonceExpirationBlockHeightOffset: mergedGlobalParamsEntry.MaxNonceExpirationBlockHeightOffset, + StakeLockupEpochDuration: mergedGlobalParamsEntry.StakeLockupEpochDuration, + ValidatorJailEpochDuration: mergedGlobalParamsEntry.ValidatorJailEpochDuration, + LeaderScheduleMaxNumValidators: mergedGlobalParamsEntry.LeaderScheduleMaxNumValidators, + ValidatorSetMaxNumValidators: mergedGlobalParamsEntry.ValidatorSetMaxNumValidators, + StakingRewardsMaxNumStakes: mergedGlobalParamsEntry.StakingRewardsMaxNumStakes, + StakingRewardsAPYBasisPoints: mergedGlobalParamsEntry.StakingRewardsAPYBasisPoints, + EpochDurationNumBlocks: mergedGlobalParamsEntry.EpochDurationNumBlocks, + JailInactiveValidatorGracePeriodEpochs: 
mergedGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, + MaximumVestedIntersectionsPerLockupTransaction: mergedGlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction, + FeeBucketGrowthRateBasisPoints: mergedGlobalParamsEntry.FeeBucketGrowthRateBasisPoints, + BlockTimestampDriftNanoSecs: mergedGlobalParamsEntry.BlockTimestampDriftNanoSecs, + MempoolMaxSizeBytes: mergedGlobalParamsEntry.MempoolMaxSizeBytes, + MempoolFeeEstimatorNumMempoolBlocks: mergedGlobalParamsEntry.MempoolFeeEstimatorNumMempoolBlocks, + MempoolFeeEstimatorNumPastBlocks: mergedGlobalParamsEntry.MempoolFeeEstimatorNumPastBlocks, + MaxBlockSizeBytesPoS: mergedGlobalParamsEntry.MaxBlockSizeBytesPoS, + SoftMaxBlockSizeBytesPoS: mergedGlobalParamsEntry.SoftMaxBlockSizeBytesPoS, + MaxTxnSizeBytesPoS: mergedGlobalParamsEntry.MaxTxnSizeBytesPoS, + BlockProductionIntervalMillisecondsPoS: mergedGlobalParamsEntry.BlockProductionIntervalMillisecondsPoS, + TimeoutIntervalMillisecondsPoS: mergedGlobalParamsEntry.TimeoutIntervalMillisecondsPoS, + + BadgerKey: keyBytes, + } +} + +// GlobalParamsBatchOperation is the entry point for processing a batch of global params entries. +// It determines the appropriate handler based on the operation type and executes it. +func GlobalParamsBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { + // We check before we call this function that there is at least one operation type. + // We also ensure before this that all entries have the same operation type. 
+ operationType := entries[0].OperationType + var err error + if operationType == lib.DbOperationTypeDelete { + err = bulkDeleteGlobalParamsEntry(entries, db, operationType) + } else { + err = bulkInsertGlobalParamsEntry(entries, db, operationType, params) + } + if err != nil { + return errors.Wrapf(err, "entries.PostBatchOperation: Problem with operation type %v", operationType) + } + return nil +} + +// bulkInsertGlobalParamsEntry inserts a batch of global_params entries into the database. +func bulkInsertGlobalParamsEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + // Create a new array to hold the bun struct. + pgEntrySlice := make([]*PGGlobalParamsEntry, len(uniqueEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueEntries { + pgEntrySlice[ii] = &PGGlobalParamsEntry{GlobalParamsEntry: GlobalParamsEncoderToPGStruct(entry.Encoder.(*lib.GlobalParamsEntry), entry.KeyBytes, params)} + } + + // Execute the insert query. + query := db.NewInsert().Model(&pgEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertGlobalParamsEntry: Error inserting entries") + } + return nil +} + +// bulkDeletePostEntry deletes a batch of global_params entries from the database. +func bulkDeleteGlobalParamsEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + + // Transform the entries into a list of keys to delete. 
+ keysToDelete := consumer.KeysToDelete(uniqueEntries) + + // Execute the delete query. + if _, err := db.NewDelete(). + Model(&PGGlobalParamsEntry{}). + Where("badger_key IN (?)", bun.In(keysToDelete)). + Returning(""). + Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteGlobalParamsEntry: Error deleting entries") + } + + return nil +} diff --git a/entries/helpers.go b/entries/helpers.go new file mode 100644 index 0000000..ea56c5f --- /dev/null +++ b/entries/helpers.go @@ -0,0 +1,44 @@ +package entries + +import ( + "fmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/uptrace/bun" +) + +// GetDbHandle returns the correct interface to use for database operations. +// If a transaction is open, it returns the transaction handle, otherwise it returns the db handle. +func GetDbHandle(tx *bun.Tx, db *bun.DB) bun.IDB { + if tx != nil { + return tx + } + return db +} + +// CreateSavepoint creates a savepoint in the current transaction. If no transaction is open, it returns an empty string. +// The randomly generated savepoint name is returned if the savepoint is created successfully. 
+func CreateSavepoint(tx *bun.Tx) (string, error) {
+	if tx == nil {
+		return "", nil
+	}
+	savepointName := uuid.New().String()
+
+	_, err := tx.Exec(fmt.Sprintf("SAVEPOINT \"%s\"", savepointName))
+	if err != nil {
+		return "", errors.Wrapf(err, "PostgresDataHandler.CreateSavepoint: Error creating savepoint")
+	}
+
+	return savepointName, nil
+}
+
+func RollbackToSavepoint(tx *bun.Tx, savepointName string) error {
+	if tx == nil || savepointName == "" {
+		return nil
+	}
+	_, err := tx.Exec(fmt.Sprintf("ROLLBACK TO SAVEPOINT \"%s\"", savepointName))
+	if err != nil {
+		return errors.Wrapf(err, "PostgresDataHandler.RollbackToSavepoint: Error reverting to savepoint")
+	}
+	return nil
+}
diff --git a/entries/jailed_history.go b/entries/jailed_history.go
new file mode 100644
index 0000000..32a342b
--- /dev/null
+++ b/entries/jailed_history.go
@@ -0,0 +1,112 @@
+package entries
+
+import (
+	"context"
+	"github.com/deso-protocol/core/lib"
+	"github.com/deso-protocol/state-consumer/consumer"
+	"github.com/pkg/errors"
+	"github.com/uptrace/bun"
+)
+
+type JailedHistoryEntry struct {
+	ValidatorPKID         string `bun:",pk,nullzero"`
+	JailedAtEpochNumber   uint64 `bun:",pk"`
+	UnjailedAtEpochNumber uint64 `bun:",pk"`
+}
+
+type PGJailedHistoryEvent struct {
+	bun.BaseModel `bun:"table:jailed_history_event"`
+	JailedHistoryEntry
+}
+
+// Convert the UnjailValidatorStateChangeMetadata DeSo encoder to the JailedHistoryEntry struct used by bun.
+func UnjailValidatorStateChangeMetadataEncoderToPGStruct(
+	unjailValidatorStateChangeMetadata *lib.UnjailValidatorStateChangeMetadata,
+	params *lib.DeSoParams,
+) JailedHistoryEntry {
+	pgJailedHistoryEntry := JailedHistoryEntry{
+		JailedAtEpochNumber:   unjailValidatorStateChangeMetadata.JailedAtEpochNumber,
+		UnjailedAtEpochNumber: unjailValidatorStateChangeMetadata.UnjailedAtEpochNumber,
+	}
+
+	if unjailValidatorStateChangeMetadata.ValidatorPKID != nil {
+		pgJailedHistoryEntry.ValidatorPKID = consumer.PublicKeyBytesToBase58Check(
+			(*unjailValidatorStateChangeMetadata.ValidatorPKID)[:], params)
+	}
+
+	return pgJailedHistoryEntry
+}
+
+// JailedHistoryEventBatchOperation is the entry point for processing a batch of jailed history events.
+// It determines the appropriate handler based on the operation type and executes it.
+func JailedHistoryEventBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error {
+	// We check before we call this function that there is at least one operation type.
+	// We also ensure before this that all entries have the same operation type.
+	operationType := entries[0].OperationType
+	var err error
+	if operationType == lib.DbOperationTypeDelete {
+		err = bulkDeleteJailedHistoryEvent(entries, db, operationType)
+	} else {
+		err = bulkInsertJailedHistoryEvent(entries, db, operationType, params)
+	}
+	if err != nil {
+		return errors.Wrapf(err, "entries.JailedHistoryEventBatchOperation: Problem with operation type %v", operationType)
+	}
+	return nil
+}
+
+// bulkInsertJailedHistoryEvent inserts a batch of jailed history events into the database.
+func bulkInsertJailedHistoryEvent(
+	entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams,
+) error {
+	// Track the unique entries we've inserted so we don't insert the same entry twice.
+	uniqueEntries := consumer.UniqueEntries(entries)
+	// Create a new array to hold the bun struct.
+	pgEntrySlice := make([]*PGJailedHistoryEvent, len(uniqueEntries))
+
+	// Loop through the entries and convert them to PGEntry.
+	for ii, entry := range uniqueEntries {
+		pgEntrySlice[ii] = &PGJailedHistoryEvent{
+			JailedHistoryEntry: UnjailValidatorStateChangeMetadataEncoderToPGStruct(
+				entry.Encoder.(*lib.UnjailValidatorStateChangeMetadata), params,
+			)}
+	}
+
+	// Execute the insert query.
+	query := db.NewInsert().Model(&pgEntrySlice)
+
+	if operationType == lib.DbOperationTypeUpsert {
+		query = query.On("CONFLICT (validator_pkid, jailed_at_epoch_number, unjailed_at_epoch_number) DO UPDATE")
+	}
+
+	if _, err := query.Returning("").Exec(context.Background()); err != nil {
+		return errors.Wrapf(err, "entries.bulkInsertJailedHistoryEvent: Error inserting entries")
+	}
+	return nil
+}
+
+// bulkDeleteJailedHistoryEvent deletes a batch of jailed history events from the database.
+func bulkDeleteJailedHistoryEvent(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error {
+	// Track the unique entries we've inserted so we don't insert the same entry twice.
+	uniqueEntries := consumer.UniqueEntries(entries)
+
+	// Transform entries into PGJailedHistoryEvent.
+	pgEntrySlice := make([]*PGJailedHistoryEvent, len(uniqueEntries))
+	for ii, entry := range uniqueEntries {
+		pgEntrySlice[ii] = &PGJailedHistoryEvent{
+			JailedHistoryEntry: UnjailValidatorStateChangeMetadataEncoderToPGStruct(
+				entry.Encoder.(*lib.UnjailValidatorStateChangeMetadata), nil,
+			)}
+	}
+
+	// Execute the delete query.
+	if _, err := db.NewDelete().
+		Model(&pgEntrySlice).
+		WherePK().
+		Returning("").
+ Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteValidatorEntry: Error deleting entries") + } + + return nil +} diff --git a/entries/like.go b/entries/like.go index 8c6d890..18ba75f 100644 --- a/entries/like.go +++ b/entries/like.go @@ -37,7 +37,7 @@ func LikeEncoderToPGStruct(likeEntry *lib.LikeEntry, keyBytes []byte, params *li // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func LikeBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func LikeBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -54,7 +54,7 @@ func LikeBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib } // bulkInsertLikeEntry inserts a batch of like entries into the database. -func bulkInsertLikeEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertLikeEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -79,7 +79,7 @@ func bulkInsertLikeEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationT } // bulkDeletePostEntry deletes a batch of like entries from the database. 
-func bulkDeleteLikeEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteLikeEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/locked_stake.go b/entries/locked_stake.go new file mode 100644 index 0000000..bcb32f3 --- /dev/null +++ b/entries/locked_stake.go @@ -0,0 +1,117 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" + "github.com/uptrace/bun/extra/bunbig" +) + +// TODO: when to use nullzero vs use_zero? +type LockedStakeEntry struct { + StakerPKID string `bun:",nullzero"` + ValidatorPKID string `bun:",nullzero"` + LockedAmountNanos *bunbig.Int `pg:",use_zero"` + LockedAtEpochNumber uint64 + + ExtraData map[string]string `bun:"type:jsonb"` + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGLockedStakeEntry struct { + bun.BaseModel `bun:"table:locked_stake_entry"` + LockedStakeEntry +} + +// TODO: Do I need this? +type PGLockedStakeEntryUtxoOps struct { + bun.BaseModel `bun:"table:locked_stake_entry_utxo_ops"` + LockedStakeEntry + UtxoOperation +} + +// Convert the LockedStakeEntry DeSo encoder to the PGLockedStakeEntry struct used by bun. 
+func LockedStakeEncoderToPGStruct(lockedStakeEntry *lib.LockedStakeEntry, keyBytes []byte, params *lib.DeSoParams) LockedStakeEntry { + pgLockedStakeEntry := LockedStakeEntry{ + ExtraData: consumer.ExtraDataBytesToString(lockedStakeEntry.ExtraData), + BadgerKey: keyBytes, + } + + if lockedStakeEntry.StakerPKID != nil { + pgLockedStakeEntry.StakerPKID = consumer.PublicKeyBytesToBase58Check((*lockedStakeEntry.StakerPKID)[:], params) + } + + if lockedStakeEntry.ValidatorPKID != nil { + pgLockedStakeEntry.ValidatorPKID = consumer.PublicKeyBytesToBase58Check((*lockedStakeEntry.ValidatorPKID)[:], params) + } + + pgLockedStakeEntry.LockedAtEpochNumber = lockedStakeEntry.LockedAtEpochNumber + pgLockedStakeEntry.LockedAmountNanos = bunbig.FromMathBig(lockedStakeEntry.LockedAmountNanos.ToBig()) + + return pgLockedStakeEntry +} + +// LockedStakeBatchOperation is the entry point for processing a batch of LockedStake entries. +// It determines the appropriate handler based on the operation type and executes it. +func LockedStakeBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { + // We check before we call this function that there is at least one operation type. + // We also ensure before this that all entries have the same operation type. + operationType := entries[0].OperationType + var err error + if operationType == lib.DbOperationTypeDelete { + err = bulkDeleteLockedStakeEntry(entries, db, operationType) + } else { + err = bulkInsertLockedStakeEntry(entries, db, operationType, params) + } + if err != nil { + return errors.Wrapf(err, "entries.LockedStakeBatchOperation: Problem with operation type %v", operationType) + } + return nil +} + +// bulkInsertLockedStakeEntry inserts a batch of locked stake entries into the database. 
+func bulkInsertLockedStakeEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + // Create a new array to hold the bun struct. + pgEntrySlice := make([]*PGLockedStakeEntry, len(uniqueEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueEntries { + pgEntrySlice[ii] = &PGLockedStakeEntry{LockedStakeEntry: LockedStakeEncoderToPGStruct(entry.Encoder.(*lib.LockedStakeEntry), entry.KeyBytes, params)} + } + + // Execute the insert query. + query := db.NewInsert().Model(&pgEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertLockedStakeEntry: Error inserting entries") + } + return nil +} + +// bulkDeleteLockedStakeEntry deletes a batch of locked stake entries from the database. +func bulkDeleteLockedStakeEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + + // Transform the entries into a list of keys to delete. + keysToDelete := consumer.KeysToDelete(uniqueEntries) + + // Execute the delete query. + if _, err := db.NewDelete(). + Model(&PGLockedStakeEntry{}). + Where("badger_key IN (?)", bun.In(keysToDelete)). + Returning(""). 
+ Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteLockedStakeEntry: Error deleting entries") + } + + return nil +} diff --git a/entries/lockup.go b/entries/lockup.go new file mode 100644 index 0000000..16c5ee5 --- /dev/null +++ b/entries/lockup.go @@ -0,0 +1,117 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" + "github.com/uptrace/bun/extra/bunbig" +) + +// TODO: when to use nullzero vs use_zero? +type LockedBalanceEntry struct { + HodlerPKID string `bun:",nullzero"` + ProfilePKID string `bun:",nullzero"` + UnlockTimestampNanoSecs int64 + VestingEndTimestampNanoSecs int64 + BalanceBaseUnits *bunbig.Int `pg:",use_zero"` + + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGLockedBalanceEntry struct { + bun.BaseModel `bun:"table:locked_balance_entry"` + LockedBalanceEntry +} + +// TODO: Do I need this? +type PGLockedBalanceEntryUtxoOps struct { + bun.BaseModel `bun:"table:locked_balance_entry_utxo_ops"` + LockedBalanceEntry + UtxoOperation +} + +// Convert the LockedBalanceEntry DeSo encoder to the PGLockedBalnceEntry struct used by bun. 
+func LockedBalanceEntryEncoderToPGStruct(lockedBalanceEntry *lib.LockedBalanceEntry, keyBytes []byte, params *lib.DeSoParams) LockedBalanceEntry { + pgLockedBalanceEntry := LockedBalanceEntry{ + BadgerKey: keyBytes, + } + + if lockedBalanceEntry.HODLerPKID != nil { + pgLockedBalanceEntry.HodlerPKID = consumer.PublicKeyBytesToBase58Check((*lockedBalanceEntry.HODLerPKID)[:], params) + } + + if lockedBalanceEntry.ProfilePKID != nil { + pgLockedBalanceEntry.ProfilePKID = consumer.PublicKeyBytesToBase58Check((*lockedBalanceEntry.ProfilePKID)[:], params) + } + + pgLockedBalanceEntry.UnlockTimestampNanoSecs = lockedBalanceEntry.UnlockTimestampNanoSecs + pgLockedBalanceEntry.VestingEndTimestampNanoSecs = lockedBalanceEntry.VestingEndTimestampNanoSecs + pgLockedBalanceEntry.BalanceBaseUnits = bunbig.FromMathBig(lockedBalanceEntry.BalanceBaseUnits.ToBig()) + + return pgLockedBalanceEntry +} + +// LockedBalanceEntryBatchOperation is the entry point for processing a batch of LockedBalance entries. +// It determines the appropriate handler based on the operation type and executes it. +func LockedBalanceEntryBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { + // We check before we call this function that there is at least one operation type. + // We also ensure before this that all entries have the same operation type. + operationType := entries[0].OperationType + var err error + if operationType == lib.DbOperationTypeDelete { + err = bulkDeleteLockedBalanceEntry(entries, db, operationType) + } else { + err = bulkInsertLockedBalanceEntry(entries, db, operationType, params) + } + if err != nil { + return errors.Wrapf(err, "entries.LockedBalanceEntryBatchOperation: Problem with operation type %v", operationType) + } + return nil +} + +// bulkInsertLockedBalanceEntry inserts a batch of locked stake entries into the database. 
+func bulkInsertLockedBalanceEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + // Create a new array to hold the bun struct. + pgEntrySlice := make([]*PGLockedBalanceEntry, len(uniqueEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueEntries { + pgEntrySlice[ii] = &PGLockedBalanceEntry{LockedBalanceEntry: LockedBalanceEntryEncoderToPGStruct(entry.Encoder.(*lib.LockedBalanceEntry), entry.KeyBytes, params)} + } + + // Execute the insert query. + query := db.NewInsert().Model(&pgEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertLockedBalanceEntry: Error inserting entries") + } + return nil +} + +// bulkDeleteLockedBalanceEntry deletes a batch of locked stake entries from the database. +func bulkDeleteLockedBalanceEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + + // Transform the entries into a list of keys to delete. + keysToDelete := consumer.KeysToDelete(uniqueEntries) + + // Execute the delete query. + if _, err := db.NewDelete(). + Model(&PGLockedBalanceEntry{}). + Where("badger_key IN (?)", bun.In(keysToDelete)). + Returning(""). 
+ Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteLockedBalanceEntry: Error deleting entries") + } + + return nil +} diff --git a/entries/message.go b/entries/message.go index 10ebf77..1a29d2a 100644 --- a/entries/message.go +++ b/entries/message.go @@ -55,7 +55,7 @@ func MessageEncoderToPGStruct(messageEntry *lib.MessageEntry, keyBytes []byte, p // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func MessageBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func MessageBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -72,7 +72,7 @@ func MessageBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params * } // bulkInsertMessageEntry inserts a batch of message entries into the database. -func bulkInsertMessageEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertMessageEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -96,7 +96,7 @@ func bulkInsertMessageEntry(entries []*lib.StateChangeEntry, db *bun.DB, operati } // bulkDeletePostEntry deletes a batch of message entries from the database. 
-func bulkDeleteMessageEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteMessageEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/new_message.go b/entries/new_message.go index eecba19..96a856c 100644 --- a/entries/new_message.go +++ b/entries/new_message.go @@ -3,6 +3,7 @@ package entries import ( "bytes" "context" + "encoding/hex" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" "github.com/pkg/errors" @@ -11,14 +12,14 @@ import ( ) type NewMessageEntry struct { - SenderAccessGroupOwnerPublicKey string `bun:",nullzero"` - SenderAccessGroupKeyName string `bun:",nullzero"` - SenderAccessGroupPublicKey string `bun:",nullzero"` - RecipientAccessGroupOwnerPublicKey string `bun:",nullzero"` - RecipientAccessGroupKeyName string `bun:",nullzero"` - RecipientAccessGroupPublicKey string `bun:",nullzero"` - EncryptedText string `pg:",use_zero"` - IsGroupChatMessage bool `bun:",nullzero"` + SenderAccessGroupOwnerPublicKey string `bun:",nullzero"` + SenderAccessGroupKeyName string `bun:",nullzero"` + SenderAccessGroupPublicKey string `bun:",nullzero"` + RecipientAccessGroupOwnerPublicKey string `bun:",nullzero"` + RecipientAccessGroupKeyName string `bun:",nullzero"` + RecipientAccessGroupPublicKey string `bun:",nullzero"` + EncryptedText string `pg:",use_zero"` + IsGroupChatMessage bool Timestamp time.Time `pg:",use_zero"` ExtraData map[string]string `bun:"type:jsonb"` @@ -45,7 +46,7 @@ func NewMessageEncoderToPGStruct(newMessageEntry *lib.NewMessageEntry, keyBytes } pgNewMessageEntry := NewMessageEntry{ - EncryptedText: string(newMessageEntry.EncryptedText[:]), + EncryptedText: hex.EncodeToString(newMessageEntry.EncryptedText[:]), Timestamp: 
consumer.UnixNanoToTime(newMessageEntry.TimestampNanos), ExtraData: consumer.ExtraDataBytesToString(newMessageEntry.ExtraData), IsGroupChatMessage: isGroupChatMessage, @@ -81,7 +82,7 @@ func NewMessageEncoderToPGStruct(newMessageEntry *lib.NewMessageEntry, keyBytes // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func NewMessageBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func NewMessageBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -98,7 +99,7 @@ func NewMessageBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, param } // bulkInsertNewMessageEntry inserts a batch of new_message entries into the database. -func bulkInsertNewMessageEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertNewMessageEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -123,7 +124,7 @@ func bulkInsertNewMessageEntry(entries []*lib.StateChangeEntry, db *bun.DB, oper } // bulkDeletePostEntry deletes a batch of new_message entries from the database. 
-func bulkDeleteNewMessageEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteNewMessageEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/nft.go b/entries/nft.go index 02647f2..59a68c8 100644 --- a/entries/nft.go +++ b/entries/nft.go @@ -62,7 +62,7 @@ func NftEncoderToPGStruct(nftEntry *lib.NFTEntry, keyBytes []byte, params *lib.D // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func NftBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func NftBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -79,7 +79,7 @@ func NftBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib. } // bulkInsertNftEntry inserts a batch of nft entries into the database. -func bulkInsertNftEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertNftEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -104,7 +104,7 @@ func bulkInsertNftEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationTy } // bulkDeletePostEntry deletes a batch of nft entries from the database. 
-func bulkDeleteNftEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteNftEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/nft_bid.go b/entries/nft_bid.go index 9ea5fef..9648305 100644 --- a/entries/nft_bid.go +++ b/entries/nft_bid.go @@ -47,7 +47,7 @@ func NftBidEncoderToPGStruct(nftBidEntry *lib.NFTBidEntry, keyBytes []byte, para // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func NftBidBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func NftBidBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -64,7 +64,7 @@ func NftBidBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *l } // bulkInsertNftBidEntry inserts a batch of nft_bid entries into the database. -func bulkInsertNftBidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertNftBidEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. 
@@ -88,7 +88,7 @@ func bulkInsertNftBidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operatio } // bulkDeletePostEntry deletes a batch of nft_bid entries from the database. -func bulkDeleteNftBidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteNftBidEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/pkid.go b/entries/pkid.go index a085aa2..f7058d0 100644 --- a/entries/pkid.go +++ b/entries/pkid.go @@ -4,6 +4,7 @@ import ( "context" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" + "github.com/golang/glog" "github.com/pkg/errors" "github.com/uptrace/bun" ) @@ -25,8 +26,20 @@ type PGPkidEntryUtxoOps struct { UtxoOperation } +type LeaderScheduleEntry struct { + SnapshotAtEpochNumber uint64 `pg:",use_zero"` + LeaderIndex uint16 `pg:",use_zero"` + ValidatorPKID string `pg:",use_zero"` + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGLeaderScheduleEntry struct { + bun.BaseModel `bun:"table:leader_schedule_entry"` + LeaderScheduleEntry +} + // Convert the Diamond DeSo encoder to the PG struct used by bun. -func PkidEncoderToPGStruct(pkidEntry *lib.PKIDEntry, keyBytes []byte, params *lib.DeSoParams) PkidEntry { +func PkidEntryEncoderToPGStruct(pkidEntry *lib.PKIDEntry, keyBytes []byte, params *lib.DeSoParams) PkidEntry { return PkidEntry{ Pkid: consumer.PublicKeyBytesToBase58Check(pkidEntry.PKID[:], params), PublicKey: consumer.PublicKeyBytesToBase58Check(pkidEntry.PublicKey[:], params), @@ -34,9 +47,27 @@ func PkidEncoderToPGStruct(pkidEntry *lib.PKIDEntry, keyBytes []byte, params *li } } +// Convert the leader schedule entry to the PG struct used by bun. 
+func LeaderScheduleEncoderToPGStruct(validatorPKID *lib.PKID, keyBytes []byte, params *lib.DeSoParams, +) *LeaderScheduleEntry { + prefixRemovedKeyBytes := keyBytes[1:] + if len(prefixRemovedKeyBytes) != 10 { + glog.Errorf("LeaderScheduleEncoderToPGStruct: Invalid key length: %d", len(prefixRemovedKeyBytes)) + return nil + } + epochNumber := lib.DecodeUint64(prefixRemovedKeyBytes[:8]) + leaderIndex := lib.DecodeUint16(prefixRemovedKeyBytes[8:10]) + return &LeaderScheduleEntry{ + ValidatorPKID: consumer.PublicKeyBytesToBase58Check(validatorPKID[:], params), + SnapshotAtEpochNumber: epochNumber, + LeaderIndex: leaderIndex, + BadgerKey: keyBytes, + } +} + // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func PkidBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func PkidEntryBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -53,7 +84,7 @@ func PkidBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib } // bulkInsertDiamondEntry inserts a batch of diamond entries into the database. -func bulkInsertPkidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertPkidEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. 
@@ -61,7 +92,7 @@ func bulkInsertPkidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationT
 
 	// Loop through the entries and convert them to PGPostEntry.
 	for ii, entry := range uniqueEntries {
-		pgEntrySlice[ii] = &PGPkidEntry{PkidEntry: PkidEncoderToPGStruct(entry.Encoder.(*lib.PKIDEntry), entry.KeyBytes, params)}
+		pgEntrySlice[ii] = &PGPkidEntry{PkidEntry: PkidEntryEncoderToPGStruct(entry.Encoder.(*lib.PKIDEntry), entry.KeyBytes, params)}
 	}
 
 	query := db.NewInsert().Model(&pgEntrySlice)
@@ -77,7 +108,7 @@ func bulkInsertPkidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationT
 }
 
 // bulkDeletePostEntry deletes a batch of diamond entries from the database.
-func bulkDeletePkidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error {
+func bulkDeletePkidEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error {
 	// Track the unique entries we've inserted so we don't insert the same entry twice.
 	uniqueEntries := consumer.UniqueEntries(entries)
 
@@ -95,3 +126,80 @@ func bulkDeletePkidEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationT
 
 	return nil
 }
+
+func PkidBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error {
+	// We check before we call this function that there is at least one operation type.
+	// We also ensure before this that all entries have the same operation type.
+	operationType := entries[0].OperationType
+	var err error
+	if operationType == lib.DbOperationTypeDelete {
+		err = bulkDeletePkid(entries, db, operationType)
+	} else {
+		err = bulkInsertPkid(entries, db, operationType, params)
+	}
+	if err != nil {
+		return errors.Wrapf(err, "entries.PkidBatchOperation: Problem with operation type %v", operationType)
+	}
+	return nil
+}
+
+// bulkInsertPkid inserts a batch of PKIDs into the database.
+func bulkInsertPkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error {
+	// Track the unique entries we've inserted so we don't insert the same entry twice.
+	uniqueEntries := consumer.UniqueEntries(entries)
+
+	uniqueLeaderScheduleEntries := consumer.FilterEntriesByPrefix(
+		uniqueEntries, lib.Prefixes.PrefixSnapshotLeaderSchedule)
+	// NOTE: if we need to support parsing other indexes for PKIDs beyond LeaderSchedule,
+	// we will need to filter the uniqueEntries by the appropriate prefix and then convert
+	// the entries to the appropriate PG struct.
+	// Create a new array to hold the bun struct.
+	pgEntrySlice := make([]*PGLeaderScheduleEntry, 0, len(uniqueLeaderScheduleEntries))
+
+	// Loop through the entries and convert them to PGLeaderScheduleEntry.
+	for _, entry := range uniqueLeaderScheduleEntries {
+		leaderScheduleEntry := LeaderScheduleEncoderToPGStruct(entry.Encoder.(*lib.PKID), entry.KeyBytes, params)
+		if leaderScheduleEntry == nil {
+			glog.Errorf("bulkInsertPkid: Error converting LeaderScheduleEntry to PG struct")
+			continue
+		}
+		pgEntrySlice = append(pgEntrySlice, &PGLeaderScheduleEntry{LeaderScheduleEntry: *leaderScheduleEntry})
+	}
+
+	if len(pgEntrySlice) > 0 {
+		query := db.NewInsert().Model(&pgEntrySlice)
+
+		if operationType == lib.DbOperationTypeUpsert {
+			query = query.On("CONFLICT (badger_key) DO UPDATE")
+		}
+
+		if _, err := query.Returning("").Exec(context.Background()); err != nil {
+			return errors.Wrapf(err, "entries.bulkInsertPkid: Error inserting entries")
+		}
+	}
+
+	return nil
+}
+
+// bulkDeletePkid deletes a batch of PKIDs from the database.
+func bulkDeletePkid(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error {
+	// Track the unique entries we've inserted so we don't insert the same entry twice.
+	uniqueEntries := consumer.UniqueEntries(entries)
+
+	// Transform the entries into a list of keys to delete.
+ keysToDelete := consumer.KeysToDelete(uniqueEntries) + leaderSchedKeysToDelete := consumer.FilterKeysByPrefix(keysToDelete, lib.Prefixes.PrefixSnapshotLeaderSchedule) + + if len(leaderSchedKeysToDelete) > 0 { + // Execute the delete query. + if _, err := db.NewDelete(). + Model(&PGLeaderScheduleEntry{}). + Where("badger_key IN (?)", bun.In(leaderSchedKeysToDelete)). + Returning(""). + Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeletePkid: Error deleting entries") + } + } + + return nil +} diff --git a/entries/post.go b/entries/post.go index e64a816..51a6e56 100644 --- a/entries/post.go +++ b/entries/post.go @@ -60,7 +60,7 @@ func PostEntryEncoderToPGStruct(postEntry *lib.PostEntry, keyBytes []byte, param IsNFT: postEntry.IsNFT, NumNFTCopies: postEntry.NumNFTCopies, NumNFTCopiesForSale: postEntry.NumNFTCopiesForSale, - NumNFTCopiesBurned: postEntry.NumNFTCopiesBurned, + NumNFTCopiesBurned: postEntry.NumNFTCopiesBurned, HasUnlockable: postEntry.HasUnlockable, NFTRoyaltyToCreatorBasisPoints: postEntry.NFTRoyaltyToCreatorBasisPoints, NFTRoyaltyToCoinBasisPoints: postEntry.NFTRoyaltyToCoinBasisPoints, @@ -90,7 +90,7 @@ func PostEntryEncoderToPGStruct(postEntry *lib.PostEntry, keyBytes []byte, param // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func PostBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func PostBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. 
operationType := entries[0].OperationType @@ -107,7 +107,7 @@ func PostBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib } // bulkInsertPostEntry inserts a batch of post entries into the database. -func bulkInsertPostEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertPostEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -135,7 +135,7 @@ func bulkInsertPostEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationT } // bulkDeletePostEntry deletes a batch of post entries from the database. -func bulkDeletePostEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeletePostEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/post_association.go b/entries/post_association.go index d6882fd..bd5bae2 100644 --- a/entries/post_association.go +++ b/entries/post_association.go @@ -38,6 +38,7 @@ func PostAssociationEncoderToPGStruct(postAssociationEntry *lib.PostAssociationE pgEntry := PostAssociationEntry{ AssociationType: string(postAssociationEntry.AssociationType[:]), AssociationValue: string(postAssociationEntry.AssociationValue[:]), + BlockHeight: postAssociationEntry.BlockHeight, ExtraData: consumer.ExtraDataBytesToString(postAssociationEntry.ExtraData), BadgerKey: keyBytes, } @@ -60,7 +61,7 @@ func PostAssociationEncoderToPGStruct(postAssociationEntry *lib.PostAssociationE // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func PostAssociationBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func PostAssociationBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -77,7 +78,7 @@ func PostAssociationBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, } // bulkInsertPostAssociationEntry inserts a batch of post_association entries into the database. -func bulkInsertPostAssociationEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertPostAssociationEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -101,7 +102,7 @@ func bulkInsertPostAssociationEntry(entries []*lib.StateChangeEntry, db *bun.DB, } // bulkDeletePostEntry deletes a batch of post_association entries from the database. -func bulkDeletePostAssociationEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeletePostAssociationEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/profile.go b/entries/profile.go index 436f84f..0f86ea6 100644 --- a/entries/profile.go +++ b/entries/profile.go @@ -49,7 +49,7 @@ func ProfileEntryEncoderToPGStruct(profileEntry *lib.ProfileEntry, keyBytes []by MintingDisabled: profileEntry.CreatorCoinEntry.MintingDisabled, DesoLockedNanos: profileEntry.CreatorCoinEntry.DeSoLockedNanos, CcCoinsInCirculationNanos: profileEntry.CreatorCoinEntry.CoinsInCirculationNanos.Uint64(), - DaoCoinsInCirculationNanosHex: profileEntry.DAOCoinEntry.CoinsInCirculationNanos.String(), + DaoCoinsInCirculationNanosHex: profileEntry.DAOCoinEntry.CoinsInCirculationNanos.Hex(), DaoCoinMintingDisabled: profileEntry.DAOCoinEntry.MintingDisabled, DaoCoinTransferRestrictionStatus: profileEntry.DAOCoinEntry.TransferRestrictionStatus, ExtraData: consumer.ExtraDataBytesToString(profileEntry.ExtraData), @@ -60,7 +60,7 @@ func ProfileEntryEncoderToPGStruct(profileEntry *lib.ProfileEntry, keyBytes []by // PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. 
-func ProfileBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func ProfileBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -77,7 +77,7 @@ func ProfileBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params * } // bulkInsertPostEntry inserts a batch of post entries into the database. -func bulkInsertProfileEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertProfileEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -100,7 +100,7 @@ func bulkInsertProfileEntry(entries []*lib.StateChangeEntry, db *bun.DB, operati } // bulkDeletePostEntry deletes a batch of profile entries from the database. -func bulkDeleteProfileEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteProfileEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/stake.go b/entries/stake.go new file mode 100644 index 0000000..66bbbca --- /dev/null +++ b/entries/stake.go @@ -0,0 +1,118 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/backend/routes" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" + "github.com/uptrace/bun/extra/bunbig" +) + +// TODO: when to use nullzero vs use_zero? +type StakeEntry struct { + StakerPKID string `bun:",nullzero"` + ValidatorPKID string `bun:",nullzero"` + RewardMethod routes.StakeRewardMethod + StakeAmountNanos *bunbig.Int `pg:",use_zero"` + + ExtraData map[string]string `bun:"type:jsonb"` + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGStakeEntry struct { + bun.BaseModel `bun:"table:stake_entry"` + StakeEntry +} + +// TODO: Do I need this? +type PGStakeEntryUtxoOps struct { + bun.BaseModel `bun:"table:stake_entry_utxo_ops"` + StakeEntry + UtxoOperation +} + +// Convert the StakeEntry DeSo encoder to the PGStakeEntry struct used by bun. +func StakeEncoderToPGStruct(stakeEntry *lib.StakeEntry, keyBytes []byte, params *lib.DeSoParams) StakeEntry { + pgStakeEntry := StakeEntry{ + ExtraData: consumer.ExtraDataBytesToString(stakeEntry.ExtraData), + BadgerKey: keyBytes, + } + + if stakeEntry.StakerPKID != nil { + pgStakeEntry.StakerPKID = consumer.PublicKeyBytesToBase58Check((*stakeEntry.StakerPKID)[:], params) + } + + if stakeEntry.ValidatorPKID != nil { + pgStakeEntry.ValidatorPKID = consumer.PublicKeyBytesToBase58Check((*stakeEntry.ValidatorPKID)[:], params) + } + + pgStakeEntry.RewardMethod = routes.FromLibStakeRewardMethod(stakeEntry.RewardMethod) + pgStakeEntry.StakeAmountNanos = bunbig.FromMathBig(stakeEntry.StakeAmountNanos.ToBig()) + + return pgStakeEntry +} + +// StakeBatchOperation is the entry point for processing a batch of Stake entries. 
+// It determines the appropriate handler based on the operation type and executes it. +func StakeBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { + // We check before we call this function that there is at least one operation type. + // We also ensure before this that all entries have the same operation type. + operationType := entries[0].OperationType + var err error + if operationType == lib.DbOperationTypeDelete { + err = bulkDeleteStakeEntry(entries, db, operationType) + } else { + err = bulkInsertStakeEntry(entries, db, operationType, params) + } + if err != nil { + return errors.Wrapf(err, "entries.StakeBatchOperation: Problem with operation type %v", operationType) + } + return nil +} + +// bulkInsertStakeEntry inserts a batch of stake entries into the database. +func bulkInsertStakeEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + // Create a new array to hold the bun struct. + pgEntrySlice := make([]*PGStakeEntry, len(uniqueEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueEntries { + pgEntrySlice[ii] = &PGStakeEntry{StakeEntry: StakeEncoderToPGStruct(entry.Encoder.(*lib.StakeEntry), entry.KeyBytes, params)} + } + + // Execute the insert query. + query := db.NewInsert().Model(&pgEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertStakeEntry: Error inserting entries") + } + return nil +} + +// bulkDeleteStakeEntry deletes a batch of stake entries from the database. 
+func bulkDeleteStakeEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + + // Transform the entries into a list of keys to delete. + keysToDelete := consumer.KeysToDelete(uniqueEntries) + + // Execute the delete query. + if _, err := db.NewDelete(). + Model(&PGStakeEntry{}). + Where("badger_key IN (?)", bun.In(keysToDelete)). + Returning(""). + Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteStakeEntry: Error deleting entries") + } + + return nil +} diff --git a/entries/stake_reward.go b/entries/stake_reward.go new file mode 100644 index 0000000..8b86ec1 --- /dev/null +++ b/entries/stake_reward.go @@ -0,0 +1,48 @@ +package entries + +import ( + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/uptrace/bun" +) + +type StakeReward struct { + StakerPKID string `bun:",nullzero"` + ValidatorPKID string `bun:",nullzero"` + RewardMethod lib.StakingRewardMethod // TODO: we probably want this to be human readable? + RewardNanos uint64 `pg:",use_zero"` + IsValidatorCommission bool + BlockHash string + + UtxoOpIndex uint64 `pg:",use_zero"` +} + +type PGStakeReward struct { + bun.BaseModel `bun:"table:stake_reward"` + StakeReward +} + +// Convert the StakeRewardStateChangeMetadata DeSo encoder to the PGStakeReward struct used by bun. 
+func StakeRewardEncoderToPGStruct( + stakeReward *lib.StakeRewardStateChangeMetadata, + params *lib.DeSoParams, + blockHash string, + utxoOpIndex uint64, +) StakeReward { + pgStakeReward := StakeReward{} + + if stakeReward.StakerPKID != nil { + pgStakeReward.StakerPKID = consumer.PublicKeyBytesToBase58Check((*stakeReward.StakerPKID)[:], params) + } + + if stakeReward.ValidatorPKID != nil { + pgStakeReward.ValidatorPKID = consumer.PublicKeyBytesToBase58Check((*stakeReward.ValidatorPKID)[:], params) + } + + pgStakeReward.RewardMethod = stakeReward.StakingRewardMethod + pgStakeReward.RewardNanos = stakeReward.RewardNanos + pgStakeReward.IsValidatorCommission = stakeReward.IsValidatorCommission + pgStakeReward.BlockHash = blockHash + pgStakeReward.UtxoOpIndex = utxoOpIndex + return pgStakeReward +} diff --git a/entries/transaction.go b/entries/transaction.go index c5fe312..4a48073 100644 --- a/entries/transaction.go +++ b/entries/transaction.go @@ -30,10 +30,14 @@ type TransactionEntry struct { PublicKey string ExtraData map[string]string `bun:"type:jsonb"` Signature []byte - IndexInBlock uint64 + IndexInBlock *uint64 BlockHeight uint64 Timestamp time.Time `pg:",use_zero"` - BadgerKey []byte `pg:",use_zero"` + // Atomic fields + WrapperTransactionHash *string + IndexInWrapperTransaction *uint64 + + BadgerKey []byte `pg:",use_zero"` } type PGTransactionEntry struct { @@ -41,7 +45,16 @@ type PGTransactionEntry struct { TransactionEntry } -func TransactionEncoderToPGStruct(transaction *lib.MsgDeSoTxn, blockIndex uint64, blockHash string, blockHeight uint64, timestamp time.Time, params *lib.DeSoParams) (*PGTransactionEntry, error) { +func TransactionEncoderToPGStruct( + transaction *lib.MsgDeSoTxn, + blockIndex *uint64, + blockHash string, + blockHeight uint64, + timestamp time.Time, + wrapperTransactionHash *string, + indexInWrapperTransaction *uint64, + params *lib.DeSoParams, +) (*PGTransactionEntry, error) { var txInputs []map[string]string for _, input := range 
transaction.TxInputs { @@ -70,23 +83,25 @@ func TransactionEncoderToPGStruct(transaction *lib.MsgDeSoTxn, blockIndex uint64 transactionEntry := &PGTransactionEntry{ TransactionEntry: TransactionEntry{ - TransactionHash: hex.EncodeToString(transaction.Hash()[:]), - TransactionId: consumer.PublicKeyBytesToBase58Check(transaction.Hash()[:], params), - BlockHash: blockHash, - Version: uint16(transaction.TxnVersion), - Inputs: txInputs, - Outputs: txOutputs, - FeeNanos: transaction.TxnFeeNanos, - TxnMeta: transaction.TxnMeta, - TxnMetaBytes: txnMetaBytes, - TxnBytes: txnBytes, - TxnType: uint16(transaction.TxnMeta.GetTxnType()), - PublicKey: consumer.PublicKeyBytesToBase58Check(transaction.PublicKey[:], params), - ExtraData: consumer.ExtraDataBytesToString(transaction.ExtraData), - IndexInBlock: blockIndex, - BlockHeight: blockHeight, - Timestamp: timestamp, - BadgerKey: transaction.Hash()[:], + TransactionHash: hex.EncodeToString(transaction.Hash()[:]), + TransactionId: consumer.PublicKeyBytesToBase58Check(transaction.Hash()[:], params), + BlockHash: blockHash, + Version: uint16(transaction.TxnVersion), + Inputs: txInputs, + Outputs: txOutputs, + FeeNanos: transaction.TxnFeeNanos, + TxnMeta: transaction.TxnMeta, + TxnMetaBytes: txnMetaBytes, + TxnBytes: txnBytes, + TxnType: uint16(transaction.TxnMeta.GetTxnType()), + PublicKey: consumer.PublicKeyBytesToBase58Check(transaction.PublicKey[:], params), + ExtraData: consumer.ExtraDataBytesToString(transaction.ExtraData), + IndexInBlock: blockIndex, + BlockHeight: blockHeight, + Timestamp: timestamp, + WrapperTransactionHash: wrapperTransactionHash, + IndexInWrapperTransaction: indexInWrapperTransaction, + BadgerKey: transaction.Hash()[:], }, } @@ -103,7 +118,7 @@ func TransactionEncoderToPGStruct(transaction *lib.MsgDeSoTxn, blockIndex uint64 // TransactionBatchOperation is the entry point for processing a batch of transaction entries. It determines the appropriate handler // based on the operation type and executes it. 
-func TransactionBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func TransactionBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -127,16 +142,41 @@ func transformTransactionEntry(entries []*lib.StateChangeEntry, params *lib.DeSo for _, entry := range uniqueTransactions { transaction := entry.Encoder.(*lib.MsgDeSoTxn) - transactionEntry, err := TransactionEncoderToPGStruct(transaction, 0, "", 0, time.Now(), params) + txIndexInBlock := uint64(0) + transactionEntry, err := TransactionEncoderToPGStruct( + transaction, + &txIndexInBlock, + "", + 0, + time.Now(), + nil, + nil, + params, + ) if err != nil { - return nil, errors.Wrapf(err, "entries.transformAndBulkInsertTransactionEntry: Problem converting transaction to PG struct") + return nil, errors.Wrapf( + err, + "entries.transformAndBulkInsertTransactionEntry: Problem converting transaction to PG struct", + ) } pgTransactionEntrySlice = append(pgTransactionEntrySlice, transactionEntry) + if transactionEntry.TxnMeta.GetTxnType() != lib.TxnTypeAtomicTxnsWrapper { + continue + } + innerTxns, err := parseInnerTxnsFromAtomicTxn(transactionEntry, params) + if err != nil { + return nil, errors.Wrapf( + err, + "entries.transformAndBulkInsertTransactionEntry: Problem parsing inner txns from atomic txn", + ) + } + pgTransactionEntrySlice = append(pgTransactionEntrySlice, innerTxns...) + } return pgTransactionEntrySlice, nil } -func bulkInsertTransactionEntry(entries []*PGTransactionEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkInsertTransactionEntry(entries []*PGTransactionEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Bulk insert the entries. 
transactionQuery := db.NewInsert().Model(&entries) @@ -151,7 +191,7 @@ func bulkInsertTransactionEntry(entries []*PGTransactionEntry, db *bun.DB, opera } // transformAndBulkInsertTransactionEntry inserts a batch of user_association entries into the database. -func transformAndBulkInsertTransactionEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func transformAndBulkInsertTransactionEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { pgTransactionEntrySlice, err := transformTransactionEntry(entries, params) if err != nil { return errors.Wrapf(err, "entries.transformAndBulkInsertTransactionEntry: Problem transforming transaction entries") @@ -166,7 +206,7 @@ func transformAndBulkInsertTransactionEntry(entries []*lib.StateChangeEntry, db } // bulkDeleteTransactionEntry deletes a batch of transaction entries from the database. -func bulkDeleteTransactionEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteTransactionEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) @@ -184,3 +224,41 @@ func bulkDeleteTransactionEntry(entries []*lib.StateChangeEntry, db *bun.DB, ope return nil } + +func parseInnerTxnsFromAtomicTxn( + pgAtomicTxn *PGTransactionEntry, + params *lib.DeSoParams, +) ( + []*PGTransactionEntry, + error, +) { + if pgAtomicTxn == nil { + return nil, fmt.Errorf("parseInnerTxnsFromAtomicTxn: atomicTxn is nil") + } + if pgAtomicTxn.TxnMeta.GetTxnType() != lib.TxnTypeAtomicTxnsWrapper { + return nil, fmt.Errorf("parseInnerTxnsFromAtomicTxn: txn is not an atomic txn") + } + realTxMeta, ok := pgAtomicTxn.TxnMeta.(*lib.AtomicTxnsWrapperMetadata) + if !ok { + return nil, fmt.Errorf("parseInnerTxnsFromAtomicTxn: txn meta is not an atomic txn wrapper") + } + innerTxns := make([]*PGTransactionEntry, 0) + for ii, txn := range realTxMeta.Txns { + indexInWrapper := uint64(ii) + pgInnerTxn, err := TransactionEncoderToPGStruct( + txn, + nil, + pgAtomicTxn.BlockHash, + pgAtomicTxn.BlockHeight, + pgAtomicTxn.Timestamp, + &pgAtomicTxn.TransactionHash, + &indexInWrapper, + params, + ) + if err != nil { + return nil, errors.Wrapf(err, "getInnerTxnsFromAtomicTxn: Problem converting inner txn to PG struct") + } + innerTxns = append(innerTxns, pgInnerTxn) + } + return innerTxns, nil +} diff --git a/entries/user_association.go b/entries/user_association.go index 6f02be2..029399d 100644 --- a/entries/user_association.go +++ b/entries/user_association.go @@ -38,6 +38,7 @@ func UserAssociationEncoderToPGStruct(userAssociationEntry *lib.UserAssociationE pgEntry := UserAssociationEntry{ AssociationType: string(userAssociationEntry.AssociationType[:]), AssociationValue: string(userAssociationEntry.AssociationValue[:]), + BlockHeight: userAssociationEntry.BlockHeight, ExtraData: consumer.ExtraDataBytesToString(userAssociationEntry.ExtraData), BadgerKey: keyBytes, } @@ -60,7 +61,7 @@ func UserAssociationEncoderToPGStruct(userAssociationEntry *lib.UserAssociationE // PostBatchOperation is the entry point 
for processing a batch of post entries. It determines the appropriate handler // based on the operation type and executes it. -func UserAssociationBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func UserAssociationBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -77,7 +78,7 @@ func UserAssociationBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, } // bulkInsertUserAssociationEntry inserts a batch of user_association entries into the database. -func bulkInsertUserAssociationEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertUserAssociationEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) // Create a new array to hold the bun struct. @@ -101,7 +102,7 @@ func bulkInsertUserAssociationEntry(entries []*lib.StateChangeEntry, db *bun.DB, } // bulkDeletePostEntry deletes a batch of user_association entries from the database. -func bulkDeleteUserAssociationEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteUserAssociationEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) diff --git a/entries/utxo_operation.go b/entries/utxo_operation.go index 6234e84..5024463 100644 --- a/entries/utxo_operation.go +++ b/entries/utxo_operation.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/state-consumer/consumer" + "github.com/golang/glog" "github.com/pkg/errors" "github.com/uptrace/bun" "time" @@ -64,9 +65,9 @@ func ConvertUtxoOperationKeyToBlockHashHex(keyBytes []byte) string { return hex.EncodeToString(keyBytes[1:]) } -// PostBatchOperation is the entry point for processing a batch of post entries. It determines the appropriate handler +// UtxoOperationBatchOperation is the entry point for processing a batch of utxo operations. It determines the appropriate handler // based on the operation type and executes it. -func UtxoOperationBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, params *lib.DeSoParams) error { +func UtxoOperationBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { // We check before we call this function that there is at least one operation type. // We also ensure before this that all entries have the same operation type. operationType := entries[0].OperationType @@ -83,7 +84,7 @@ func UtxoOperationBatchOperation(entries []*lib.StateChangeEntry, db *bun.DB, pa } // bulkInsertUtxoOperationsEntry inserts a batch of utxo operation entries into the database. -func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { +func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { // Track the unique entries we've inserted so we don't insert the same entry twice. 
uniqueEntries := consumer.UniqueEntries(entries) @@ -92,11 +93,14 @@ func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, transactionUpdates := make([]*PGTransactionEntry, 0) affectedPublicKeys := make([]*PGAffectedPublicKeyEntry, 0) blockEntries := make([]*PGBlockEntry, 0) + pgBlockSigners := make([]*PGBlockSigner, 0) + stakeRewardEntries := make([]*PGStakeReward, 0) + jailedHistoryEntries := make([]*PGJailedHistoryEvent, 0) // Start timer to track how long it takes to insert the entries. start := time.Now() - fmt.Printf("entries.bulkInsertUtxoOperationsEntry: Inserting %v entries\n", len(uniqueEntries)) + glog.V(2).Infof("entries.bulkInsertUtxoOperationsEntry: Inserting %v entries\n", len(uniqueEntries)) transactionCount := 0 // Whether we are inserting transactions for the first time, or just updating them. @@ -107,24 +111,62 @@ func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, for _, entry := range uniqueEntries { transactions := []*PGTransactionEntry{} + innerTransactions := []*PGTransactionEntry{} + innerTransactionsUtxoOperations := [][]*lib.UtxoOperation{} // We can use this function regardless of the db prefix, because both block_hash and transaction_hash // are stored in the same blockHashHex format in the key. blockHash := ConvertUtxoOperationKeyToBlockHashHex(entry.KeyBytes) + utxoOperations, ok := entry.Encoder.(*lib.UtxoOperationBundle) + if !ok { + return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: Problem with entry %v", entry) + } + // Check to see if the state change entry has an attached block. - // Note that this only happens during the iniltial sync, in order to speed up the sync process. + // Note that this only happens during the initial sync, in order to speed up the sync process. 
if entry.Block != nil { insertTransactions = true block := entry.Block - blockEntry := BlockEncoderToPGStruct(block, entry.KeyBytes) + blockEntry, blockSigners := BlockEncoderToPGStruct(block, entry.KeyBytes, params) blockEntries = append(blockEntries, blockEntry) + pgBlockSigners = append(pgBlockSigners, blockSigners...) for ii, txn := range block.Txns { - pgTxn, err := TransactionEncoderToPGStruct(txn, uint64(ii), blockEntry.BlockHash, blockEntry.Height, blockEntry.Timestamp, params) + indexInBlock := uint64(ii) + pgTxn, err := TransactionEncoderToPGStruct( + txn, + &indexInBlock, + blockEntry.BlockHash, + blockEntry.Height, + blockEntry.Timestamp, + nil, + nil, + params, + ) if err != nil { return errors.Wrapf(err, "entries.bulkInsertUtxoOperationsEntry: Problem converting transaction to PG struct") } transactions = append(transactions, pgTxn) + if txn.TxnMeta.GetTxnType() != lib.TxnTypeAtomicTxnsWrapper { + continue + } + // If we have an atomic transaction, we need to parse the inner transactions. + if ii >= len(utxoOperations.UtxoOpBundle) { + return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: not enough utxo operations") + } + innerTxns, innerUtxoOps, err := getInnerTxnsFromAtomicTxn( + pgTxn, + utxoOperations.UtxoOpBundle[ii], + params, + ) + if err != nil { + return errors.Wrapf( + err, + "entries.bulkInsertUtxoOperationsEntry: Problem getting inner transactions", + ) + } + innerTransactions = append(innerTransactions, innerTxns...) + innerTransactionsUtxoOperations = append(innerTransactionsUtxoOperations, innerUtxoOps...) } } else { // If the block isn't available on the entry itself, we can retrieve it from the database. @@ -146,81 +188,99 @@ func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, // Note: it's normally considered bad practice to use string formatting to insert values into a query. 
However, // in this case, the filterField is a constant and the value is clearly only block hash or transaction hash - // so there is no risk of SQL injection. - err := db.NewSelect().Model(&transactions).Column("txn_bytes", "transaction_hash", "timestamp", "txn_type").Where(fmt.Sprintf("%s = ?", filterField), blockHash).Order("index_in_block ASC").Scan(context.Background()) + err := db.NewSelect(). + Model(&transactions). + Column( + "txn_bytes", + "transaction_hash", + "timestamp", + "txn_type", + "block_hash", + "block_height", + ).Where(fmt.Sprintf("%s = ?", filterField), blockHash).Where("wrapper_transaction_hash IS NULL").Order("index_in_block ASC").Scan(context.Background()) if err != nil { return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: Problem getting transactions at block height %v: %v", entry.BlockHeight, err) } - } - - utxoOperations, ok := entry.Encoder.(*lib.UtxoOperationBundle) - if !ok { - return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: Problem with entry %v", entry) - } - - transactionCount += len(utxoOperations.UtxoOpBundle) - // Create a wait group to wait for all the goroutines to finish. - for jj := range utxoOperations.UtxoOpBundle { - - utxoOps := utxoOperations.UtxoOpBundle[jj] - // Update the transaction metadata for this transaction. - if jj < len(transactions) { - transaction := &lib.MsgDeSoTxn{} - err := transaction.FromBytes(transactions[jj].TxnBytes) - if err != nil { - return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: Problem decoding transaction for entry %+v at block height %v", entry, entry.BlockHeight) + for ii, pgTxn := range transactions { + // Hack our way around the fact that we can't unmarshal the txn meta for atomic txns. 
+ if pgTxn.TxnType != uint16(lib.TxnTypeAtomicTxnsWrapper) { + continue + } + atomicTxn := &lib.MsgDeSoTxn{} + if err = atomicTxn.FromBytes(pgTxn.TxnBytes); err != nil { + return errors.Wrapf(err, "entries.bulkInsertUtxoOperationsEntry: Problem decoding atomic txn") + } + // Recreate the transaction encoder instead of using the one from the db. + pgAtomicTxn, err := TransactionEncoderToPGStruct( + atomicTxn, + pgTxn.IndexInBlock, + pgTxn.BlockHash, + pgTxn.BlockHeight, + pgTxn.Timestamp, + nil, + nil, + params, + ) + + // If we have an atomic transaction, we need to parse the inner transactions. + if ii >= len(utxoOperations.UtxoOpBundle) { + return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: not enough utxo operations") } - txIndexMetadata, err := consumer.ComputeTransactionMetadata(transaction, blockHash, &lib.DeSoMainnetParams, transaction.TxnFeeNanos, uint64(jj), utxoOps) + innerTxns, innerUtxoOps, err := getInnerTxnsFromAtomicTxn( + pgAtomicTxn, + utxoOperations.UtxoOpBundle[ii], + params, + ) if err != nil { - return fmt.Errorf("entries.bulkInsertUtxoOperationsEntry: Problem computing transaction metadata for entry %+v at block height %v", entry, entry.BlockHeight) + return errors.Wrapf( + err, + "entries.bulkInsertUtxoOperationsEntry: Problem getting inner transactions", + ) } + innerTransactions = append(innerTransactions, innerTxns...) + innerTransactionsUtxoOperations = append(innerTransactionsUtxoOperations, innerUtxoOps...) + } + } - metadata := txIndexMetadata.GetEncoderForTxType(transaction.TxnMeta.GetTxnType()) - basicTransferMetadata := txIndexMetadata.BasicTransferTxindexMetadata - basicTransferMetadata.UtxoOps = nil - - transactions[jj].TxIndexMetadata = metadata - - transactions[jj].TxIndexBasicTransferMetadata = txIndexMetadata.GetEncoderForTxType(lib.TxnTypeBasicTransfer) - - // Track which public keys have already been added to the affected public keys slice, to avoid duplicates. 
- affectedPublicKeyMetadataSet := make(map[string]bool) - affectedPublicKeySet := make(map[string]bool) + transactionCount += len(utxoOperations.UtxoOpBundle) - // Loop through the affected public keys and add them to the affected public keys slice. - for _, affectedPublicKey := range txIndexMetadata.AffectedPublicKeys { - // Skip if we've already added this public key/metadata. - apkmDuplicateKey := fmt.Sprintf("%v:%v", affectedPublicKey.PublicKeyBase58Check, affectedPublicKey.Metadata) - if _, ok := affectedPublicKeyMetadataSet[apkmDuplicateKey]; ok { - continue - } - affectedPublicKeyMetadataSet[apkmDuplicateKey] = true + var err error + // TODO: Create a wait group to wait for all the goroutines to finish. + transactionUpdates, affectedPublicKeys, stakeRewardEntries, jailedHistoryEntries, err = + parseUtxoOperationBundle( + entry, + utxoOperations.UtxoOpBundle, + transactions, + blockHash, + params, + ) + if err != nil { + return errors.Wrapf(err, "entries.bulkInsertUtxoOperationsEntry: Problem parsing utxo operation bundle") + } - // Track which public keys have already been added to the affected public keys slice. If they have, - // mark this record as a duplicate to make it easier to filter out. 
- apkIsDuplicate := false - if _, ok := affectedPublicKeySet[affectedPublicKey.PublicKeyBase58Check]; ok { - apkIsDuplicate = true - } - affectedPublicKeySet[affectedPublicKey.PublicKeyBase58Check] = true - - affectedPublicKeyEntry := &PGAffectedPublicKeyEntry{ - AffectedPublicKeyEntry: AffectedPublicKeyEntry{ - PublicKey: affectedPublicKey.PublicKeyBase58Check, - Metadata: affectedPublicKey.Metadata, - IsDuplicate: apkIsDuplicate, - Timestamp: transactions[jj].Timestamp, - TxnType: transactions[jj].TxnType, - TransactionHash: transactions[jj].TransactionHash, - }, - } - affectedPublicKeys = append(affectedPublicKeys, affectedPublicKeyEntry) - } - transactionUpdates = append(transactionUpdates, transactions[jj]) - } + // Parse inner txns and their utxo operations + innerTransactionUpdates, innerAffectedPublicKeys, innerStakeRewardEntries, innerJailedHistoryEntries, err := + parseUtxoOperationBundle( + entry, + innerTransactionsUtxoOperations, + innerTransactions, + blockHash, + params, + ) + if err != nil { + return errors.Wrapf( + err, + "entries.bulkInsertUtxoOperationsEntry: Problem parsing inner utxo operation bundle", + ) } + transactionUpdates = append(transactionUpdates, innerTransactionUpdates...) + affectedPublicKeys = append(affectedPublicKeys, innerAffectedPublicKeys...) + stakeRewardEntries = append(stakeRewardEntries, innerStakeRewardEntries...) + jailedHistoryEntries = append(jailedHistoryEntries, innerJailedHistoryEntries...) + transactionCount += len(innerTransactionsUtxoOperations) // Print how long it took to insert the entries. 
} - fmt.Printf("entries.bulkInsertUtxoOperationsEntry: Processed %v txns in %v s\n", transactionCount, time.Since(start)) + glog.V(2).Infof("entries.bulkInsertUtxoOperationsEntry: Processed %v txns in %v s\n", transactionCount, time.Since(start)) start = time.Now() @@ -242,6 +302,18 @@ func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, return errors.Wrapf(err, "entries.bulkInsertBlock: Error inserting entries") } + if len(pgBlockSigners) > 0 { + blockSignerQuery := db.NewInsert().Model(&pgBlockSigners) + + if operationType == lib.DbOperationTypeUpsert { + blockSignerQuery = blockSignerQuery.On("CONFLICT (block_hash, signer_index) DO UPDATE") + } + + if _, err := blockSignerQuery.Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertBlockSigners: Error inserting block signer entries") + } + } + } else { values := db.NewValues(&transactionUpdates) _, err := db.NewUpdate(). @@ -260,7 +332,7 @@ func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, } } - fmt.Printf("entries.bulkInsertUtxoOperationsEntry: Updated %v txns in %v s\n", len(transactionUpdates), time.Since(start)) + glog.V(2).Infof("entries.bulkInsertUtxoOperationsEntry: Updated %v txns in %v s\n", len(transactionUpdates), time.Since(start)) start = time.Now() @@ -272,12 +344,31 @@ func bulkInsertUtxoOperationsEntry(entries []*lib.StateChangeEntry, db *bun.DB, } } - fmt.Printf("entries.bulkInsertUtxoOperationsEntry: Inserted %v affected public keys in %v s\n", len(affectedPublicKeys), time.Since(start)) + glog.V(2).Infof("entries.bulkInsertUtxoOperationsEntry: Inserted %v affected public keys in %v s\n", len(affectedPublicKeys), time.Since(start)) + + start = time.Now() + + // Insert stake rewards into db + if len(stakeRewardEntries) > 0 { + _, err := db.NewInsert().Model(&stakeRewardEntries).On("CONFLICT (block_hash, utxo_op_index) DO UPDATE").Exec(context.Background()) + if err != nil { + return errors.Wrapf(err, 
"InsertStakeRewards: Problem inserting stake rewards") + } + } + glog.V(2).Infof("entries.bulkInsertUtxoOperationsEntry: Inserted %v stake rewards in %v s\n", len(stakeRewardEntries), time.Since(start)) + + if len(jailedHistoryEntries) > 0 { + _, err := db.NewInsert().Model(&jailedHistoryEntries).On("CONFLICT (validator_pkid, jailed_at_epoch_number, unjailed_at_epoch_number) DO NOTHING").Exec(context.Background()) + if err != nil { + return errors.Wrapf(err, "InsertJailedHistory: Problem inserting jailed history") + } + } + return nil } // bulkDeletePostEntry deletes a batch of utxo_operation entries from the database. -func bulkDeleteUtxoOperationEntry(entries []*lib.StateChangeEntry, db *bun.DB, operationType lib.StateSyncerOperationType) error { +func bulkDeleteUtxoOperationEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { // Track the unique entries we've inserted so we don't insert the same entry twice. uniqueEntries := consumer.UniqueEntries(entries) @@ -295,3 +386,177 @@ func bulkDeleteUtxoOperationEntry(entries []*lib.StateChangeEntry, db *bun.DB, o return nil } + +func parseUtxoOperationBundle( + entry *lib.StateChangeEntry, + utxoOpBundle [][]*lib.UtxoOperation, + transactions []*PGTransactionEntry, + blockHashHex string, + params *lib.DeSoParams, +) ( + []*PGTransactionEntry, + []*PGAffectedPublicKeyEntry, + []*PGStakeReward, + []*PGJailedHistoryEvent, + error, +) { + var affectedPublicKeys []*PGAffectedPublicKeyEntry + var transactionUpdates []*PGTransactionEntry + var jailedHistoryEntries []*PGJailedHistoryEvent + var stakeRewardEntries []*PGStakeReward + for jj := range utxoOpBundle { + utxoOps := utxoOpBundle[jj] + // Update the transaction metadata for this transaction. 
+ if jj < len(transactions) { + transaction := &lib.MsgDeSoTxn{} + err := transaction.FromBytes(transactions[jj].TxnBytes) + if err != nil { + return nil, + nil, + nil, + nil, + errors.Wrapf( + err, + "parseUtxoOperationBundle: Problem decoding transaction for entry %+v at "+ + "block height %v", + entry, + entry.BlockHeight, + ) + } + txIndexMetadata, err := consumer.ComputeTransactionMetadata(transaction, blockHashHex, params, transaction.TxnFeeNanos, uint64(jj), utxoOps) + if err != nil { + // If we fail to compute txindex metadata, log the error and continue to the next transaction. + // We still append this txn to the transactionUpdates slice so that we can have it in the db. + glog.Errorf("parseUtxoOperationBundle: Problem computing transaction metadata for "+ + "entry %+v at block height %v: %v", entry, entry.BlockHeight, err) + transactionUpdates = append(transactionUpdates, transactions[jj]) + continue + } + metadata := txIndexMetadata.GetEncoderForTxType(transaction.TxnMeta.GetTxnType()) + basicTransferMetadata := txIndexMetadata.BasicTransferTxindexMetadata + basicTransferMetadata.UtxoOps = nil + + // For atomic transactions, we need to remove the UtxoOps from the metadata for each inner transaction. + if metadata != nil && metadata.GetEncoderType() == lib.EncoderTypeAtomicTxnsWrapperTxindexMetadata { + atomicTxnMetadata := metadata.(*lib.AtomicTxnsWrapperTxindexMetadata) + for _, innerTxnMetadata := range atomicTxnMetadata.InnerTxnsTransactionMetadata { + if innerTxnMetadata.BasicTransferTxindexMetadata == nil { + continue + } + innerTxnMetadata.BasicTransferTxindexMetadata.UtxoOps = nil + } + } + transactions[jj].TxIndexMetadata = metadata + + transactions[jj].TxIndexBasicTransferMetadata = txIndexMetadata.GetEncoderForTxType(lib.TxnTypeBasicTransfer) + + // Track which public keys have already been added to the affected public keys slice, to avoid duplicates. 
+ affectedPublicKeyMetadataSet := make(map[string]bool) + affectedPublicKeySet := make(map[string]bool) + + switch transaction.TxnMeta.GetTxnType() { + case lib.TxnTypeUnjailValidator: + // Find the unjail utxo op + var unjailUtxoOp *lib.UtxoOperation + for _, utxoOp := range utxoOps { + if utxoOp.Type == lib.OperationTypeUnjailValidator { + unjailUtxoOp = utxoOp + break + } + } + if unjailUtxoOp == nil { + glog.Error("parseUtxoOperationBundle: Problem finding unjail utxo op") + continue + } + scm, ok := unjailUtxoOp.StateChangeMetadata.(*lib.UnjailValidatorStateChangeMetadata) + if !ok { + glog.Error("parseUtxoOperationBundle: Problem with state change metadata for unjail") + continue + } + // Parse the jailed history event and add it to the slice. + jailedHistoryEntries = append(jailedHistoryEntries, + &PGJailedHistoryEvent{ + JailedHistoryEntry: UnjailValidatorStateChangeMetadataEncoderToPGStruct(scm, params), + }, + ) + } + + // Loop through the affected public keys and add them to the affected public keys slice. + for _, affectedPublicKey := range txIndexMetadata.AffectedPublicKeys { + // Skip if we've already added this public key/metadata. + apkmDuplicateKey := fmt.Sprintf("%v:%v", affectedPublicKey.PublicKeyBase58Check, affectedPublicKey.Metadata) + if _, ok := affectedPublicKeyMetadataSet[apkmDuplicateKey]; ok { + continue + } + affectedPublicKeyMetadataSet[apkmDuplicateKey] = true + + // Track which public keys have already been added to the affected public keys slice. If they have, + // mark this record as a duplicate to make it easier to filter out. 
+ apkIsDuplicate := false + if _, ok := affectedPublicKeySet[affectedPublicKey.PublicKeyBase58Check]; ok { + apkIsDuplicate = true + } + affectedPublicKeySet[affectedPublicKey.PublicKeyBase58Check] = true + + affectedPublicKeyEntry := &PGAffectedPublicKeyEntry{ + AffectedPublicKeyEntry: AffectedPublicKeyEntry{ + PublicKey: affectedPublicKey.PublicKeyBase58Check, + Metadata: affectedPublicKey.Metadata, + IsDuplicate: apkIsDuplicate, + Timestamp: transactions[jj].Timestamp, + TxnType: transactions[jj].TxnType, + TransactionHash: transactions[jj].TransactionHash, + }, + } + affectedPublicKeys = append(affectedPublicKeys, affectedPublicKeyEntry) + } + transactionUpdates = append(transactionUpdates, transactions[jj]) + } else if jj == len(transactions) { + // TODO: parse utxo operations for the block level index. + // Examples: deletion of expired nonces, staking rewards (restaked + // + payed to balance), validator jailing, updating validator's + // last active at epoch. + for ii, utxoOp := range utxoOps { + switch utxoOp.Type { + case lib.OperationTypeStakeDistributionRestake, lib.OperationTypeStakeDistributionPayToBalance: + stateChangeMetadata, ok := utxoOp.StateChangeMetadata.(*lib.StakeRewardStateChangeMetadata) + if !ok { + glog.Error("parseUtxoOperationBundle: Problem with state change metadata for " + + "stake rewards") + continue + } + stakeReward := PGStakeReward{ + StakeReward: StakeRewardEncoderToPGStruct(stateChangeMetadata, params, blockHashHex, uint64(ii)), + } + stakeRewardEntries = append(stakeRewardEntries, &stakeReward) + } + } + } + } + return transactionUpdates, affectedPublicKeys, stakeRewardEntries, jailedHistoryEntries, nil +} + +func getInnerTxnsFromAtomicTxn( + pgAtomicTxn *PGTransactionEntry, + utxoOperations []*lib.UtxoOperation, + params *lib.DeSoParams, +) ( + []*PGTransactionEntry, + [][]*lib.UtxoOperation, + error, +) { + innerTxns, err := parseInnerTxnsFromAtomicTxn(pgAtomicTxn, params) + if err != nil { + return nil, nil, 
errors.Wrapf(err, "getInnerTxnsFromAtomicTxn: Problem parsing inner txns") + } + atomicUtxoOp := consumer.GetUtxoOpByOperationType(utxoOperations, lib.OperationTypeAtomicTxnsWrapper) + if atomicUtxoOp == nil { + return nil, nil, fmt.Errorf("getInnerTxnsFromAtomicTxn: atomic txn has no utxo operation") + } + if atomicUtxoOp.AtomicTxnsInnerUtxoOps == nil || + len(atomicUtxoOp.AtomicTxnsInnerUtxoOps) != len(innerTxns) { + return nil, nil, fmt.Errorf("getInnerTxnsFromAtomicTxn: atomic txn has no inner utxo operations") + } + glog.Infof("getInnerTxnsFromAtomicTxn: Found %v inner txns", atomicUtxoOp.AtomicTxnsInnerUtxoOps) + return innerTxns, atomicUtxoOp.AtomicTxnsInnerUtxoOps, nil +} diff --git a/entries/validator.go b/entries/validator.go new file mode 100644 index 0000000..18f00d7 --- /dev/null +++ b/entries/validator.go @@ -0,0 +1,236 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" + "github.com/uptrace/bun/extra/bunbig" +) + +// TODO: when to use nullzero vs use_zero? +type ValidatorEntry struct { + ValidatorPKID string `bun:",nullzero"` + Domains []string `bun:",array"` + DisableDelegatedStake bool + DelegatedStakeCommissionBasisPoints uint64 + VotingPublicKey string `bun:",nullzero"` + VotingAuthorization string `bun:",nullzero"` + // Use bunbig.Int to store the balance as a numeric in the pg database. + TotalStakeAmountNanos *bunbig.Int `pg:",use_zero"` + LastActiveAtEpochNumber uint64 + JailedAtEpochNumber uint64 + + ExtraData map[string]string `bun:"type:jsonb"` + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGValidatorEntry struct { + bun.BaseModel `bun:"table:validator_entry"` + ValidatorEntry +} + +// TODO: Do I need this? 
+type PGValidatorEntryUtxoOps struct { + bun.BaseModel `bun:"table:validator_entry_utxo_ops"` + ValidatorEntry + UtxoOperation +} + +type SnapshotValidatorEntry struct { + ValidatorPKID string `bun:",nullzero"` + Domains []string `bun:",array"` + DisableDelegatedStake bool + DelegatedStakeCommissionBasisPoints uint64 + VotingPublicKey string `bun:",nullzero"` + VotingAuthorization string `bun:",nullzero"` + // Use bunbig.Int to store the balance as a numeric in the pg database. + TotalStakeAmountNanos *bunbig.Int `pg:",use_zero"` + LastActiveAtEpochNumber uint64 + JailedAtEpochNumber uint64 + SnapshotAtEpochNumber uint64 `pg:",use_zero"` + + ExtraData map[string]string `bun:"type:jsonb"` + BadgerKey []byte `pg:",pk,use_zero"` +} + +type PGSnapshotValidatorEntry struct { + bun.BaseModel `bun:"table:snapshot_validator_entry"` + SnapshotValidatorEntry +} + +// Convert the ValidatorEntry DeSo encoder to the PGValidatorEntry struct used by bun. +func ValidatorEncoderToPGStruct(validatorEntry *lib.ValidatorEntry, keyBytes []byte, params *lib.DeSoParams) ValidatorEntry { + pgValidatorEntry := ValidatorEntry{ + ExtraData: consumer.ExtraDataBytesToString(validatorEntry.ExtraData), + BadgerKey: keyBytes, + } + + if validatorEntry.ValidatorPKID != nil { + pgValidatorEntry.ValidatorPKID = consumer.PublicKeyBytesToBase58Check((*validatorEntry.ValidatorPKID)[:], params) + } + + if validatorEntry.Domains != nil { + pgValidatorEntry.Domains = make([]string, len(validatorEntry.Domains)) + for ii, domain := range validatorEntry.Domains { + pgValidatorEntry.Domains[ii] = string(domain) + } + } + + pgValidatorEntry.DisableDelegatedStake = validatorEntry.DisableDelegatedStake + pgValidatorEntry.DelegatedStakeCommissionBasisPoints = validatorEntry.DelegatedStakeCommissionBasisPoints + + if validatorEntry.VotingPublicKey != nil { + pgValidatorEntry.VotingPublicKey = validatorEntry.VotingPublicKey.ToString() + } + + if validatorEntry.VotingAuthorization != nil { + 
pgValidatorEntry.VotingAuthorization = validatorEntry.VotingAuthorization.ToString() + } + + pgValidatorEntry.TotalStakeAmountNanos = bunbig.FromMathBig(validatorEntry.TotalStakeAmountNanos.ToBig()) + pgValidatorEntry.LastActiveAtEpochNumber = validatorEntry.LastActiveAtEpochNumber + pgValidatorEntry.JailedAtEpochNumber = validatorEntry.JailedAtEpochNumber + + return pgValidatorEntry +} + +// ValidatorBatchOperation is the entry point for processing a batch of Validator entries. +// It determines the appropriate handler based on the operation type and executes it. +func ValidatorBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error { + // We check before we call this function that there is at least one operation type. + // We also ensure before this that all entries have the same operation type. + operationType := entries[0].OperationType + var err error + if operationType == lib.DbOperationTypeDelete { + err = bulkDeleteValidatorEntry(entries, db, operationType) + } else { + err = bulkInsertValidatorEntry(entries, db, operationType, params) + } + if err != nil { + return errors.Wrapf(err, "entries.ValidatorBatchOperation: Problem with operation type %v", operationType) + } + return nil +} + +// bulkInsertValidatorEntry inserts a batch of validator entries into the database. +func bulkInsertValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + uniqueValidatorEntries := consumer.FilterEntriesByPrefix(uniqueEntries, lib.Prefixes.PrefixValidatorByPKID) + uniqueSnapshotValidatorEntries := consumer.FilterEntriesByPrefix(uniqueEntries, lib.Prefixes.PrefixSnapshotValidatorSetByPKID) + // Create a new array to hold the bun struct. 
+ pgEntrySlice := make([]*PGValidatorEntry, len(uniqueValidatorEntries)) + pgSnapshotEntrySlice := make([]*PGSnapshotValidatorEntry, len(uniqueSnapshotValidatorEntries)) + + // Loop through the entries and convert them to PGEntry. + for ii, entry := range uniqueValidatorEntries { + pgEntrySlice[ii] = &PGValidatorEntry{ValidatorEntry: ValidatorEncoderToPGStruct(entry.Encoder.(*lib.ValidatorEntry), entry.KeyBytes, params)} + } + for ii, entry := range uniqueSnapshotValidatorEntries { + pgSnapshotEntrySlice[ii] = &PGSnapshotValidatorEntry{SnapshotValidatorEntry: SnapshotValidatorEncoderToPGStruct(entry.Encoder.(*lib.ValidatorEntry), entry.KeyBytes, params)} + } + + // Execute the insert query. + if len(pgEntrySlice) > 0 { + query := db.NewInsert().Model(&pgEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertValidatorEntry: Error inserting validator entries") + } + } + + if len(pgSnapshotEntrySlice) > 0 { + query := db.NewInsert().Model(&pgSnapshotEntrySlice) + + if operationType == lib.DbOperationTypeUpsert { + query = query.On("CONFLICT (badger_key) DO UPDATE") + } + + if _, err := query.Returning("").Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkInsertValidatorEntry: Error inserting snapshot validator entries") + } + } + return nil +} + +// bulkDeleteValidatorEntry deletes a batch of validator entries from the database. +func bulkDeleteValidatorEntry(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error { + // Track the unique entries we've inserted so we don't insert the same entry twice. + uniqueEntries := consumer.UniqueEntries(entries) + uniqueKeys := consumer.KeysToDelete(uniqueEntries) + + // Transform the entries into a list of keys to delete. 
+	validatorKeysToDelete := consumer.FilterKeysByPrefix(uniqueKeys, lib.Prefixes.PrefixValidatorByPKID)
+
+	snapshotValidatorKeysToDelete := consumer.FilterKeysByPrefix(
+		uniqueKeys,
+		lib.Prefixes.PrefixSnapshotValidatorSetByPKID,
+	)
+
+	// Execute the delete query for validator entries.
+	if len(validatorKeysToDelete) > 0 {
+		if _, err := db.NewDelete().
+			Model(&PGValidatorEntry{}).
+			Where("badger_key IN (?)", bun.In(validatorKeysToDelete)).
+			Returning("").
+			Exec(context.Background()); err != nil {
+			return errors.Wrapf(err, "entries.bulkDeleteValidatorEntry: Error deleting entries")
+		}
+	}
+
+	// Execute the delete query for snapshot validator entries.
+	if len(snapshotValidatorKeysToDelete) > 0 {
+		if _, err := db.NewDelete().
+			Model(&PGSnapshotValidatorEntry{}).
+			Where("badger_key IN (?)", bun.In(snapshotValidatorKeysToDelete)).
+			Returning("").
+			Exec(context.Background()); err != nil {
+			return errors.Wrapf(err, "entries.bulkDeleteValidatorEntry: Error deleting snapshot validator entries")
+		}
+	}
+
+	return nil
+}
+
+// Convert the SnapshotValidatorEntry DeSo encoder to the PGSnapshotValidatorEntry struct used by bun.
+func SnapshotValidatorEncoderToPGStruct(validatorEntry *lib.ValidatorEntry, keyBytes []byte, params *lib.DeSoParams) SnapshotValidatorEntry { + pgValidatorEntry := SnapshotValidatorEntry{ + ExtraData: consumer.ExtraDataBytesToString(validatorEntry.ExtraData), + BadgerKey: keyBytes, + } + + if validatorEntry.ValidatorPKID != nil { + pgValidatorEntry.ValidatorPKID = consumer.PublicKeyBytesToBase58Check((*validatorEntry.ValidatorPKID)[:], params) + } + + if validatorEntry.Domains != nil { + pgValidatorEntry.Domains = make([]string, len(validatorEntry.Domains)) + for ii, domain := range validatorEntry.Domains { + pgValidatorEntry.Domains[ii] = string(domain) + } + } + + pgValidatorEntry.DisableDelegatedStake = validatorEntry.DisableDelegatedStake + pgValidatorEntry.DelegatedStakeCommissionBasisPoints = validatorEntry.DelegatedStakeCommissionBasisPoints + + if validatorEntry.VotingPublicKey != nil { + pgValidatorEntry.VotingPublicKey = validatorEntry.VotingPublicKey.ToString() + } + + if validatorEntry.VotingAuthorization != nil { + pgValidatorEntry.VotingAuthorization = validatorEntry.VotingAuthorization.ToString() + } + + pgValidatorEntry.TotalStakeAmountNanos = bunbig.FromMathBig(validatorEntry.TotalStakeAmountNanos.ToBig()) + pgValidatorEntry.LastActiveAtEpochNumber = validatorEntry.LastActiveAtEpochNumber + pgValidatorEntry.JailedAtEpochNumber = validatorEntry.JailedAtEpochNumber + keyBytesWithoutPrefix := keyBytes[1:] + pgValidatorEntry.SnapshotAtEpochNumber = lib.DecodeUint64(keyBytesWithoutPrefix[:8]) + return pgValidatorEntry +} diff --git a/entries/yield_curve_point.go b/entries/yield_curve_point.go new file mode 100644 index 0000000..d545119 --- /dev/null +++ b/entries/yield_curve_point.go @@ -0,0 +1,109 @@ +package entries + +import ( + "context" + "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/state-consumer/consumer" + "github.com/pkg/errors" + "github.com/uptrace/bun" +) + +// TODO: when to use nullzero vs use_zero? 
+type LockupYieldCurvePoint struct {
+	ProfilePKID               string `bun:",nullzero"`
+	LockupDurationNanoSecs    int64
+	LockupYieldAPYBasisPoints uint64
+
+	BadgerKey []byte `pg:",pk,use_zero"`
+}
+
+type PGLockupYieldCurvePoint struct {
+	bun.BaseModel `bun:"table:yield_curve_point"`
+	LockupYieldCurvePoint
+}
+
+// TODO: Do I need this?
+type PGLockupYieldCurvePointUtxoOps struct {
+	bun.BaseModel `bun:"table:yield_curve_point_utxo_ops"`
+	LockupYieldCurvePoint
+	UtxoOperation
+}
+
+// Convert the LockupYieldCurvePoint DeSo encoder to the PGLockupYieldCurvePoint struct used by bun.
+func LockupYieldCurvePointEncoderToPGStruct(lockupYieldCurvePoint *lib.LockupYieldCurvePoint, keyBytes []byte, params *lib.DeSoParams) LockupYieldCurvePoint {
+	pgLockupYieldCurvePoint := LockupYieldCurvePoint{
+		BadgerKey: keyBytes,
+	}
+
+	if lockupYieldCurvePoint.ProfilePKID != nil {
+		pgLockupYieldCurvePoint.ProfilePKID = consumer.PublicKeyBytesToBase58Check((*lockupYieldCurvePoint.ProfilePKID)[:], params)
+	}
+
+	pgLockupYieldCurvePoint.LockupDurationNanoSecs = lockupYieldCurvePoint.LockupDurationNanoSecs
+	pgLockupYieldCurvePoint.LockupYieldAPYBasisPoints = lockupYieldCurvePoint.LockupYieldAPYBasisPoints
+
+	return pgLockupYieldCurvePoint
+}
+
+// LockupYieldCurvePointBatchOperation is the entry point for processing a batch of LockupYieldCurvePoint entries.
+// It determines the appropriate handler based on the operation type and executes it.
+func LockupYieldCurvePointBatchOperation(entries []*lib.StateChangeEntry, db bun.IDB, params *lib.DeSoParams) error {
+	// We check before we call this function that there is at least one operation type.
+	// We also ensure before this that all entries have the same operation type.
+	operationType := entries[0].OperationType
+	var err error
+	if operationType == lib.DbOperationTypeDelete {
+		err = bulkDeleteLockupYieldCurvePoint(entries, db, operationType)
+	} else {
+		err = bulkInsertLockupYieldCurvePoint(entries, db, operationType, params)
+	}
+	if err != nil {
+		return errors.Wrapf(err, "entries.LockupYieldCurvePointBatchOperation: Problem with operation type %v", operationType)
+	}
+	return nil
+}
+
+// bulkInsertLockupYieldCurvePoint inserts a batch of lockup yield curve points into the database.
+func bulkInsertLockupYieldCurvePoint(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType, params *lib.DeSoParams) error {
+	// Track the unique entries we've inserted so we don't insert the same entry twice.
+	uniqueEntries := consumer.UniqueEntries(entries)
+	// Create a new array to hold the bun struct.
+	pgEntrySlice := make([]*PGLockupYieldCurvePoint, len(uniqueEntries))
+
+	// Loop through the entries and convert them to PGEntry.
+	for ii, entry := range uniqueEntries {
+		pgEntrySlice[ii] = &PGLockupYieldCurvePoint{LockupYieldCurvePoint: LockupYieldCurvePointEncoderToPGStruct(entry.Encoder.(*lib.LockupYieldCurvePoint), entry.KeyBytes, params)}
+	}
+
+	// Execute the insert query.
+	query := db.NewInsert().Model(&pgEntrySlice)
+
+	if operationType == lib.DbOperationTypeUpsert {
+		query = query.On("CONFLICT (badger_key) DO UPDATE")
+	}
+
+	if _, err := query.Returning("").Exec(context.Background()); err != nil {
+		return errors.Wrapf(err, "entries.bulkInsertLockupYieldCurvePoint: Error inserting entries")
+	}
+	return nil
+}
+
+// bulkDeleteLockupYieldCurvePoint deletes a batch of lockup yield curve points from the database.
+func bulkDeleteLockupYieldCurvePoint(entries []*lib.StateChangeEntry, db bun.IDB, operationType lib.StateSyncerOperationType) error {
+	// Track the unique entries we've deleted so we don't delete the same entry twice.
+ uniqueEntries := consumer.UniqueEntries(entries) + + // Transform the entries into a list of keys to delete. + keysToDelete := consumer.KeysToDelete(uniqueEntries) + + // Execute the delete query. + if _, err := db.NewDelete(). + Model(&PGLockupYieldCurvePoint{}). + Where("badger_key IN (?)", bun.In(keysToDelete)). + Returning(""). + Exec(context.Background()); err != nil { + return errors.Wrapf(err, "entries.bulkDeleteLockupYieldCurvePoint: Error deleting entries") + } + + return nil +} diff --git a/go.mod b/go.mod index 56f0715..2272e7f 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,19 @@ -module PostgresDataHandler +module github.com/deso-protocol/postgres-data-handler go 1.18 replace github.com/deso-protocol/core => ../core/ +replace github.com/deso-protocol/backend => ../backend/ + +replace github.com/deso-protocol/state-consumer => ../state-consumer/ + require ( - github.com/deso-protocol/backend v1.2.10-0.20230727205436-dba653dc043c - github.com/deso-protocol/core v1.2.10-0.20230314161821-4069c3e417d3 - github.com/deso-protocol/state-consumer v1.0.4-0.20240117002702-0f75e8691905 + github.com/deso-protocol/backend v0.0.0-00010101000000-000000000000 + github.com/deso-protocol/core v0.0.0-00010101000000-000000000000 + github.com/deso-protocol/state-consumer v0.0.0-00010101000000-000000000000 github.com/golang/glog v1.0.0 + github.com/google/uuid v1.3.0 github.com/pkg/errors v0.9.1 github.com/spf13/viper v1.7.1 github.com/uptrace/bun v1.1.14 @@ -20,16 +25,19 @@ require ( ) require ( - cloud.google.com/go v0.81.0 // indirect - cloud.google.com/go/storage v1.15.0 // indirect + cloud.google.com/go v0.107.0 // indirect + cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v0.8.0 // indirect + cloud.google.com/go/storage v1.27.0 // indirect github.com/DataDog/datadog-go v4.5.0+incompatible // indirect github.com/Microsoft/go-winio v0.4.16 // indirect github.com/btcsuite/btcd v0.21.0-beta 
// indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/btcsuite/btcutil v1.0.2 // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -37,9 +45,10 @@ require ( github.com/decred/dcrd/lru v1.1.1 // indirect github.com/deso-protocol/go-deadlock v1.0.0 // indirect github.com/deso-protocol/go-merkle-tree v1.0.0 // indirect - github.com/dgraph-io/badger/v3 v3.2103.5 // indirect + github.com/dgraph-io/badger/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect github.com/ethereum/go-ethereum v1.9.25 // indirect github.com/fatih/color v1.15.0 // indirect github.com/fatih/structs v1.1.0 // indirect @@ -55,9 +64,10 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.3 // indirect github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 // indirect - github.com/google/uuid v1.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/h2non/bimg v1.1.5 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -65,7 +75,6 @@ require ( github.com/imdario/mergo v0.3.8 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect 
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/go-types v0.0.0-20210723172823-2deba1f80ba7 // indirect github.com/kevinburke/rest v0.0.0-20210506044642-5611499aa33c // indirect @@ -81,6 +90,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect github.com/nyaruka/phonenumbers v1.0.69 // indirect github.com/oleiade/lane v1.0.1 // indirect + github.com/onflow/crypto v0.25.0 // indirect github.com/pelletier/go-toml v1.7.0 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/philhofer/fwd v1.1.1 // indirect @@ -97,6 +107,7 @@ require ( github.com/spf13/cobra v1.1.3 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.8.1 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/tinylib/msgp v1.1.2 // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect @@ -110,23 +121,23 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - go.opencensus.io v0.23.0 // indirect + go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.9.0 // indirect - golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect - golang.org/x/mod v0.8.0 // indirect + golang.org/x/image v0.15.0 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c // indirect + golang.org/x/oauth2 v0.4.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect golang.org/x/tools v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/api v0.46.0 // indirect + 
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + gonum.org/v1/gonum v0.6.1 // indirect + google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab // indirect - google.golang.org/grpc v1.37.0 // indirect - google.golang.org/protobuf v1.26.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc v1.53.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/AlecAivazis/survey.v1 v1.8.7 // indirect gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/kyokomi/emoji.v1 v1.5.1 // indirect diff --git a/go.sum b/go.sum index 458a324..ceecb93 100644 --- a/go.sum +++ b/go.sum @@ -5,40 +5,22 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod 
h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.15.0 h1:Ljj+ZXVEhCr/1+4ZhvtteN1ND7UUsNTlduGclLh8GO0= -cloud.google.com/go/storage v1.15.0/go.mod h1:mjjQMoxxyGH7Jr8K5qrx6N2O0AHsczI61sMNn03GIZI= +cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= @@ -63,17 +45,16 @@ github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -86,6 +67,8 @@ github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnC github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= +github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod 
h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -105,10 +88,10 @@ github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7N github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -117,17 +100,11 @@ github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GK github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -143,20 +120,12 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeC github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/decred/dcrd/lru v1.1.1 h1:kWFDaW0OWx6AD6Ki342c+JPmHbiVdE6rK81pT3fuo/Y= github.com/decred/dcrd/lru v1.1.1/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/deso-protocol/backend v1.2.10-0.20230727205436-dba653dc043c 
h1:OK+OYVDLhaP0wUqOpWO0Q11ijc3kwnkEpi4EqqdV9m8= -github.com/deso-protocol/backend v1.2.10-0.20230727205436-dba653dc043c/go.mod h1:BnxywYGwEBoGuJRVfq6QIJKfRNRhydeFm9EQ1/ibUak= github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5NxXqecnL5ic= github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= -github.com/deso-protocol/state-consumer v1.0.4-0.20230915153810-81be6a7ec25f h1:MMqFppip/KN14upXJ8rkq8CCvz3PghkHqj5TfCYaXUo= -github.com/deso-protocol/state-consumer v1.0.4-0.20230915153810-81be6a7ec25f/go.mod h1:ivi9/WBRWK/AG/cgAcGpA6GdodBAaEWh9p8PfQT3r5I= -github.com/deso-protocol/state-consumer v1.0.4-0.20240107220224-c9c924b0d69e h1:MuYxzO6stsmbJADFz2433O/9DaDy95Dks+x4Hfd85qc= -github.com/deso-protocol/state-consumer v1.0.4-0.20240107220224-c9c924b0d69e/go.mod h1:ivi9/WBRWK/AG/cgAcGpA6GdodBAaEWh9p8PfQT3r5I= -github.com/deso-protocol/state-consumer v1.0.4-0.20240117002702-0f75e8691905 h1:CntCnCV7LiH/CjQvicwRVR5iOcaIlFBffiTDwtT4BEs= -github.com/deso-protocol/state-consumer v1.0.4-0.20240117002702-0f75e8691905/go.mod h1:ivi9/WBRWK/AG/cgAcGpA6GdodBAaEWh9p8PfQT3r5I= -github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= -github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ 
-170,12 +139,11 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.9.25 h1:mMiw/zOOtCLdGLWfcekua0qPrJTe7FVIiHJ4IKNTfR0= github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= @@ -187,6 +155,7 @@ github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fergusstrange/embedded-postgres v1.19.0 h1:NqDufJHeA03U7biULlPHZ0pZ10/mDOMKPILEpT50Fyk= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= 
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -197,8 +166,6 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 h1:MHrlpWOOFhCfY1L9iCIUy5cv5HgDtempICenzJt+7ws= github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373/go.mod h1:Dcsy1kii/xFyNad5JqY/d0GO5mu91sungp5xotbm3Yk= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -218,29 +185,21 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 
h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -250,7 +209,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -259,47 +217,37 @@ github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210125172800-10e9aeb4a998/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= @@ -341,7 +289,6 @@ github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -359,12 +306,11 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 
h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= @@ -380,6 +326,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -390,7 +337,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -441,6 +387,8 @@ github.com/oleiade/lane v1.0.1 h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= +github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -492,7 +440,6 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 
h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -508,19 +455,17 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.3 
h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= @@ -528,7 +473,6 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= @@ -536,8 +480,10 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -545,7 +491,10 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= @@ -563,7 +512,6 @@ github.com/ttacon/libphonenumber v1.2.1/go.mod h1:E0TpmdVMq5dyVlQ7oenAkhsLu86OkU github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/unrolled/secure v1.0.8 h1:JaMvKbe4CRt8oyxVXn+xY+6jlqd7pyJNSVkmsBxxQsM= github.com/unrolled/secure v1.0.8/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI= github.com/uptrace/bun v1.1.14 h1:S5vvNnjEynJ0CvnrBOD7MIRW7q/WbtvFXrdfy0lddAM= @@ -596,20 +544,13 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV github.com/wsddn/go-ecdh 
v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -618,7 +559,6 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod 
h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -634,19 +574,21 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod 
h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8= +golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -654,25 +596,16 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -690,62 +623,31 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c h1:SgVl/sCtkicsS7psKkje4H9YtjdEl3xsYh7N+5TDHqY= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -756,7 +658,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -765,46 +666,20 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201026173827-119d4633e4d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -813,24 +688,21 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -846,78 +718,37 @@ golang.org/x/tools 
v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.1 h1:/LSrTrgZtpbXyAR6+0e152SROCkJJSh7goYWVmdPFGc= +gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= -google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= +google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= +google.golang.org/api v0.103.0/go.mod 
h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -929,60 +760,18 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab h1:dkb90hr43A2Q5as5ZBphcOF2II0+EqfCBqGp7qFSpN4= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -991,11 +780,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf 
v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/AlecAivazis/survey.v1 v1.8.7 h1:oBJqtgsyBLg9K5FK9twNUbcPnbCPoh+R9a+7nag3qJM= gopkg.in/AlecAivazis/survey.v1 v1.8.7/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 h1:3C1EEjgFTPqrnS2SXuSqkBbZGacIOPJ7ScGJk4nrP9s= @@ -1003,7 +792,6 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.29.0/go.mod h1:FLwUDeuH0z5hkvgvd04/M3MHQN4AF5 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1035,11 +823,9 @@ 
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/handler/data_handler.go b/handler/data_handler.go index b630db5..2f40acc 100644 --- a/handler/data_handler.go +++ b/handler/data_handler.go @@ -1,11 +1,16 @@ package handler import ( - "PostgresDataHandler/entries" - "PostgresDataHandler/migrations/post_sync_migrations" + "context" + "crypto/rand" + "database/sql" + "encoding/hex" "fmt" "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/postgres-data-handler/entries" + "github.com/deso-protocol/postgres-data-handler/migrations/post_sync_migrations" "github.com/deso-protocol/state-consumer/consumer" + "github.com/golang/glog" "github.com/pkg/errors" "github.com/uptrace/bun" ) @@ -15,6 +20,8 @@ import ( type PostgresDataHandler struct { // A Postgres DB used for the storage of chain data. 
DB *bun.DB + // A bun transaction used for executing multiple operations in a single transaction. + Txn *bun.Tx // Params is a struct containing the current blockchain parameters. // It is used to determine which prefix to use for public keys. Params *lib.DeSoParams @@ -31,54 +38,91 @@ func (postgresDataHandler *PostgresDataHandler) HandleEntryBatch(batchedEntries var err error + // Get the correct db handle. + dbHandle := postgresDataHandler.GetDbHandle() + // Create a savepoint in the current transaction, if the transaction exists. + savepointName, err := postgresDataHandler.CreateSavepoint() + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.HandleEntryBatch: Error creating savepoint") + } + switch encoderType { case lib.EncoderTypePostEntry: - err = entries.PostBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.PostBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeProfileEntry: - err = entries.ProfileBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.ProfileBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeLikeEntry: - err = entries.LikeBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.LikeBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeDiamondEntry: - err = entries.DiamondBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.DiamondBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeFollowEntry: - err = entries.FollowBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.FollowBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeMessageEntry: - err = entries.MessageBatchOperation(batchedEntries, postgresDataHandler.DB, 
postgresDataHandler.Params) + err = entries.MessageBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeBalanceEntry: - err = entries.BalanceBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.BalanceBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeNFTEntry: - err = entries.NftBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.NftBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeNFTBidEntry: - err = entries.NftBidBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.NftBidBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeDerivedKeyEntry: - err = entries.DerivedKeyBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.DerivedKeyBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeAccessGroupEntry: - err = entries.AccessGroupBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.AccessGroupBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeAccessGroupMemberEntry: - err = entries.AccessGroupMemberBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.AccessGroupMemberBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeNewMessageEntry: - err = entries.NewMessageBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.NewMessageBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeUserAssociationEntry: - err = entries.UserAssociationBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = 
entries.UserAssociationBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypePostAssociationEntry: - err = entries.PostAssociationBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.PostAssociationBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypePKIDEntry: - err = entries.PkidBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.PkidEntryBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeDeSoBalanceEntry: - err = entries.DesoBalanceBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.DesoBalanceBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeDAOCoinLimitOrderEntry: - err = entries.DaoCoinLimitOrderBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.DaoCoinLimitOrderBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeUtxoOperationBundle: - err = entries.UtxoOperationBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.UtxoOperationBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeBlock: - err = entries.BlockBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.BlockBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) case lib.EncoderTypeTxn: - err = entries.TransactionBatchOperation(batchedEntries, postgresDataHandler.DB, postgresDataHandler.Params) + err = entries.TransactionBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeStakeEntry: + err = entries.StakeBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeValidatorEntry: + err = 
entries.ValidatorBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeLockedStakeEntry: + err = entries.LockedStakeBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeLockedBalanceEntry: + err = entries.LockedBalanceEntryBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeLockupYieldCurvePoint: + err = entries.LockupYieldCurvePointBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeEpochEntry: + err = entries.EpochEntryBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypePKID: + err = entries.PkidBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeGlobalParamsEntry: + err = entries.GlobalParamsBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) + case lib.EncoderTypeBLSPublicKeyPKIDPairEntry: + err = entries.BLSPublicKeyPKIDPairBatchOperation(batchedEntries, dbHandle, postgresDataHandler.Params) } if err != nil { + // If an error occurs, revert to the savepoint and return the error. + rollbackErr := postgresDataHandler.RevertToSavepoint(savepointName) + if rollbackErr != nil { + return errors.Wrapf(rollbackErr, "PostgresDataHandler.HandleEntryBatch: Error reverting to savepoint") + } return errors.Wrapf(err, "PostgresDataHandler.CallBatchOperationForEncoderType") } + + // Release the savepoint. + err = postgresDataHandler.ReleaseSavepoint(savepointName) + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.HandleEntryBatch: Error releasing savepoint") + } return nil } @@ -102,3 +146,109 @@ func (postgresDataHandler *PostgresDataHandler) HandleSyncEvent(syncEvent consum return nil } + +func (postgresDataHandler *PostgresDataHandler) InitiateTransaction() error { + glog.V(2).Info("Initiating Txn\n") + // If a transaction is already open, rollback the current transaction. 
+ if postgresDataHandler.Txn != nil { + err := postgresDataHandler.Txn.Rollback() + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.InitiateTransaction: Error rolling back current transaction") + } + } + tx, err := postgresDataHandler.DB.BeginTx(context.Background(), &sql.TxOptions{}) + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.InitiateTransaction: Error beginning transaction") + } + postgresDataHandler.Txn = &tx + return nil +} + +func (postgresDataHandler *PostgresDataHandler) CommitTransaction() error { + glog.V(2).Info("Committing Txn\n") + if postgresDataHandler.Txn == nil { + return fmt.Errorf("PostgresDataHandler.CommitTransaction: No transaction to commit") + } + err := postgresDataHandler.Txn.Commit() + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.CommitTransaction: Error committing transaction") + } + postgresDataHandler.Txn = nil + return nil +} + +func (postgresDataHandler *PostgresDataHandler) RollbackTransaction() error { + glog.V(2).Info("Rolling back Txn\n") + if postgresDataHandler.Txn == nil { + return fmt.Errorf("PostgresDataHandler.RollbackTransaction: No transaction to rollback") + } + err := postgresDataHandler.Txn.Rollback() + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.RollbackTransaction: Error rolling back transaction") + } + postgresDataHandler.Txn = nil + return nil +} + +// GetDbHandle returns the correct interface to use for database operations. +// If a transaction is open, it returns the transaction handle, otherwise it returns the db handle. +func (postgresDataHandler *PostgresDataHandler) GetDbHandle() bun.IDB { + if postgresDataHandler.Txn != nil { + return postgresDataHandler.Txn + } + return postgresDataHandler.DB +} + +// CreateSavepoint creates a savepoint in the current transaction. If no transaction is open, it returns an empty string. +// The randomly generated savepoint name is returned if the savepoint is created successfully. 
+func (postgresDataHandler *PostgresDataHandler) CreateSavepoint() (string, error) { + if postgresDataHandler.Txn == nil { + return "", nil + } + savepointName, err := generateSavepointName() + if err != nil { + return "", errors.Wrapf(err, "PostgresDataHandler.CreateSavepoint: Error generating savepoint name") + } + + _, err = postgresDataHandler.Txn.Exec(fmt.Sprintf("SAVEPOINT %s", savepointName)) + if err != nil { + return "", errors.Wrapf(err, "PostgresDataHandler.CreateSavepoint: Error creating savepoint") + } + + return savepointName, nil +} + +// RevertToSavepoint reverts the current transaction to the savepoint with the given name. +func (postgresDataHandler *PostgresDataHandler) RevertToSavepoint(savepointName string) error { + if postgresDataHandler.Txn == nil { + return nil + } + _, err := postgresDataHandler.Txn.Exec(fmt.Sprintf("ROLLBACK TO SAVEPOINT %s", savepointName)) + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.RevertToSavepoint: Error reverting to savepoint") + } + return nil +} + +// ReleaseSavepoint releases the savepoint with the given name. 
+func (postgresDataHandler *PostgresDataHandler) ReleaseSavepoint(savepointName string) error { + if postgresDataHandler.Txn == nil { + return nil + } + _, err := postgresDataHandler.Txn.Exec(fmt.Sprintf("RELEASE SAVEPOINT %s", savepointName)) + if err != nil { + return errors.Wrapf(err, "PostgresDataHandler.ReleaseSavepoint: Error releasing savepoint") + } + return nil +} + +func generateSavepointName() (string, error) { + // Create a byte slice of length 8 for a 64-bit random value + randomBytes := make([]byte, 8) + _, err := rand.Read(randomBytes) + if err != nil { + return "", errors.Wrapf(err, "generateSavepointName: Error generating random bytes") + } + // Convert the byte slice to a hexadecimal string + return "savepoint_" + hex.EncodeToString(randomBytes), nil +} diff --git a/handler/db_utils.go b/handler/db_utils.go index d1fcc2f..51a38ab 100644 --- a/handler/db_utils.go +++ b/handler/db_utils.go @@ -1,9 +1,9 @@ package handler import ( - "PostgresDataHandler/migrations/initial_migrations" - "PostgresDataHandler/migrations/post_sync_migrations" "context" + "github.com/deso-protocol/postgres-data-handler/migrations/initial_migrations" + "github.com/deso-protocol/postgres-data-handler/migrations/post_sync_migrations" "github.com/golang/glog" "github.com/uptrace/bun" "github.com/uptrace/bun/migrate" diff --git a/local.docker-compose.yml b/local.docker-compose.yml new file mode 100644 index 0000000..9efcae3 --- /dev/null +++ b/local.docker-compose.yml @@ -0,0 +1,144 @@ +version: '3.8' +services: + deso: + # image: docker.io/desoprotocol/backend-dev:451a0a65e27da4ad68cb0705c63b1c964a1ce011 + build: + context: .. 
+ dockerfile: backend/Dockerfile + environment: + - ADD_IPS=localhost:19000 + - PRIVATE_MODE=true + - RUN_HOT_FEED_ROUTINE=false + - API_PORT=18001 + - PROTOCOL_PORT=18000 + - TXINDEX=true + - DATA_DIR=/pd/n0_00001 + - ACCESS_CONTROL_ALLOW_ORIGINS=* + - SECURE_HEADER_ALLOW_HOSTS=localhost:4200 + - SECURE_HEADER_DEVELOPMENT=true + - BLOCK_CYPHER_API_KEY=092dae962ea44b02809a4c74408b42a1 + - MIN_SATOSHIS_FOR_PROFILE=0 + - EXPOSE_GLOBAL_STATE=false + - SHOW_PROCESSING_SPINNERS=true + - COMP_PROFILE_CREATION=false + + # Hypersync Settings: + # - SYNC_TYPE=hypersync + # - HYPERSYNC=true + # Blocksync Settings: + - SYNC_TYPE=blocksync + - HYPERSYNC=false + + # Mainnet Settings: + # - REGTEST=false + # - TESTNET=false + # - CONNECT_IPS=35.232.92.5:17000 + # - STATE_CHANGE_DIR=/ss/state-changes + + # Testnet Settings: + - REGTEST=false + - TESTNET=true + - CONNECT_IPS=35.192.117.201:18000 + - STATE_CHANGE_DIR=/ss/state-changes + - TRUSTED_BLOCK_PRODUCER_PUBLIC_KEYS= + - TRUSTED_BLOCK_PRODUCER_START_HEIGHT=10000000 + + # Regtest Settings: + # - REGTEST=true + # - TESTNET=true + # - STATE_CHANGE_DIR=/ss/state-changes + # - ADMIN_PUBLIC_KEYS=* + # - SUPER_ADMIN_PUBLIC_KEYS=* + # - NUM_MINING_THREADS=1 + # - MINER_PUBLIC_KEYS=BC1YLg7Bk5sq9iNY17bAwoAYiChLYpmWEi6nY6q5gnA1UQV6xixHjfV + # - BLOCK_PRODUCER_SEED=essence camp ghost remove document vault ladder swim pupil index apart ring + # - STARTER_DESO_SEED=road congress client market couple bid risk escape artwork rookie artwork food + ports: + - '18000:18000' + - '18001:18001' + - '19000:19000' + volumes: + - pd_volume:/pd + # - /tmp/state-change-files:/ss + - ss_volume:/ss + healthcheck: + test: [ "CMD-SHELL", "wget --quiet --tries=1 --spider http://deso:18001/api/v0/health-check || exit 1" ] + interval: 30s + timeout: 10s + retries: 20 + start_period: 10s + command: ["run"] + pdh: + # image: docker.io/desoprotocol/postgres-data-handler:c2ff0e2921911d2581685e1794a1324724997c64 + build: + context: .. 
+ dockerfile: postgres-data-handler/Dockerfile + environment: + - DB_HOST=db-ss + - DB_NAME=postgres + - DB_PASSWORD=postgres + - DB_PORT=5432 + - DB_USERNAME=postgres + - READONLY_USER_PASSWORD=postgres + - STATE_CHANGE_DIR=/ss/state-changes + - CONSUMER_PROGRESS_DIR=/ss/consumer-progress + - BATCH_BYTES=500000 + - THREAD_LIMIT=10 + - CALCULATE_EXPLORER_STATISTICS=false + - LOG_QUERIES=false + # Mainnet Settings: + # - IS_TESTNET=false + # Testnet Settings: + - IS_TESTNET=true + command: ["run"] + volumes: + - ss_volume:/ss + depends_on: + db-ss: + condition: service_healthy + # gql: + # image: docker.io/desoprotocol/graphql-api:4255d8c3c5be7911ed7817ef7b1baf979a6d3818 + # environment: + # - DB_HOST=db-ss + # - DB_NAME=postgres + # - DB_PASSWORD=postgres + # - DB_PORT=5432 + # - DB_USERNAME=postgres + # - DB_OWNER_USERNAME=postgres + # - DB_OWNER_PASSWORD=postgres + # - READONLY_USER_PASSWORD=postgres + # - LOG_SQL=true + # - DESO_NODE_URI=http://deso:18001 + # ports: + # - '4000:4000' + # depends_on: + # db-ss: + # condition: service_healthy + # healthcheck: + # test: [ "CMD-SHELL", "wget --quiet --tries=1 --spider --header='Content-Type: application/json' --post-data='{\"query\":\"query {__typename}\"}' http://gql:4000/graphql || exit 1" ] + # interval: 10s + # timeout: 15s + # retries: 30 + # start_period: 10s + db-ss: + image: postgres:14 + # restart: always + environment: + - PGUSER=postgres + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=postgres + - POSTGRES_LOG_STATEMENTS=all + ports: + - '5430:5432' + volumes: + - db_ss_volume:/var/lib/postgresql/data + healthcheck: + test: [ "CMD-SHELL", "pg_isready" ] + interval: 10s + timeout: 5s + retries: 100 +volumes: + db_ss_volume: + pd_volume: + ss_volume: diff --git a/main.go b/main.go index aaaea1c..e96d641 100644 --- a/main.go +++ b/main.go @@ -1,13 +1,13 @@ package main import ( - "PostgresDataHandler/handler" - "PostgresDataHandler/migrations/initial_migrations" - 
"PostgresDataHandler/migrations/post_sync_migrations" "database/sql" "flag" "fmt" "github.com/deso-protocol/core/lib" + "github.com/deso-protocol/postgres-data-handler/handler" + "github.com/deso-protocol/postgres-data-handler/migrations/initial_migrations" + "github.com/deso-protocol/postgres-data-handler/migrations/post_sync_migrations" "github.com/deso-protocol/state-consumer/consumer" "github.com/golang/glog" "github.com/spf13/viper" @@ -22,7 +22,8 @@ import ( func main() { // Initialize flags and get config values. setupFlags() - pgURI, stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readOnlyUserPassword, explorerStatistics, datadogProfiler, isTestnet := getConfigValues() + pgURI, stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readOnlyUserPassword, + explorerStatistics, datadogProfiler, isTestnet, isRegtest := getConfigValues() // Print all the config values in a single printf call broken up // with newlines and make it look pretty both printed out and in code @@ -63,6 +64,9 @@ func main() { params := &lib.DeSoMainnetParams if isTestnet { params = &lib.DeSoTestnetParams + if isRegtest { + params.EnableRegtest() + } } // Initialize and run a state syncer consumer. 
@@ -95,7 +99,7 @@ func setupFlags() { viper.AutomaticEnv() } -func getConfigValues() (pgURI string, stateChangeDir string, consumerProgressDir string, batchBytes uint64, threadLimit int, logQueries bool, readonlyUserPassword string, explorerStatistics bool, datadogProfiler bool, isTestnet bool) { +func getConfigValues() (pgURI string, stateChangeDir string, consumerProgressDir string, batchBytes uint64, threadLimit int, logQueries bool, readonlyUserPassword string, explorerStatistics bool, datadogProfiler bool, isTestnet bool, isRegtest bool) { dbHost := viper.GetString("DB_HOST") dbPort := viper.GetString("DB_PORT") @@ -108,6 +112,8 @@ func getConfigValues() (pgURI string, stateChangeDir string, consumerProgressDir if stateChangeDir == "" { stateChangeDir = "/tmp/state-changes" } + // Set the state change dir flag that core uses, so DeSoEncoders properly encode and decode state change metadata. + viper.Set("state-change-dir", stateChangeDir) consumerProgressDir = viper.GetString("CONSUMER_PROGRESS_DIR") if consumerProgressDir == "" { @@ -129,8 +135,9 @@ func getConfigValues() (pgURI string, stateChangeDir string, consumerProgressDir explorerStatistics = viper.GetBool("CALCULATE_EXPLORER_STATISTICS") datadogProfiler = viper.GetBool("DATADOG_PROFILER") isTestnet = viper.GetBool("IS_TESTNET") + isRegtest = viper.GetBool("REGTEST") - return pgURI, stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readonlyUserPassword, explorerStatistics, datadogProfiler, isTestnet + return pgURI, stateChangeDir, consumerProgressDir, batchBytes, threadLimit, logQueries, readonlyUserPassword, explorerStatistics, datadogProfiler, isTestnet, isRegtest } func setupDb(pgURI string, threadLimit int, logQueries bool, readonlyUserPassword string, calculateExplorerStatistics bool) (*bun.DB, error) { diff --git a/migrations/initial_migrations/20231213000001_create_stake_entry_table.go b/migrations/initial_migrations/20231213000001_create_stake_entry_table.go new file mode 
100644 index 0000000..1653fb9 --- /dev/null +++ b/migrations/initial_migrations/20231213000001_create_stake_entry_table.go @@ -0,0 +1,41 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createStakeEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + staker_pkid VARCHAR NOT NULL, + validator_pkid VARCHAR NOT NULL, + reward_method VARCHAR NOT NULL, + stake_amount_nanos NUMERIC(78, 0) NOT NULL, + + extra_data JSONB, + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + CREATE INDEX {tableName}_staker_pkid_idx ON {tableName} (staker_pkid); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createStakeEntryTable(db, "stake_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS stake_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20231213000002_create_validator_entry_table.go b/migrations/initial_migrations/20231213000002_create_validator_entry_table.go new file mode 100644 index 0000000..7a48b13 --- /dev/null +++ b/migrations/initial_migrations/20231213000002_create_validator_entry_table.go @@ -0,0 +1,44 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createValidatorEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + validator_pkid VARCHAR NOT NULL, + domains VARCHAR ARRAY, + disable_delegated_stake BOOLEAN, + delegated_stake_commission_basis_points BIGINT, + voting_public_key VARCHAR, + voting_authorization VARCHAR, + total_stake_amount_nanos 
NUMERIC(78, 0) NOT NULL, + last_active_at_epoch_number BIGINT, + jailed_at_epoch_number BIGINT, + extra_data JSONB, + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createValidatorEntryTable(db, "validator_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS validator_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20231213000003_create_locked_stake_entry_table.go b/migrations/initial_migrations/20231213000003_create_locked_stake_entry_table.go new file mode 100644 index 0000000..4d13124 --- /dev/null +++ b/migrations/initial_migrations/20231213000003_create_locked_stake_entry_table.go @@ -0,0 +1,41 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createLockedStakeEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + staker_pkid VARCHAR NOT NULL, + validator_pkid VARCHAR NOT NULL, + locked_amount_nanos NUMERIC(78, 0) NOT NULL, + locked_at_epoch_number BIGINT NOT NULL, + + extra_data JSONB, + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + CREATE INDEX {tableName}_staker_pkid_idx ON {tableName} (staker_pkid); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createLockedStakeEntryTable(db, "locked_stake_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS locked_stake_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240129000001_create_locked_balance_entry_table.go b/migrations/initial_migrations/20240129000001_create_locked_balance_entry_table.go new file mode 100644 index 0000000..678f156 --- /dev/null +++ b/migrations/initial_migrations/20240129000001_create_locked_balance_entry_table.go @@ -0,0 +1,41 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createLockedBalanceEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + hodler_pkid VARCHAR NOT NULL, + profile_pkid VARCHAR NOT NULL, + unlock_timestamp_nano_secs BIGINT NOT NULL, + vesting_end_timestamp_nano_secs BIGINT NOT NULL, + balance_base_units NUMERIC(78, 0) NOT NULL, + + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_hodler_pkid_idx ON {tableName} (hodler_pkid); + CREATE INDEX {tableName}_profile_pkid_idx ON {tableName} (profile_pkid); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createLockedBalanceEntryTable(db, "locked_balance_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS locked_balance_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240129000002_create_lockup_yield_curve_point_table.go b/migrations/initial_migrations/20240129000002_create_lockup_yield_curve_point_table.go new file mode 100644 index 0000000..ba27981 --- /dev/null +++ b/migrations/initial_migrations/20240129000002_create_lockup_yield_curve_point_table.go @@ -0,0 +1,38 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createYieldCurvePointTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + profile_pkid VARCHAR NOT NULL, + lockup_duration_nano_secs BIGINT NOT NULL, + lockup_yield_apy_basis_points BIGINT NOT NULL, + + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_profile_pkid_idx ON {tableName} (profile_pkid); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createYieldCurvePointTable(db, "yield_curve_point") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS yield_curve_point; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240129000003_create_epoch_entry_table.go b/migrations/initial_migrations/20240129000003_create_epoch_entry_table.go new file mode 100644 index 0000000..6436870 --- /dev/null +++ b/migrations/initial_migrations/20240129000003_create_epoch_entry_table.go @@ -0,0 +1,45 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +// TODO: indexes +func createEpochEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + epoch_number BIGINT PRIMARY KEY NOT NULL, + initial_block_height BIGINT NOT NULL, + initial_view BIGINT NOT NULL, + final_block_height BIGINT NOT NULL, + initial_leader_index_offset BIGINT NOT NULL, + created_at_block_timestamp_nano_secs BIGINT NOT NULL, + snapshot_at_epoch_number BIGINT NOT NULL + ); + + CREATE INDEX {tableName}_epoch_number_idx ON {tableName} (epoch_number); + CREATE INDEX {tableName}_initial_block_height_idx ON {tableName} (initial_block_height); + CREATE INDEX {tableName}_final_block_height_idx ON {tableName} (final_block_height); + CREATE INDEX {tableName}_snapshot_at_epoch_number_idx ON {tableName} (snapshot_at_epoch_number); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createEpochEntryTable(db, "epoch_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS epoch_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240201000001_update_block_table_with_pos_types.go b/migrations/initial_migrations/20240201000001_update_block_table_with_pos_types.go new file mode 100644 index 0000000..2bf864a --- /dev/null +++ b/migrations/initial_migrations/20240201000001_update_block_table_with_pos_types.go @@ -0,0 +1,39 @@ +package initial_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func updateBlockTableWithPoSFields(db *bun.DB, tableName string) error { + _, err := db.Exec(` + ALTER TABLE block + ADD COLUMN block_version BIGINT, + ADD COLUMN proposer_voting_public_key VARCHAR, + ADD COLUMN proposer_random_seed_signature VARCHAR, + ADD COLUMN proposed_in_view BIGINT, + ADD COLUMN proposer_vote_partial_signature VARCHAR; + `) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return updateBlockTableWithPoSFields(db, "block") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + ALTER TABLE block + DROP COLUMN block_version, + DROP COLUMN proposer_voting_public_key, + DROP COLUMN proposer_random_seed_signature, + DROP COLUMN proposed_in_view, + DROP COLUMN proposer_vote_partial_signature; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240212000001_add_transaction_partitions_pos.go b/migrations/initial_migrations/20240212000001_add_transaction_partitions_pos.go new file mode 100644 index 0000000..7eae2fd --- /dev/null +++ b/migrations/initial_migrations/20240212000001_add_transaction_partitions_pos.go @@ -0,0 +1,44 @@ +package initial_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + CREATE TABLE transaction_partition_34 PARTITION OF transaction_partitioned FOR VALUES IN (34); + CREATE TABLE transaction_partition_35 PARTITION OF transaction_partitioned FOR VALUES IN (35); + CREATE TABLE transaction_partition_36 PARTITION OF transaction_partitioned FOR VALUES IN (36); + CREATE TABLE transaction_partition_37 PARTITION OF transaction_partitioned FOR VALUES IN (37); + CREATE TABLE transaction_partition_38 PARTITION OF transaction_partitioned FOR VALUES IN (38); + CREATE TABLE transaction_partition_39 PARTITION OF transaction_partitioned FOR VALUES IN (39); + CREATE TABLE transaction_partition_40 PARTITION OF transaction_partitioned FOR VALUES IN (40); + CREATE TABLE transaction_partition_41 PARTITION OF transaction_partitioned FOR VALUES IN (41); + CREATE TABLE transaction_partition_42 PARTITION OF transaction_partitioned FOR VALUES IN (42); + CREATE TABLE transaction_partition_43 PARTITION OF transaction_partitioned FOR VALUES IN 
(43); + `) + if err != nil { + return err + } + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS transaction_partition_34; + DROP TABLE IF EXISTS transaction_partition_35; + DROP TABLE IF EXISTS transaction_partition_36; + DROP TABLE IF EXISTS transaction_partition_37; + DROP TABLE IF EXISTS transaction_partition_38; + DROP TABLE IF EXISTS transaction_partition_39; + DROP TABLE IF EXISTS transaction_partition_40; + DROP TABLE IF EXISTS transaction_partition_41; + DROP TABLE IF EXISTS transaction_partition_42; + DROP TABLE IF EXISTS transaction_partition_43; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240212000002_create_stake_reward_table.go b/migrations/initial_migrations/20240212000002_create_stake_reward_table.go new file mode 100644 index 0000000..3d9520a --- /dev/null +++ b/migrations/initial_migrations/20240212000002_create_stake_reward_table.go @@ -0,0 +1,44 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createStakeRewardTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + staker_pkid VARCHAR NOT NULL, + validator_pkid VARCHAR NOT NULL, + reward_method SMALLINT NOT NULL, + reward_nanos BIGINT NOT NULL, + is_validator_commission BOOLEAN NOT NULL, + block_hash VARCHAR NOT NULL, + utxo_op_index BIGINT NOT NULL, + PRIMARY KEY(block_hash, utxo_op_index) + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + CREATE INDEX {tableName}_staker_pkid_idx ON {tableName} (staker_pkid); + CREATE INDEX {tableName}_block_hash_idx ON {tableName} (block_hash); + CREATE INDEX {tableName}_is_validator_commission_idx ON {tableName} (is_validator_commission); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createStakeRewardTable(db, "stake_reward") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS stake_reward; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240213000001_update_transaction_types_table.go b/migrations/initial_migrations/20240213000001_update_transaction_types_table.go new file mode 100644 index 0000000..6371d33 --- /dev/null +++ b/migrations/initial_migrations/20240213000001_update_transaction_types_table.go @@ -0,0 +1,37 @@ +package initial_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + INSERT INTO transaction_type (type, name) VALUES + (34, 'Register As Validator'), + (35, 'Unregister As Validator'), + (36, 'Stake'), + (37, 'Unstake'), + (38, 'Unlock Stake'), + (39, 'Unjail Validator'), + (40, 'Coin Lockup'), + (41, 'Update Coin Lockup Params'), + (42, 'Coin Lockup Transfer'), + (43, 'Coin Unlock'); + `) + if err != nil { + return err + } + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + delete from transaction_type where type >= 34 AND type <= 43; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240215000001_create_leader_schedule.go b/migrations/initial_migrations/20240215000001_create_leader_schedule.go new file mode 100644 index 0000000..d9bd6e7 --- /dev/null +++ b/migrations/initial_migrations/20240215000001_create_leader_schedule.go @@ -0,0 +1,37 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +func createLeaderScheduleTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + validator_pkid VARCHAR 
NOT NULL, + snapshot_at_epoch_number BIGINT NOT NULL, + leader_index INTEGER NOT NULL, + badger_key BYTEA PRIMARY KEY NOT NULL + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + CREATE INDEX {tableName}_snapshot_at_epoch_number_idx ON {tableName} (snapshot_at_epoch_number); + CREATE INDEX {tableName}_snapshot_at_epoch_number_leader_index_idx ON {tableName} (snapshot_at_epoch_number, leader_index); + `, "{tableName}", tableName, -1)) + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createLeaderScheduleTable(db, "leader_schedule_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS leader_schedule_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240220000001_create_global_params_entry_table.go b/migrations/initial_migrations/20240220000001_create_global_params_entry_table.go new file mode 100644 index 0000000..179831c --- /dev/null +++ b/migrations/initial_migrations/20240220000001_create_global_params_entry_table.go @@ -0,0 +1,58 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createGlobalParamsEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + usd_cents_per_bitcoin BIGINT NOT NULL, + create_profile_fee_nanos BIGINT NOT NULL, + create_nft_fee_nanos BIGINT NOT NULL, + max_copies_per_nft BIGINT NOT NULL, + minimum_network_fee_nanos_per_kb BIGINT NOT NULL, + max_nonce_expiration_block_height_offset BIGINT NOT NULL, + stake_lockup_epoch_duration BIGINT NOT NULL, + validator_jail_epoch_duration BIGINT NOT NULL, + leader_schedule_max_num_validators BIGINT NOT NULL, + validator_set_max_num_validators BIGINT NOT NULL, + staking_rewards_max_num_stakes BIGINT NOT NULL, + staking_rewards_apy_basis_points BIGINT 
NOT NULL, + epoch_duration_num_blocks BIGINT NOT NULL, + jail_inactive_validator_grace_period_epochs BIGINT NOT NULL, + maximum_vested_intersections_per_lockup_transaction INT NOT NULL, + fee_bucket_growth_rate_basis_points BIGINT NOT NULL, + block_timestamp_drift_nano_secs BIGINT NOT NULL, + mempool_max_size_bytes BIGINT NOT NULL, + mempool_fee_estimator_num_mempool_blocks BIGINT NOT NULL, + mempool_fee_estimator_num_past_blocks BIGINT NOT NULL, + max_block_size_bytes_pos BIGINT NOT NULL, + soft_max_block_size_bytes_pos BIGINT NOT NULL, + max_txn_size_bytes_pos BIGINT NOT NULL, + block_production_interval_milliseconds_pos BIGINT NOT NULL, + timeout_interval_milliseconds_pos BIGINT NOT NULL, + badger_key BYTEA PRIMARY KEY + ); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createGlobalParamsEntryTable(db, "global_params_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS global_params_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240220000003_create_bls_public_key_pkid_pair_entry_table.go b/migrations/initial_migrations/20240220000003_create_bls_public_key_pkid_pair_entry_table.go new file mode 100644 index 0000000..627525e --- /dev/null +++ b/migrations/initial_migrations/20240220000003_create_bls_public_key_pkid_pair_entry_table.go @@ -0,0 +1,37 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createBLSPublicKeyPKIDPairEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + pkid VARCHAR NOT NULL, + bls_public_key VARCHAR NOT NULL, + + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_pkid_idx ON {tableName} (pkid); + CREATE INDEX 
{tableName}_bls_public_key_idx ON {tableName} (bls_public_key); + `, "{tableName}", tableName, -1)) + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createBLSPublicKeyPKIDPairEntryTable(db, "bls_public_key_pkid_pair_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS bls_public_key_pkid_pair_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240220000005_create_jailed_history_event_table.go b/migrations/initial_migrations/20240220000005_create_jailed_history_event_table.go new file mode 100644 index 0000000..dcc996c --- /dev/null +++ b/migrations/initial_migrations/20240220000005_create_jailed_history_event_table.go @@ -0,0 +1,36 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +func createJailedHistoryEventTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + validator_pkid VARCHAR NOT NULL, + jailed_at_epoch_number BIGINT NOT NULL, + unjailed_at_epoch_number BIGINT NOT NULL, + PRIMARY KEY(validator_pkid, jailed_at_epoch_number, unjailed_at_epoch_number) + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? 
+ return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createJailedHistoryEventTable(db, "jailed_history_event") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS jailed_history_event; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240260000001_create_bls_public_key_pkid_pair_snapshot_entry_table.go b/migrations/initial_migrations/20240260000001_create_bls_public_key_pkid_pair_snapshot_entry_table.go new file mode 100644 index 0000000..a027de2 --- /dev/null +++ b/migrations/initial_migrations/20240260000001_create_bls_public_key_pkid_pair_snapshot_entry_table.go @@ -0,0 +1,39 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createBLSPublicKeyPKIDPairSnapshotEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + pkid VARCHAR NOT NULL, + bls_public_key VARCHAR NOT NULL, + snapshot_at_epoch_number BIGINT NOT NULL, + + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_pkid_idx ON {tableName} (pkid); + CREATE INDEX {tableName}_bls_public_key_idx ON {tableName} (bls_public_key); + CREATE INDEX {tableName}_snapshot_at_epoch_number_idx ON {tableName} (snapshot_at_epoch_number); + `, "{tableName}", tableName, -1)) + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createBLSPublicKeyPKIDPairSnapshotEntryTable(db, "bls_public_key_pkid_pair_snapshot_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS bls_public_key_pkid_pair_snapshot_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240416000001_add_transaction_partitions_for_atomics.go 
b/migrations/initial_migrations/20240416000001_add_transaction_partitions_for_atomics.go new file mode 100644 index 0000000..528b726 --- /dev/null +++ b/migrations/initial_migrations/20240416000001_add_transaction_partitions_for_atomics.go @@ -0,0 +1,26 @@ +package initial_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + CREATE TABLE transaction_partition_44 PARTITION OF transaction_partitioned FOR VALUES IN (44); + `) + if err != nil { + return err + } + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS transaction_partition_44; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240418000001_add_atomic_txn_fields_to_transaction_partitioned.go b/migrations/initial_migrations/20240418000001_add_atomic_txn_fields_to_transaction_partitioned.go new file mode 100644 index 0000000..b9ff7ad --- /dev/null +++ b/migrations/initial_migrations/20240418000001_add_atomic_txn_fields_to_transaction_partitioned.go @@ -0,0 +1,45 @@ +package initial_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + ALTER TABLE transaction_partitioned ALTER COLUMN index_in_block DROP NOT NULL; + ALTER TABLE transaction_partitioned ADD COLUMN wrapper_transaction_hash VARCHAR; + ALTER TABLE transaction_partitioned ADD COLUMN index_in_wrapper_transaction BIGINT; + `) + if err != nil { + return err + } + _, err = db.Exec(` + CREATE OR REPLACE VIEW transaction AS + SELECT * FROM transaction_partitioned; + `) + if err != nil { + return err + } + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DELETE FROM transaction_partitioned where index_in_block IS NULL; + ALTER TABLE transaction_partitioned ALTER COLUMN 
index_in_block SET NOT NULL; + ALTER TABLE transaction_partitioned DROP COLUMN wrapper_transaction_hash CASCADE; + ALTER TABLE transaction_partitioned DROP COLUMN index_in_wrapper_transaction CASCADE; + `) + if err != nil { + return err + } + _, err = db.Exec(` + CREATE OR REPLACE VIEW transaction AS + SELECT * FROM transaction_partitioned; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240418000002_add_atomic_txn_type_to_txn_types.go b/migrations/initial_migrations/20240418000002_add_atomic_txn_type_to_txn_types.go new file mode 100644 index 0000000..b203b89 --- /dev/null +++ b/migrations/initial_migrations/20240418000002_add_atomic_txn_type_to_txn_types.go @@ -0,0 +1,27 @@ +package initial_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + INSERT INTO transaction_type (type, name) VALUES + (44, 'Atomic Transaction'); + `) + if err != nil { + return err + } + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + delete from transaction_type where type = 44; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240425000001_create_snapshot_validator_entry_table.go b/migrations/initial_migrations/20240425000001_create_snapshot_validator_entry_table.go new file mode 100644 index 0000000..8997cf9 --- /dev/null +++ b/migrations/initial_migrations/20240425000001_create_snapshot_validator_entry_table.go @@ -0,0 +1,48 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createSnapshotValidatorEntryTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + validator_pkid VARCHAR NOT NULL, + domains VARCHAR ARRAY, + disable_delegated_stake BOOLEAN, + 
delegated_stake_commission_basis_points BIGINT, + voting_public_key VARCHAR, + voting_authorization VARCHAR, + total_stake_amount_nanos NUMERIC(78, 0) NOT NULL, + last_active_at_epoch_number BIGINT, + jailed_at_epoch_number BIGINT, + extra_data JSONB, + snapshot_at_epoch_number BIGINT NOT NULL, + badger_key BYTEA PRIMARY KEY + ); + CREATE INDEX {tableName}_validator_pkid_idx ON {tableName} (validator_pkid); + CREATE INDEX {tableName}_snapshot_at_epoch_number_idx ON {tableName} (snapshot_at_epoch_number); + CREATE INDEX {tableName}_total_stake_amount_nanos on {tableName} (total_stake_amount_nanos); + CREATE INDEX {tableName}_badger_key ON {tableName} (badger_key); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createSnapshotValidatorEntryTable(db, "snapshot_validator_entry") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS snapshot_validator_entry; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/initial_migrations/20240425000002_create_block_signer_table.go b/migrations/initial_migrations/20240425000002_create_block_signer_table.go new file mode 100644 index 0000000..f6726e9 --- /dev/null +++ b/migrations/initial_migrations/20240425000002_create_block_signer_table.go @@ -0,0 +1,39 @@ +package initial_migrations + +import ( + "context" + "strings" + + "github.com/uptrace/bun" +) + +// TODO: Not nullable fields +func createBlockSignerTable(db *bun.DB, tableName string) error { + _, err := db.Exec(strings.Replace(` + CREATE TABLE {tableName} ( + block_hash VARCHAR NOT NULL, + signer_index BIGINT NOT NULL, + PRIMARY KEY(block_hash, signer_index) + ); + CREATE INDEX {tableName}_block_hash_idx ON {tableName} (block_hash); + CREATE INDEX {tableName}_block_hash_signer_index_idx ON {tableName} (block_hash, signer_index); + CREATE INDEX 
{tableName}_signer_index_idx ON {tableName} (signer_index); + create index block_proposer_voting_public_key on block (proposer_voting_public_key); + `, "{tableName}", tableName, -1)) + // TODO: What other fields do we need indexed? + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + return createBlockSignerTable(db, "block_signer") + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE IF EXISTS block_signer; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240213000002_create_pos_fk_comments.go b/migrations/post_sync_migrations/20240213000002_create_pos_fk_comments.go new file mode 100644 index 0000000..37824bd --- /dev/null +++ b/migrations/post_sync_migrations/20240213000002_create_pos_fk_comments.go @@ -0,0 +1,72 @@ +package post_sync_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +// TODO: revisit access group relationships when we refactor the messaging app to use the graphql API. 
+func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table stake_reward is E'@foreignKey (staker_pkid) references account (pkid)|@foreignFieldName stakeRewards|@fieldName staker\n@foreignKey (validator_pkid) references account (pkid)|@foreignFieldName validatorStakeRewards|@fieldName validator\n@foreignKey (block_hash) references block (block_hash)|@foreignFieldName stakeRewardForBlock|@fieldName block'; + comment on table stake_entry is E'@foreignKey (staker_pkid) references account (pkid)|@foreignFieldName stakeEntries|@fieldName staker\n@foreignKey (validator_pkid) references account (pkid)|@foreignFieldName validatorStakeEntries|@fieldName validatorAccount\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName stakeEntries|@fieldName validatorEntry'; + comment on table validator_entry is E'@unique validator_pkid\n@foreignKey (validator_pkid) references account (pkid)|@foreignFieldName validatorEntry|@fieldName account'; + comment on table locked_stake_entry is E'@foreignKey (staker_pkid) references account (pkid)|@foreignFieldName lockedStakeEntries|@fieldName staker\n@foreignKey (validator_pkid) references account (pkid)|@foreignFieldName validatorLockedStakeEntries|@fieldName validatorAccount\n@foreignKey (validator_pkid) references validator_entry(validator_pkid)|@foreignFieldName validatorLockedStakeEntries|@fieldName validatorEntry'; + comment on table yield_curve_point is E'@foreignKey (profile_pkid) references account (pkid)|@foreignFieldName yieldCurvePoints|@fieldName account'; + comment on table locked_balance_entry is E'@foreignKey (profile_pkid) references account (pkid)|@foreignFieldName profileLockedBalanceEntries|@fieldName profileAccount\n@foreignKey (hodler_pkid) references account (pkid)|@foreignFieldName hodlerLockedBalanceEntries|@fieldName hodlerAccount'; + comment on column stake_entry.badger_key is E'@omit'; + comment on column 
validator_entry.badger_key is E'@omit'; + comment on column locked_stake_entry.badger_key is E'@omit'; + comment on column yield_curve_point.badger_key is E'@omit'; + comment on column locked_balance_entry.badger_key is E'@omit'; + comment on table transaction_partition_34 is E'@omit'; + comment on table transaction_partition_35 is E'@omit'; + comment on table transaction_partition_36 is E'@omit'; + comment on table transaction_partition_37 is E'@omit'; + comment on table transaction_partition_38 is E'@omit'; + comment on table transaction_partition_39 is E'@omit'; + comment on table transaction_partition_40 is E'@omit'; + comment on table transaction_partition_41 is E'@omit'; + comment on table transaction_partition_42 is E'@omit'; + comment on table transaction_partition_43 is E'@omit'; + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table stake_reward is NULL; + comment on table stake_entry is NULL; + comment on table validator_entry is NULL; + comment on table locked_stake_entry is NULL; + comment on table yield_curve_point is NULL; + comment on table locked_balance_entry is NULL; + comment on table block is E'@unique block_hash\n@unique height'; + comment on column stake_entry.badger_key is NULL; + comment on column validator_entry.badger_key is NULL; + comment on column locked_stake_entry.badger_key is NULL; + comment on column yield_curve_point.badger_key is NULL; + comment on column locked_balance_entry.badger_key is NULL; + comment on column epoch_entry.badger_key is NULL; + comment on table transaction_partition_34 is NULL; + comment on table transaction_partition_35 is NULL; + comment on table transaction_partition_36 is NULL; + comment on table transaction_partition_37 is NULL; + comment on table transaction_partition_38 is NULL; + comment on table transaction_partition_39 is NULL; + comment on table transaction_partition_40 is NULL; + comment on table 
transaction_partition_41 is NULL; + comment on table transaction_partition_42 is NULL; + comment on table transaction_partition_43 is NULL; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240215000002_create_leader_schedule_fk_comments.go b/migrations/post_sync_migrations/20240215000002_create_leader_schedule_fk_comments.go new file mode 100644 index 0000000..eebde8b --- /dev/null +++ b/migrations/post_sync_migrations/20240215000002_create_leader_schedule_fk_comments.go @@ -0,0 +1,31 @@ +package post_sync_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table leader_schedule_entry is E'@foreignKey (validator_pkid) references account (pkid)|@foreignFieldName leaderScheduleEntries|@fieldName leaderAccount\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName leaderScheduleEntries|@fieldName validatorEntry\n@foreignKey (snapshot_at_epoch_number) references epoch_entry (snapshot_at_epoch_number)|@foreignFieldName leaderScheduleEntries|@fieldName epochEntryBySnapshot'; + comment on column leader_schedule_entry.badger_key is E'@omit'; + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table leader_schedule_entry is NULL; + comment on column leader_schedule_entry.badger_key is NULL; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240220000002_create_global_params_comments.go b/migrations/post_sync_migrations/20240220000002_create_global_params_comments.go new file mode 100644 index 0000000..f4dc653 --- /dev/null +++ b/migrations/post_sync_migrations/20240220000002_create_global_params_comments.go @@ -0,0 +1,29 @@ +package post_sync_migrations + +import ( + "context" + + 
"github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on column global_params_entry.badger_key is E'@omit'; + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on column global_params_entry.badger_key is NULL; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240220000006_create_jailed_history_event_comments.go b/migrations/post_sync_migrations/20240220000006_create_jailed_history_event_comments.go new file mode 100644 index 0000000..725e701 --- /dev/null +++ b/migrations/post_sync_migrations/20240220000006_create_jailed_history_event_comments.go @@ -0,0 +1,29 @@ +package post_sync_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table jailed_history_event is E'@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName jailedHistoryEvents|@fieldName validatorEntry\n@foreignKey (validator_pkid) references account (pkid)|@foreignFieldName jailedHistoryEvents|@fieldName account'; + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table jailed_history_event is NULL; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go b/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go new file mode 100644 index 0000000..1c310f3 --- /dev/null +++ b/migrations/post_sync_migrations/20240260000002_create_pos_stat_views_and_fk_comments_for_snapshot_bls.go @@ -0,0 +1,123 @@ +package post_sync_migrations + +import ( 
+ "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + +CREATE OR REPLACE VIEW epoch_details_for_block as +select block_hash, epoch_number, bls.pkid as proposer_pkid +from block + left join epoch_entry + on epoch_entry.initial_block_height <= block.height and + epoch_entry.final_block_height >= block.height + left join bls_public_key_pkid_pair_snapshot_entry bls + on bls.snapshot_at_epoch_number = epoch_entry.snapshot_at_epoch_number and + block.proposer_voting_public_key = bls.bls_public_key; + + comment on view epoch_details_for_block is E'@unique block_hash\n@unique epoch_number\n@foreignKey (block_hash) references block (block_hash)|@foreignFieldName epochDetailForBlock|@fieldName block\n@foreignKey (epoch_number) references epoch_entry (epoch_number)|@foreignFieldName blockHashesInEpoch|@fieldName epochEntry\n@foreignKey (proposer_pkid) references account (pkid)|@foreignFieldName proposedBlockHashes|@fieldName proposer'; + comment on table bls_public_key_pkid_pair_snapshot_entry is E'@foreignKey (pkid) references account (pkid)|@foreignFieldName blsPublicKeyPkidPairSnapshotEntries|@fieldName account\n@foreignKey (snapshot_at_epoch_number) references epoch_entry (snapshot_at_epoch_number)|@foreignFieldName blsPublicKeyPkidPairSnapshotEntries|@fieldName epochEntry'; + comment on column bls_public_key_pkid_pair_snapshot_entry.badger_key is E'@omit'; +`) + if err != nil { + return err + } + if !calculateExplorerStatistics { + return nil + } + _, err = db.Exec(` +CREATE MATERIALIZED VIEW my_stake_summary as +select coalesce(total_stake_rewards.staker_pkid, total_stake_amount.staker_pkid) as staker_pkid, + total_stake_rewards.total_rewards as total_stake_rewards, + total_stake_amount.total_stake as total_stake +from (select staker_pkid, sum(reward_nanos) total_rewards + from stake_reward + group by staker_pkid) total_stake_rewards + full outer join + (select 
staker_pkid, sum(stake_amount_nanos) total_stake + from stake_entry + group by staker_pkid) total_stake_amount + on total_stake_amount.staker_pkid = total_stake_rewards.staker_pkid; + +CREATE UNIQUE INDEX my_stake_summary_unique_index ON my_stake_summary (staker_pkid); + +CREATE MATERIALIZED VIEW staking_summary as +select * +from (select sum(total_stake_amount_nanos) as global_stake_amount_nanos, + count(distinct validator_pkid) as num_validators + from validator_entry) validator_summary, + (select max(epoch_number) current_epoch_number from epoch_entry) current_epoch, + (select count(distinct snapshot_at_epoch_number) num_epochs_in_leader_schedule + from leader_schedule_entry) num_epochs_in_leader_schedule, + (select count(distinct staker_pkid) as num_stakers from stake_entry) staker_summary; + +CREATE UNIQUE INDEX staking_summary_unique_index ON staking_summary (global_stake_amount_nanos, num_validators, current_epoch_number, num_epochs_in_leader_schedule); + +CREATE MATERIALIZED VIEW validator_stats as +select validator_entry.validator_pkid, + rank() OVER ( order by validator_entry.total_stake_amount_nanos) as validator_rank, + validator_entry.total_stake_amount_nanos::float / + staking_summary.global_stake_amount_nanos::float as percent_total_stake, + coalesce(time_in_jail, 0) + + (case + when jailed_at_epoch_number = 0 then 0 + else (staking_summary.current_epoch_number - jailed_at_epoch_number) END) epochs_in_jail, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0) num_epochs_in_leader_schedule, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0)::float / + staking_summary.num_epochs_in_leader_schedule::float as percent_epochs_in_leader_schedule, + coalesce(total_rewards, 0) as total_stake_reward_nanos +from staking_summary, + validator_entry + left join (select validator_pkid, sum(jhe.unjailed_at_epoch_number - jhe.jailed_at_epoch_number) time_in_jail + from jailed_history_event jhe + group by validator_pkid) jhe + on 
jhe.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, count(*) as num_epochs_in_leader_schedule + from leader_schedule_entry + group by validator_pkid) leader_schedule_summary + on leader_schedule_summary.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, sum(reward_nanos) as total_rewards + from stake_reward + group by validator_pkid) as total_stake_rewards + on total_stake_rewards.validator_pkid = validator_entry.validator_pkid; + +CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_pkid); + + comment on materialized view validator_stats is E'@unique validator_pkid\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName validatorStats|@fieldName validatorEntry'; + comment on materialized view my_stake_summary is E'@unique staker_pkid\n@foreignKey (staker_pkid) references account (pkid)|@foreignFieldName myStakeSummary|@fieldName staker'; + +`) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on column bls_public_key_pkid_pair_snapshot_entry.badger_key is NULL; + comment on table bls_public_key_pkid_pair_snapshot_entry is NULL; + DROP VIEW IF EXISTS epoch_details_for_block CASCADE; +`) + if err != nil { + return err + } + if !calculateExplorerStatistics { + return nil + } + _, err = db.Exec(` + DROP MATERIALIZED VIEW IF EXISTS validator_stats CASCADE; + DROP MATERIALIZED VIEW IF EXISTS staking_summary CASCADE; + DROP MATERIALIZED VIEW IF EXISTS my_stake_summary CASCADE; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240301000001_update_validator_stats_view.go b/migrations/post_sync_migrations/20240301000001_update_validator_stats_view.go new file mode 100644 index 0000000..80737ee --- /dev/null +++ b/migrations/post_sync_migrations/20240301000001_update_validator_stats_view.go @@ -0,0 +1,98 
@@ +package post_sync_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + if !calculateExplorerStatistics { + return nil + } + _, err := db.Exec(` + DROP MATERIALIZED VIEW IF EXISTS validator_stats; +create materialized view validator_stats as +select validator_entry.validator_pkid, + rank() + OVER ( order by (case when validator_entry.jailed_at_epoch_number = 0 then 0 else 1 end), validator_entry.total_stake_amount_nanos desc, validator_entry.jailed_at_epoch_number desc, validator_entry.validator_pkid) as validator_rank, + validator_entry.total_stake_amount_nanos::float / + coalesce(nullif(staking_summary.global_stake_amount_nanos::float, 0), + 1) as percent_total_stake, + coalesce(time_in_jail, 0) + + (case + when jailed_at_epoch_number = 0 then 0 + else (staking_summary.current_epoch_number - jailed_at_epoch_number) END) epochs_in_jail, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0) as num_epochs_in_leader_schedule, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0)::float / + coalesce(nullif(staking_summary.num_epochs_in_leader_schedule::float, 0), + 1) as percent_epochs_in_leader_schedule, + coalesce(total_rewards, 0) as total_stake_reward_nanos +from staking_summary, + validator_entry + left join (select validator_pkid, sum(jhe.unjailed_at_epoch_number - jhe.jailed_at_epoch_number) time_in_jail + from jailed_history_event jhe + group by validator_pkid) jhe + on jhe.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, count(*) as num_epochs_in_leader_schedule + from leader_schedule_entry + group by validator_pkid) leader_schedule_summary + on leader_schedule_summary.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, sum(reward_nanos) as total_rewards + from stake_reward + group by validator_pkid) as total_stake_rewards + on 
total_stake_rewards.validator_pkid = validator_entry.validator_pkid; +CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_pkid); + comment on materialized view validator_stats is E'@primaryKey validator_pkid\n@unique validator_rank\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName validatorStats|@fieldName validatorEntry'; +`) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + if !calculateExplorerStatistics { + return nil + } + _, err := db.Exec(` + DROP MATERIALIZED VIEW IF EXISTS validator_stats CASCADE; +CREATE MATERIALIZED VIEW validator_stats as +select validator_entry.validator_pkid, + rank() OVER ( order by validator_entry.total_stake_amount_nanos) as validator_rank, + validator_entry.total_stake_amount_nanos::float / + staking_summary.global_stake_amount_nanos::float as percent_total_stake, + coalesce(time_in_jail, 0) + + (case + when jailed_at_epoch_number = 0 then 0 + else (staking_summary.current_epoch_number - jailed_at_epoch_number) END) epochs_in_jail, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0) num_epochs_in_leader_schedule, + coalesce(leader_schedule_summary.num_epochs_in_leader_schedule, 0)::float / + staking_summary.num_epochs_in_leader_schedule::float as percent_epochs_in_leader_schedule, + coalesce(total_rewards, 0) as total_stake_reward_nanos +from staking_summary, + validator_entry + left join (select validator_pkid, sum(jhe.unjailed_at_epoch_number - jhe.jailed_at_epoch_number) time_in_jail + from jailed_history_event jhe + group by validator_pkid) jhe + on jhe.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, count(*) as num_epochs_in_leader_schedule + from leader_schedule_entry + group by validator_pkid) leader_schedule_summary + on leader_schedule_summary.validator_pkid = validator_entry.validator_pkid + left join (select validator_pkid, sum(reward_nanos) as 
total_rewards + from stake_reward + group by validator_pkid) as total_stake_rewards + on total_stake_rewards.validator_pkid = validator_entry.validator_pkid; + +CREATE UNIQUE INDEX validator_stats_unique_index ON validator_stats (validator_pkid); + + comment on materialized view validator_stats is E'@unique validator_pkid\n@foreignKey (validator_pkid) references validator_entry (validator_pkid)|@foreignFieldName validatorStats|@fieldName validatorEntry'; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240416000002_create_atomic_txn_tables_and_comments.go b/migrations/post_sync_migrations/20240416000002_create_atomic_txn_tables_and_comments.go new file mode 100644 index 0000000..d18fee1 --- /dev/null +++ b/migrations/post_sync_migrations/20240416000002_create_atomic_txn_tables_and_comments.go @@ -0,0 +1,29 @@ +package post_sync_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table transaction_partition_44 is E'@omit'; + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on table transaction_partition_44 is NULL; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go b/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go new file mode 100644 index 0000000..1fce957 --- /dev/null +++ b/migrations/post_sync_migrations/20240418000003_update_get_transaction_count_function.go @@ -0,0 +1,61 @@ +package post_sync_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + err := RunMigrationWithRetries(db, ` + CREATE OR REPLACE FUNCTION 
get_transaction_count(transaction_type integer) + RETURNS bigint AS + $BODY$ + DECLARE + count_value bigint; + padded_transaction_type varchar; + BEGIN + IF transaction_type < 1 OR transaction_type > 44 THEN + RAISE EXCEPTION '% is not a valid transaction type', transaction_type; + END IF; + + padded_transaction_type := LPAD(transaction_type::text, 2, '0'); + + EXECUTE format('SELECT COALESCE(NULLIF(COALESCE(reltuples::bigint, 0), -1), 0) FROM pg_class WHERE relname = ''transaction_partition_%s''', padded_transaction_type) INTO count_value; + RETURN count_value; + END; + $BODY$ + LANGUAGE plpgsql + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + err := RunMigrationWithRetries(db, ` + CREATE OR REPLACE FUNCTION get_transaction_count(transaction_type integer) + RETURNS bigint AS + $BODY$ + DECLARE + count_value bigint; + padded_transaction_type varchar; + BEGIN + IF transaction_type < 1 OR transaction_type > 33 THEN + RAISE EXCEPTION '% is not a valid transaction type', transaction_type; + END IF; + + padded_transaction_type := LPAD(transaction_type::text, 2, '0'); + + EXECUTE format('SELECT COALESCE(reltuples::bigint, 0) FROM pg_class WHERE relname = ''transaction_partition_%s''', padded_transaction_type) INTO count_value; + RETURN count_value; + END; + $BODY$ + LANGUAGE plpgsql + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240418000004_add_smart_comments_for_atomic_txn_fields.go b/migrations/post_sync_migrations/20240418000004_add_smart_comments_for_atomic_txn_fields.go new file mode 100644 index 0000000..6e06381 --- /dev/null +++ b/migrations/post_sync_migrations/20240418000004_add_smart_comments_for_atomic_txn_fields.go @@ -0,0 +1,33 @@ +package post_sync_migrations + +import ( + "context" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + comment on 
view transaction is E'@foreignKey (block_hash) references block (block_hash)|@foreignFieldName transactions|@fieldName block\n@foreignKey (public_key) references account (public_key)|@foreignFieldName transactions|@fieldName account\n@unique transaction_hash\n@foreignKey (wrapper_transaction_hash) references transaction (transaction_hash)|@foreignFieldName innerTransactions|@fieldName wrapperTransaction'; + CREATE INDEX transaction_wrapper_transaction_hash_idx ON transaction_partitioned (wrapper_transaction_hash desc); + CREATE INDEX transaction_wrapper_transaction_hash_and_idx_in_wrapper_idx ON transaction_partitioned (wrapper_transaction_hash desc, index_in_wrapper_transaction desc); + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP INDEX transaction_wrapper_transaction_hash_idx; + DROP INDEX transaction_wrapper_transaction_hash_and_idx_in_wrapper_idx; + comment on view transaction is E'@foreignKey (block_hash) references block (block_hash)|@foreignFieldName transactions|@fieldName block\n@foreignKey (public_key) references account (public_key)|@foreignFieldName transactions|@fieldName account\n@unique transaction_hash'; + `) + if err != nil { + return err + } + + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240419000001_update_statistc_txn_count_all.go b/migrations/post_sync_migrations/20240419000001_update_statistc_txn_count_all.go new file mode 100644 index 0000000..98d3974 --- /dev/null +++ b/migrations/post_sync_migrations/20240419000001_update_statistc_txn_count_all.go @@ -0,0 +1,113 @@ +package post_sync_migrations + +import ( + "context" + "fmt" + + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + if !calculateExplorerStatistics { + return nil + } + + err := RunMigrationWithRetries(db, fmt.Sprintf(` + DROP MATERIALIZED VIEW IF EXISTS statistic_txn_count_all CASCADE; + CREATE 
MATERIALIZED VIEW statistic_txn_count_all AS + SELECT SUM(get_transaction_count(s.i)) as count, + 0 as id + FROM generate_series(1, 44) AS s(i); + + CREATE UNIQUE INDEX statistic_txn_count_all_unique_index ON statistic_txn_count_all (id); + comment on materialized view statistic_txn_count_all is E'@omit'; + %v +`, buildStatisticsView())) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + err := RunMigrationWithRetries(db, fmt.Sprintf(` + DROP MATERIALIZED VIEW IF EXISTS statistic_txn_count_all CASCADE; + CREATE MATERIALIZED VIEW statistic_txn_count_all AS + SELECT SUM(get_transaction_count(s.i)) as count, + 0 as id + FROM generate_series(1, 33) AS s(i); + + CREATE UNIQUE INDEX statistic_txn_count_all_unique_index ON statistic_txn_count_all (id); + comment on materialized view statistic_txn_count_all is E'@omit'; + %v +`, buildStatisticsView())) + if err != nil { + return err + } + return nil + }) +} +func buildStatisticsView() string { + return ` +CREATE VIEW statistic_dashboard AS + SELECT + statistic_txn_count_all.count as txn_count_all, + statistic_txn_count_30_d.count as txn_count_30_d, + statistic_wallet_count_all.count as wallet_count_all, + statistic_active_wallet_count_30_d.count as active_wallet_count_30_d, + statistic_new_wallet_count_30_d.count as new_wallet_count_30_d, + statistic_block_height_current.height as block_height_current, + statistic_txn_count_pending.count as txn_count_pending, + statistic_txn_fee_1_d.avg as txn_fee_1_d, + statistic_total_supply.sum as total_supply, + statistic_post_count.count as post_count, + statistic_post_longform_count.count as post_longform_count, + statistic_comment_count.count as comment_count, + statistic_repost_count.count as repost_count, + statistic_txn_count_creator_coin.count as txn_count_creator_coin, + statistic_txn_count_nft.count as txn_count_nft, + statistic_txn_count_dex.count as txn_count_dex, + statistic_txn_count_social.count as txn_count_social, + 
statistic_follow_count.count as follow_count, + statistic_message_count.count as message_count + FROM + statistic_txn_count_all + CROSS JOIN + statistic_txn_count_30_d + CROSS JOIN + statistic_wallet_count_all + CROSS JOIN + statistic_active_wallet_count_30_d + CROSS JOIN + statistic_new_wallet_count_30_d + CROSS JOIN + statistic_block_height_current + CROSS JOIN + statistic_txn_count_pending + CROSS JOIN + statistic_txn_fee_1_d + CROSS JOIN + statistic_total_supply + CROSS JOIN + statistic_post_count + CROSS JOIN + statistic_post_longform_count + CROSS JOIN + statistic_comment_count + CROSS JOIN + statistic_repost_count + CROSS JOIN + statistic_txn_count_creator_coin + CROSS JOIN + statistic_txn_count_nft + CROSS JOIN + statistic_txn_count_dex + CROSS JOIN + statistic_txn_count_social + CROSS JOIN + statistic_follow_count + CROSS JOIN + statistic_message_count; + comment on view statistic_dashboard is E'@name dashboardStat'; +` +} diff --git a/migrations/post_sync_migrations/20240610000001_update_wallet_view.go b/migrations/post_sync_migrations/20240610000001_update_wallet_view.go new file mode 100644 index 0000000..258c429 --- /dev/null +++ b/migrations/post_sync_migrations/20240610000001_update_wallet_view.go @@ -0,0 +1,36 @@ +package post_sync_migrations + +import ( + "context" + "github.com/uptrace/bun" +) + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + create or replace view wallet as + select case when pkid_entry.pkid is null then public_key.public_key else pkid_entry.pkid end as pkid, public_key.public_key as public_key + from public_key + left join pkid_entry + on pkid_entry.public_key = public_key.public_key; + `) + if err != nil { + return err + } + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + CREATE OR REPLACE VIEW wallet AS + SELECT pkid, public_key FROM pkid_entry + UNION ALL + SELECT public_key AS pkid, public_key + FROM public_key + WHERE 
public_key NOT IN (SELECT public_key FROM pkid_entry) + AND public_key NOT IN (SELECT pkid FROM pkid_entry); + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/post_sync_migrations/20240612000000_update_wallet_create_triggers.go b/migrations/post_sync_migrations/20240612000000_update_wallet_create_triggers.go new file mode 100644 index 0000000..fd9982d --- /dev/null +++ b/migrations/post_sync_migrations/20240612000000_update_wallet_create_triggers.go @@ -0,0 +1,283 @@ +package post_sync_migrations + +import ( + "context" + "github.com/uptrace/bun" + "strings" +) + +// Create a postgres trigger function to fire on insert in order to update the public_key table. +func updatePublicKeyTriggerFn(db *bun.DB, tableName string, fieldName string) error { + triggerFnName := "insert_public_key_" + tableName + "_" + fieldName + // Create a trigger to run on insert into the public_key table. + err := RunMigrationWithRetries(db, strings.Replace(strings.Replace(` + CREATE OR REPLACE FUNCTION {trigger_fn_name}() RETURNS TRIGGER AS $$ + DECLARE + row_count INT; + BEGIN + INSERT INTO public_key(public_key) + VALUES (NEW.{field_name}) + ON CONFLICT (public_key) DO NOTHING; + + + GET DIAGNOSTICS row_count = ROW_COUNT; + + IF row_count > 0 THEN + -- No conflict occurred, perform another insert into the wallet table + -- Only insert into wallet if NEW.{field_name} does not match either a public_key or pkid in the wallet table + IF NOT EXISTS ( + SELECT 1 + FROM wallet + WHERE public_key = NEW.{field_name} OR pkid = NEW.{field_name} + ) THEN + INSERT INTO wallet(public_key, pkid) + VALUES (NEW.{field_name}, NEW.{field_name}); + END IF; + END IF; + + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + `, "{field_name}", fieldName, -1), "{trigger_fn_name}", triggerFnName, -1)) + return err +} + +func init() { + Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + + // Create new wallet table, migrate to it, and drop the old view. 
+ err := RunMigrationWithRetries(db, ` + CREATE TABLE wallet_table ( + pkid VARCHAR, + public_key VARCHAR, + PRIMARY KEY (public_key) + ); + + insert into wallet_table (pkid, public_key) select pkid, public_key from wallet; + + CREATE INDEX idx_wallet_public_key ON wallet_table (public_key); + CREATE INDEX idx_wallet_pkid ON wallet_table (pkid); + + CREATE OR REPLACE VIEW account AS + SELECT + wallet.pkid, + wallet.public_key, + profile_entry.username, + profile_entry.description, + profile_entry.profile_pic, + profile_entry.creator_basis_points, + profile_entry.coin_watermark_nanos, + profile_entry.minting_disabled, + profile_entry.dao_coin_minting_disabled, + profile_entry.dao_coin_transfer_restriction_status, + profile_entry.extra_data, + profile_entry.coin_price_deso_nanos, + profile_entry.deso_locked_nanos, + profile_entry.cc_coins_in_circulation_nanos, + profile_entry.dao_coins_in_circulation_nanos_hex, + true as token_balance_join_field, + false as cc_balance_join_field + FROM + wallet_table wallet + LEFT JOIN + profile_entry + ON + wallet.public_key = profile_entry.public_key; + + DROP VIEW IF EXISTS wallet; + ALTER TABLE wallet_table RENAME TO wallet; + + CREATE OR REPLACE VIEW account AS + SELECT + wallet.pkid, + wallet.public_key, + profile_entry.username, + profile_entry.description, + profile_entry.profile_pic, + profile_entry.creator_basis_points, + profile_entry.coin_watermark_nanos, + profile_entry.minting_disabled, + profile_entry.dao_coin_minting_disabled, + profile_entry.dao_coin_transfer_restriction_status, + profile_entry.extra_data, + profile_entry.coin_price_deso_nanos, + profile_entry.deso_locked_nanos, + profile_entry.cc_coins_in_circulation_nanos, + profile_entry.dao_coins_in_circulation_nanos_hex, + true as token_balance_join_field, + false as cc_balance_join_field + FROM + wallet wallet + LEFT JOIN + profile_entry + ON + wallet.public_key = profile_entry.public_key; + + comment on view account is E'@unique username\n@unique 
public_key\n@unique pkid\n@primaryKey public_key'; + + comment on table wallet is E'@omit'; + `) + + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "profile_entry", "public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "pkid_entry", "public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "like_entry", "public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "post_entry", "poster_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "deso_balance_entry", "public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "balance_entry", "hodler_pkid") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "balance_entry", "creator_pkid") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "derived_key_entry", "owner_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "message_entry", "sender_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "message_entry", "recipient_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "new_message_entry", "sender_access_group_owner_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "new_message_entry", "recipient_access_group_owner_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "user_association_entry", "target_user_pkid") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "access_group_entry", "access_group_owner_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "access_group_member_entry", "access_group_member_public_key") + if err != nil { + return err + } + + err = updatePublicKeyTriggerFn(db, "follow_entry", "follower_pkid") + if err != nil { + return err + } + + err = 
RunMigrationWithRetries(db, ` + CREATE OR REPLACE FUNCTION insert_into_wallet_table_from_pkid_entry() + RETURNS TRIGGER AS $$ + DECLARE + row_count INT; + BEGIN + INSERT INTO wallet (pkid, public_key) + VALUES (NEW.pkid, NEW.public_key) + ON CONFLICT (public_key) + DO UPDATE SET pkid = EXCLUDED.pkid + WHERE wallet.pkid <> EXCLUDED.pkid; + + GET DIAGNOSTICS row_count = ROW_COUNT; + + IF row_count = 0 THEN + -- If there was a conflict on public_key, make sure no other public_key is associated with this pkid. + DELETE FROM wallet WHERE pkid = NEW.pkid AND public_key <> NEW.public_key; + END IF; + + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + + CREATE TRIGGER insert_into_wallet_table_from_pkid_entry_trigger + AFTER INSERT ON pkid_entry + FOR EACH ROW + EXECUTE FUNCTION insert_into_wallet_table_from_pkid_entry(); + `) + if err != nil { + return err + } + + return nil + }, func(ctx context.Context, db *bun.DB) error { + _, err := db.Exec(` + DROP TABLE wallet cascade; + + CREATE OR REPLACE VIEW wallet AS + SELECT pkid, public_key FROM pkid_entry + UNION ALL + SELECT public_key AS pkid, public_key + FROM public_key + WHERE public_key NOT IN (SELECT public_key FROM pkid_entry) + AND public_key NOT IN (SELECT pkid FROM pkid_entry); + + CREATE OR REPLACE VIEW account AS + SELECT + wallet.pkid, + wallet.public_key, + profile_entry.username, + profile_entry.description, + profile_entry.profile_pic, + profile_entry.creator_basis_points, + profile_entry.coin_watermark_nanos, + profile_entry.minting_disabled, + profile_entry.dao_coin_minting_disabled, + profile_entry.dao_coin_transfer_restriction_status, + profile_entry.extra_data, + profile_entry.coin_price_deso_nanos, + profile_entry.deso_locked_nanos, + profile_entry.cc_coins_in_circulation_nanos, + profile_entry.dao_coins_in_circulation_nanos_hex, + true as token_balance_join_field, + false as cc_balance_join_field + FROM + wallet + LEFT JOIN + profile_entry + ON + wallet.public_key = profile_entry.public_key; + + 
comment on view account is E'@unique username\n@unique public_key\n@unique pkid\n@primaryKey public_key'; + `) + if err != nil { + return err + } + return nil + }) +} diff --git a/migrations/post_sync_migrations/migration_helpers.go b/migrations/post_sync_migrations/migration_helpers.go index 5c1fe4e..70d8bce 100644 --- a/migrations/post_sync_migrations/migration_helpers.go +++ b/migrations/post_sync_migrations/migration_helpers.go @@ -23,7 +23,7 @@ var ( {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_wallet_count_all", Ticker: time.NewTicker(15 * time.Minute)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_new_wallet_count_30_d", Ticker: time.NewTicker(15 * time.Minute)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_active_wallet_count_30_d", Ticker: time.NewTicker(30 * time.Minute)}, - {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_block_height_current", Ticker: time.NewTicker(15 * time.Minute)}, + {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_block_height_current", Ticker: time.NewTicker(1 * time.Second)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_count_pending", Ticker: time.NewTicker(15 * time.Minute)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_txn_fee_1_d", Ticker: time.NewTicker(15 * time.Minute)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_total_supply", Ticker: time.NewTicker(15 * time.Minute)}, @@ -73,6 +73,9 @@ var ( {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_buy_orders", Ticker: time.NewTicker(30 * time.Minute)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_deso_token_sell_orders", Ticker: time.NewTicker(30 * time.Minute)}, {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY statistic_profile_earnings_breakdown_counts", Ticker: time.NewTicker(30 * time.Minute)}, + {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY staking_summary", Ticker: time.NewTicker(1 * time.Second)}, + {Query: "REFRESH 
MATERIALIZED VIEW CONCURRENTLY my_stake_summary", Ticker: time.NewTicker(1 * time.Second)}, + {Query: "REFRESH MATERIALIZED VIEW CONCURRENTLY validator_stats", Ticker: time.NewTicker(1 * time.Second)}, } )