diff --git a/11-make-stateless.sql b/11-make-stateless.sql deleted file mode 100644 index f7fefc0d9..000000000 --- a/11-make-stateless.sql +++ /dev/null @@ -1,95 +0,0 @@ -drop trigger "insert_account" on accounts; -drop trigger "update_account" on accounts; -drop trigger "insert_transaction" on transactions; -drop trigger "update_transaction" on transactions; -drop trigger "insert_log" on logs; - -alter table moves -add column transactions_id bigint; - -alter table transactions -add column inserted_at timestamp without time zone -default (now() at time zone 'utc'); - -alter table transactions -alter column timestamp set default (now() at time zone 'utc'); - -DO -$do$ - declare - ledger record; - vsql text; - BEGIN - for ledger in select * from _system.ledgers where bucket = current_schema loop - -- create a sequence for transactions by ledger instead of a sequence of the table as we want to have contiguous ids - -- notes: we can still have "holes" on ids since a sql transaction can be reverted after a usage of the sequence - - vsql = 'create sequence "transaction_id_' || ledger.id || '" owned by transactions.id'; - execute vsql; - - vsql = 'select setval("transaction_id_' || ledger.id || '", coalesce((select max(id) + 1 from transactions where ledger = ledger.name), 1)::bigint, false)'; - execute vsql; - - -- create a sequence for logs by ledger instead of a sequence of the table as we want to have contiguous ids - -- notes: we can still have "holes" on id since a sql transaction can be reverted after a usage of the sequence - vsql = 'create sequence "log_id_' || ledger.id || '" owned by logs.id'; - execute vsql; - - vsql = 'select setval("log_id_' || ledger.id || '", coalesce((select max(id) + 1 from logs where ledger = ledger.name), 1)::bigint, false)'; - execute vsql; - - -- enable post commit effective volumes synchronously - vsql = 'create index "pcev_' || ledger.id || '" on moves (accounts_address, asset, effective_date desc) where ledger = ledger.name'; - 
execute vsql; - - vsql = 'create trigger "set_effective_volumes_' || ledger.id || '" before insert on moves for each row when (new.ledger = ledger.name) execute procedure set_effective_volumes()'; - execute vsql; - - vsql = 'create trigger "update_effective_volumes_' || ledger.id || '" after insert on moves for each row when (new.ledger = ledger.name) execute procedure update_effective_volumes()'; - execute vsql; - - -- logs hash - vsql = 'create trigger "set_log_hash_' || ledger.id || '" before insert on logs for each row when (new.ledger = ledger.name) execute procedure set_log_hash()'; - execute vsql; - - vsql = 'create trigger "update_account_metadata_history_' || ledger.id || '" after update on "accounts" for each row when (new.ledger = ledger.name) execute procedure update_account_metadata_history()'; - execute vsql; - - vsql = 'create trigger "insert_account_metadata_history_' || ledger.id || '" after insert on "accounts" for each row when (new.ledger = ledger.name) execute procedure insert_account_metadata_history()'; - execute vsql; - - vsql = 'create trigger "update_transaction_metadata_history_' || ledger.id || '" after update on "transactions" for each row when (new.ledger = ledger.name) execute procedure update_transaction_metadata_history()'; - execute vsql; - - vsql = 'create trigger "insert_transaction_metadata_history_' || ledger.id || '" after insert on "transactions" for each row when (new.ledger = ledger.name) execute procedure insert_transaction_metadata_history()'; - execute vsql; - - vsql = 'create index "transactions_sources_' || ledger.id || '" on transactions using gin (sources jsonb_path_ops) where ledger = ledger.name'; - execute vsql; - - vsql = 'create index "transactions_destinations_' || ledger.id || '" on transactions using gin (destinations jsonb_path_ops) where ledger = ledger.name'; - execute vsql; - - vsql = 'create trigger "transaction_set_addresses_' || ledger.id || '" before insert on transactions for each row when 
(new.ledger = ledger.name) execute procedure set_transaction_addresses()'; - execute vsql; - - vsql = 'create index "accounts_address_array_' || ledger.id || '" on accounts using gin (address_array jsonb_ops) where ledger = ledger.name'; - execute vsql; - - vsql = 'create index "accounts_address_array_length_' || ledger.id || '" on accounts (jsonb_array_length(address_array)) where ledger = ledger.name'; - execute vsql; - - vsql = 'create trigger "accounts_set_address_array_' || ledger.id || '" before insert on accounts for each row when (new.ledger = ledger.name) execute procedure set_address_array_for_account()'; - execute vsql; - - vsql = 'create index "transactions_sources_arrays_' || ledger.id || '" on transactions using gin (sources_arrays jsonb_path_ops) where ledger = ledger.name'; - execute vsql; - - vsql = 'create index "transactions_destinations_arrays_' || ledger.id || '" on transactions using gin (destinations_arrays jsonb_path_ops) where ledger = ledger.name'; - execute vsql; - - vsql = 'create trigger "transaction_set_addresses_segments_' || ledger.id || '" before insert on "transactions" for each row when (new.ledger = ledger.name) execute procedure set_transaction_addresses_segments()'; - execute vsql; - end loop; - END -$do$; \ No newline at end of file diff --git a/internal/storage/bucket/migrations/11-make-stateless.sql b/internal/storage/bucket/migrations/11-make-stateless.sql new file mode 100644 index 000000000..11fc87c16 --- /dev/null +++ b/internal/storage/bucket/migrations/11-make-stateless.sql @@ -0,0 +1,350 @@ +drop trigger insert_account on accounts; +drop trigger update_account on accounts; +drop trigger insert_transaction on transactions; +drop trigger update_transaction on transactions; +drop trigger insert_log on logs; + +alter table moves +add column transactions_id bigint, +alter column post_commit_volumes drop not null, +alter column post_commit_effective_volumes drop not null, +alter column insertion_date set default (now() at 
time zone 'utc'), +alter column effective_date set default (now() at time zone 'utc'); + +alter table moves +rename column account_address to accounts_address; + +alter table transactions +add column post_commit_volumes jsonb, +add column inserted_at timestamp without time zone default (now() at time zone 'utc'), +alter column timestamp set default (now() at time zone 'utc'), +alter column id type bigint; + +drop index transactions_reference; +create unique index transactions_reference on transactions (ledger, reference); + +alter table logs +add column memento bytea, +add column idempotency_hash bytea, +alter column hash drop not null, +alter column date set default (now() at time zone 'utc'); + +alter table accounts +alter column address_array drop not null, +alter column first_usage set default (now() at time zone 'utc'), +alter column insertion_date set default (now() at time zone 'utc'), +alter column updated_at set default (now() at time zone 'utc') +; + +create table accounts_volumes ( + ledger varchar not null, + accounts_address varchar not null, + asset varchar not null, + input numeric not null, + output numeric not null, + + primary key (ledger, accounts_address, asset) +); + +alter table transactions_metadata +add column transactions_id bigint; + +alter table accounts_metadata +add column accounts_address varchar; + +create function set_effective_volumes() + returns trigger + security definer + language plpgsql +as +$$ +begin + new.post_commit_effective_volumes = coalesce(( + select ( + (post_commit_effective_volumes).inputs + case when new.is_source then 0 else new.amount end, + (post_commit_effective_volumes).outputs + case when new.is_source then new.amount else 0 end + ) + from moves + where accounts_address = new.accounts_address + and asset = new.asset + and ledger = new.ledger + and (effective_date < new.effective_date or (effective_date = new.effective_date and seq < new.seq)) + order by effective_date desc, seq desc + limit 1 + ), ( + case 
when new.is_source then 0 else new.amount end, + case when new.is_source then new.amount else 0 end + )); + + return new; +end; +$$ set search_path from current; + +create function update_effective_volumes() + returns trigger + security definer + language plpgsql +as +$$ +begin + update moves + set post_commit_effective_volumes = ( + (post_commit_effective_volumes).inputs + case when new.is_source then 0 else new.amount end, + (post_commit_effective_volumes).outputs + case when new.is_source then new.amount else 0 end + ) + where accounts_address = new.accounts_address + and asset = new.asset + and effective_date > new.effective_date + and ledger = new.ledger; + + return new; +end; +$$ set search_path from current; + +create or replace function update_transaction_metadata_history() returns trigger + security definer + language plpgsql +as +$$ +begin + insert into transactions_metadata (ledger, transactions_id, revision, date, metadata) + values (new.ledger, new.id, ( + select revision + 1 + from transactions_metadata + where transactions_metadata.transactions_id = new.id and transactions_metadata.ledger = new.ledger + order by revision desc + limit 1 + ), new.updated_at, new.metadata); + + return new; +end; +$$ set search_path from current; + +create or replace function insert_transaction_metadata_history() returns trigger + security definer + language plpgsql +as +$$ +begin + insert into transactions_metadata (ledger, transactions_id, revision, date, metadata) + values (new.ledger, new.id, 1, new.timestamp, new.metadata); + + return new; +end; +$$ set search_path from current; + +create or replace function update_account_metadata_history() returns trigger + security definer + language plpgsql +as +$$ +begin + insert into accounts_metadata (ledger, accounts_address, revision, date, metadata) + values (new.ledger, new.address, ( + select revision + 1 + from accounts_metadata + where accounts_metadata.accounts_address = new.address + order by revision desc + limit 1 
+ ), new.updated_at, new.metadata); + + return new; +end; +$$ set search_path from current; + +create or replace function insert_account_metadata_history() returns trigger + security definer + language plpgsql +as +$$ +begin + insert into accounts_metadata (ledger, accounts_address, revision, date, metadata) + values (new.ledger, new.address, 1, new.insertion_date, new.metadata); + + return new; +end; +$$ set search_path from current; + +create or replace function explode_address(_address varchar) + returns jsonb + language sql + immutable +as +$$ +select public.aggregate_objects(jsonb_build_object(data.number - 1, data.value)) +from (select row_number() over () as number, v.value + from (select unnest(string_to_array(_address, ':')) as value + union all + select null) v) data +$$ set search_path from current; + +create or replace function set_transaction_addresses() returns trigger + security definer + language plpgsql +as +$$ +begin + + new.sources = ( + select to_jsonb(array_agg(v->>'source')) as value + from jsonb_array_elements(new.postings::jsonb) v + ); + new.destinations = ( + select to_jsonb(array_agg(v->>'destination')) as value + from jsonb_array_elements(new.postings::jsonb) v + ); + + return new; +end +$$ set search_path from current; + +create or replace function set_transaction_addresses_segments() returns trigger + security definer + language plpgsql +as +$$ +begin + new.sources_arrays = ( + select to_jsonb(array_agg(explode_address(v ->> 'source'))) as value + from jsonb_array_elements(new.postings::jsonb) v + ); + new.destinations_arrays = ( + select to_jsonb(array_agg(explode_address(v ->> 'destination'))) as value + from jsonb_array_elements(new.postings::jsonb) v + ); + + return new; +end +$$ set search_path from current; + +create or replace function set_address_array_for_account() returns trigger + security definer + language plpgsql +as +$$ +begin + new.address_array = to_json(string_to_array(new.address, ':')); + + return new; +end +$$ set 
search_path from current; + +create function set_log_hash() + returns trigger + security definer + language plpgsql +as +$$ +declare + previousHash bytea; + marshalledAsJSON varchar; +begin + select hash into previousHash + from logs + where ledger = new.ledger + order by seq desc + limit 1; + + -- select only fields participating in the hash on the backend and format json representation the same way + select '{' || + '"type":"' || new.type || '",' || + '"data":' || encode(new.memento, 'escape') || ',' || + '"date":"' || (to_json(new.date::timestamp)#>>'{}') || 'Z",' || + '"idempotencyKey":"' || coalesce(new.idempotency_key, '') || '",' || + '"id":0,' || + '"hash":null' || + '}' into marshalledAsJSON; + + new.hash = ( + select public.digest( + case + when previousHash is null + then marshalledAsJSON::bytea + else '"' || encode(previousHash::bytea, 'base64')::bytea || E'"\n' || convert_to(marshalledAsJSON, 'LATIN1')::bytea + end || E'\n', 'sha256'::text + ) + ); + + return new; +end; +$$ set search_path from current; + +DO +$do$ + declare + ledger record; + vsql text; + BEGIN + for ledger in select * from _system.ledgers where bucket = current_schema loop + -- create a sequence for transactions by ledger instead of a sequence of the table as we want to have contiguous ids + -- notes: we can still have "holes" on ids since a sql transaction can be reverted after a usage of the sequence + + vsql = 'create sequence "transaction_id_' || ledger.id || '" owned by transactions.id'; + execute vsql; + + vsql = 'select setval(''transaction_id_' || ledger.id || ''', coalesce((select max(id) + 1 from transactions where ledger = ''' || ledger.name || '''), 1)::bigint, false)'; + execute vsql; + + -- create a sequence for logs by ledger instead of a sequence of the table as we want to have contiguous ids + -- notes: we can still have "holes" on id since a sql transaction can be reverted after a usage of the sequence + vsql = 'create sequence "log_id_' || ledger.id || '" owned by logs.id'; + 
execute vsql; + + vsql = 'select setval(''log_id_' || ledger.id || ''', coalesce((select max(id) + 1 from logs where ledger = ''' || ledger.name || '''), 1)::bigint, false)'; + execute vsql; + + -- enable post commit effective volumes synchronously + vsql = 'create index "pcev_' || ledger.id || '" on moves (accounts_address, asset, effective_date desc) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create trigger "set_effective_volumes_' || ledger.id || '" before insert on moves for each row when (new.ledger = ''' || ledger.name || ''') execute procedure set_effective_volumes()'; + execute vsql; + + vsql = 'create trigger "update_effective_volumes_' || ledger.id || '" after insert on moves for each row when (new.ledger = ''' || ledger.name || ''') execute procedure update_effective_volumes()'; + execute vsql; + + -- logs hash + vsql = 'create trigger "set_log_hash_' || ledger.id || '" before insert on logs for each row when (new.ledger = ''' || ledger.name || ''') execute procedure set_log_hash()'; + execute vsql; + + vsql = 'create trigger "update_account_metadata_history_' || ledger.id || '" after update on "accounts" for each row when (new.ledger = ''' || ledger.name || ''') execute procedure update_account_metadata_history()'; + execute vsql; + + vsql = 'create trigger "insert_account_metadata_history_' || ledger.id || '" after insert on "accounts" for each row when (new.ledger = ''' || ledger.name || ''') execute procedure insert_account_metadata_history()'; + execute vsql; + + vsql = 'create trigger "update_transaction_metadata_history_' || ledger.id || '" after update on "transactions" for each row when (new.ledger = ''' || ledger.name || ''') execute procedure update_transaction_metadata_history()'; + execute vsql; + + vsql = 'create trigger "insert_transaction_metadata_history_' || ledger.id || '" after insert on "transactions" for each row when (new.ledger = ''' || ledger.name || ''') execute procedure insert_transaction_metadata_history()'; + execute vsql; + + vsql = 'create index "transactions_sources_' || ledger.id || '" on transactions using gin 
(sources jsonb_path_ops) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create index "transactions_destinations_' || ledger.id || '" on transactions using gin (destinations jsonb_path_ops) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create trigger "transaction_set_addresses_' || ledger.id || '" before insert on transactions for each row when (new.ledger = ''' || ledger.name || ''') execute procedure set_transaction_addresses()'; + execute vsql; + + vsql = 'create index "accounts_address_array_' || ledger.id || '" on accounts using gin (address_array jsonb_ops) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create index "accounts_address_array_length_' || ledger.id || '" on accounts (jsonb_array_length(address_array)) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create trigger "accounts_set_address_array_' || ledger.id || '" before insert on accounts for each row when (new.ledger = ''' || ledger.name || ''') execute procedure set_address_array_for_account()'; + execute vsql; + + vsql = 'create index "transactions_sources_arrays_' || ledger.id || '" on transactions using gin (sources_arrays jsonb_path_ops) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create index "transactions_destinations_arrays_' || ledger.id || '" on transactions using gin (destinations_arrays jsonb_path_ops) where ledger = ''' || ledger.name || ''''; + execute vsql; + + vsql = 'create trigger "transaction_set_addresses_segments_' || ledger.id || '" before insert on "transactions" for each row when (new.ledger = ''' || ledger.name || ''') execute procedure set_transaction_addresses_segments()'; + execute vsql; + end loop; + END +$do$; \ No newline at end of file diff --git a/internal/storage/bucket/migrations/14-rename-address-moves-column.sql b/internal/storage/bucket/migrations/14-rename-address-moves-column.sql deleted file mode 100644 index f1b76b860..000000000 --- a/internal/storage/bucket/migrations/14-rename-address-moves-column.sql +++ /dev/null @@ -1,5 +0,0 @@ -alter table moves -rename 
column account_address to accounts_address; - -alter table moves -rename column account_address_array to accounts_address_array; \ No newline at end of file diff --git a/internal/storage/bucket/migrations/11-drop-triggers.sql b/internal/storage/bucket/old/11-drop-triggers.sql similarity index 100% rename from internal/storage/bucket/migrations/11-drop-triggers.sql rename to internal/storage/bucket/old/11-drop-triggers.sql diff --git a/internal/storage/bucket/migrations/12-moves-add-transaction-id.sql b/internal/storage/bucket/old/12-moves-add-transaction-id.sql similarity index 100% rename from internal/storage/bucket/migrations/12-moves-add-transaction-id.sql rename to internal/storage/bucket/old/12-moves-add-transaction-id.sql diff --git a/internal/storage/bucket/migrations/13-set-transaction-timestamp-default-utc.sql b/internal/storage/bucket/old/13-set-transaction-timestamp-default-utc.sql similarity index 100% rename from internal/storage/bucket/migrations/13-set-transaction-timestamp-default-utc.sql rename to internal/storage/bucket/old/13-set-transaction-timestamp-default-utc.sql diff --git a/internal/storage/bucket/old/14-rename-address-moves-column.sql b/internal/storage/bucket/old/14-rename-address-moves-column.sql new file mode 100644 index 000000000..9e54be258 --- /dev/null +++ b/internal/storage/bucket/old/14-rename-address-moves-column.sql @@ -0,0 +1,8 @@ +--todo: must be transactional +-- to be transparent, the table which list migrations must be updated in the same transaction as this change +alter table moves +rename column account_address to accounts_address; + +-- todo: column removed later, we don't need to change its type +alter table moves +rename column account_address_array to accounts_address_array; \ No newline at end of file diff --git a/internal/storage/bucket/migrations/15-moves-remove-accounts-seq.sql b/internal/storage/bucket/old/15-moves-remove-accounts-seq.sql similarity index 100% rename from 
internal/storage/bucket/migrations/15-moves-remove-accounts-seq.sql rename to internal/storage/bucket/old/15-moves-remove-accounts-seq.sql diff --git a/internal/storage/bucket/migrations/16-transactions-fix-reference.sql b/internal/storage/bucket/old/16-transactions-fix-reference.sql similarity index 100% rename from internal/storage/bucket/migrations/16-transactions-fix-reference.sql rename to internal/storage/bucket/old/16-transactions-fix-reference.sql diff --git a/internal/storage/bucket/migrations/17-transactions-add-pvc.sql b/internal/storage/bucket/old/17-transactions-add-pvc.sql similarity index 100% rename from internal/storage/bucket/migrations/17-transactions-add-pvc.sql rename to internal/storage/bucket/old/17-transactions-add-pvc.sql diff --git a/internal/storage/bucket/migrations/18-logs-add-idempotency-hash.sql b/internal/storage/bucket/old/18-logs-add-idempotency-hash.sql similarity index 100% rename from internal/storage/bucket/migrations/18-logs-add-idempotency-hash.sql rename to internal/storage/bucket/old/18-logs-add-idempotency-hash.sql diff --git a/internal/storage/bucket/migrations/19-moves-drop-accounts-address-array.sql b/internal/storage/bucket/old/19-moves-drop-accounts-address-array.sql similarity index 100% rename from internal/storage/bucket/migrations/19-moves-drop-accounts-address-array.sql rename to internal/storage/bucket/old/19-moves-drop-accounts-address-array.sql diff --git a/internal/storage/bucket/migrations/20-add-accounts-volumes-table.sql b/internal/storage/bucket/old/20-add-accounts-volumes-table.sql similarity index 88% rename from internal/storage/bucket/migrations/20-add-accounts-volumes-table.sql rename to internal/storage/bucket/old/20-add-accounts-volumes-table.sql index 6a6e94906..23cbf687c 100644 --- a/internal/storage/bucket/migrations/20-add-accounts-volumes-table.sql +++ b/internal/storage/bucket/old/20-add-accounts-volumes-table.sql @@ -8,6 +8,7 @@ create table accounts_volumes ( primary key (ledger, 
accounts_address, asset) ); +--todo: handle conflicts while we are inserting and the ledger is actively writing to the database insert into accounts_volumes (ledger, accounts_address, asset, input, output) select distinct on (ledger, accounts_address, asset) ledger, diff --git a/internal/storage/bucket/migrations/21-transactions-metadata-add-transaction-id.sql b/internal/storage/bucket/old/21-transactions-metadata-add-transaction-id.sql similarity index 100% rename from internal/storage/bucket/migrations/21-transactions-metadata-add-transaction-id.sql rename to internal/storage/bucket/old/21-transactions-metadata-add-transaction-id.sql diff --git a/internal/storage/bucket/migrations/22-accounts-metadata-add-address.sql b/internal/storage/bucket/old/22-accounts-metadata-add-address.sql similarity index 100% rename from internal/storage/bucket/migrations/22-accounts-metadata-add-address.sql rename to internal/storage/bucket/old/22-accounts-metadata-add-address.sql diff --git a/internal/storage/bucket/migrations/23-transactions-clean-table.sql b/internal/storage/bucket/old/23-transactions-clean-table.sql similarity index 100% rename from internal/storage/bucket/migrations/23-transactions-clean-table.sql rename to internal/storage/bucket/old/23-transactions-clean-table.sql diff --git a/internal/storage/bucket/migrations/24-accounts-set-array-not-null.sql b/internal/storage/bucket/old/24-accounts-set-array-not-null.sql similarity index 100% rename from internal/storage/bucket/migrations/24-accounts-set-array-not-null.sql rename to internal/storage/bucket/old/24-accounts-set-array-not-null.sql diff --git a/internal/storage/bucket/migrations/25-logs-set-hash-nullable.sql b/internal/storage/bucket/old/25-logs-set-hash-nullable.sql similarity index 100% rename from internal/storage/bucket/migrations/25-logs-set-hash-nullable.sql rename to internal/storage/bucket/old/25-logs-set-hash-nullable.sql diff --git a/internal/storage/bucket/migrations/26-clean-index.sql 
b/internal/storage/bucket/old/26-clean-index.sql similarity index 100% rename from internal/storage/bucket/migrations/26-clean-index.sql rename to internal/storage/bucket/old/26-clean-index.sql diff --git a/internal/storage/bucket/migrations/27-add-features-functions.sql b/internal/storage/bucket/old/27-add-features-functions.sql similarity index 100% rename from internal/storage/bucket/migrations/27-add-features-functions.sql rename to internal/storage/bucket/old/27-add-features-functions.sql diff --git a/internal/storage/bucket/migrations/28-logs-add-memento.sql b/internal/storage/bucket/old/28-logs-add-memento.sql similarity index 100% rename from internal/storage/bucket/migrations/28-logs-add-memento.sql rename to internal/storage/bucket/old/28-logs-add-memento.sql diff --git a/internal/storage/bucket/migrations/29-logs-hash-in-database.sql b/internal/storage/bucket/old/29-logs-hash-in-database.sql similarity index 100% rename from internal/storage/bucket/migrations/29-logs-hash-in-database.sql rename to internal/storage/bucket/old/29-logs-hash-in-database.sql diff --git a/internal/storage/bucket/migrations/30-logs-assign-date.sql b/internal/storage/bucket/old/30-logs-assign-date.sql similarity index 100% rename from internal/storage/bucket/migrations/30-logs-assign-date.sql rename to internal/storage/bucket/old/30-logs-assign-date.sql diff --git a/internal/storage/bucket/migrations/31-accounts-assign-date.sql b/internal/storage/bucket/old/31-accounts-assign-date.sql similarity index 100% rename from internal/storage/bucket/migrations/31-accounts-assign-date.sql rename to internal/storage/bucket/old/31-accounts-assign-date.sql diff --git a/internal/storage/bucket/migrations/32-moves-assign-date.sql b/internal/storage/bucket/old/32-moves-assign-date.sql similarity index 100% rename from internal/storage/bucket/migrations/32-moves-assign-date.sql rename to internal/storage/bucket/old/32-moves-assign-date.sql diff --git 
a/internal/storage/bucket/migrations/33-set-ledger-specifics.sql b/internal/storage/bucket/old/33-set-ledger-specifics.sql similarity index 100% rename from internal/storage/bucket/migrations/33-set-ledger-specifics.sql rename to internal/storage/bucket/old/33-set-ledger-specifics.sql diff --git a/internal/storage/bucket/migrations/34-moves-not-null-columns.sql b/internal/storage/bucket/old/34-moves-not-null-columns.sql similarity index 100% rename from internal/storage/bucket/migrations/34-moves-not-null-columns.sql rename to internal/storage/bucket/old/34-moves-not-null-columns.sql diff --git a/internal/storage/ledger/balances.go b/internal/storage/ledger/balances.go index bf45d3360..91a6de3c4 100644 --- a/internal/storage/ledger/balances.go +++ b/internal/storage/ledger/balances.go @@ -187,6 +187,7 @@ func (s *Store) GetAggregatedBalances(ctx context.Context, q ledgercontroller.Ge return aggregatedVolumes.Aggregated.Balances(), nil } +// todo: need to handle previous version schema by looking moves func (s *Store) GetBalances(ctx context.Context, query ledgercontroller.BalanceQuery) (ledgercontroller.Balances, error) { return tracing.TraceWithMetric( ctx,