Skip to content

Commit

Permalink
Rework .accounting.Position calcs to prep for polars
Browse files Browse the repository at this point in the history
We're probably going to move to implementing all accounting using
`polars.DataFrame` and friends and thus this rejig preps for a much more
"stateless" implementation of our `Position` type and its internal
pos-accounting metrics: `ppu` and `cumsize`.

Summary:
- wrt `._pos.Position`:
  - rename `.size`/`.accum_size` to `.cumsize` to be more in line
    with `polars.DataFrame.cumsum()`.
  - make `Position.expiry` delegate to the underlying `.mkt: MktPair`
    handling (hopefully) all edge cases..
  - change over to a new `._events: dict[str, Transaction]` in prep
    for #510 (and friends) and enforce a new `Transaction.etype: str`
    which is by default `clear`.
  - add `.iter_by_type()` which iterates, filters and sorts the
    entries in `._events` from above.
  - add `Position.clearsdict()` which returns the dict-ified and
    datetime-sorted table which can more-or-less be stored in the
    toml account file.
  - add `.minimized_clears()` a new (and close) version of the old
    method which always grabs at least one clear before
    a position-side-polarity-change.
  - mask-drop `.ensure_state()` since there is no more `.size`/`.price`
    state vars (per se) as we always re-calc the ppu and cumsize from
    the clears records on every read.
  - `.add_clear` no longer does bisect insorting since all sorting is
    done on position properties *reads*.
  - move the PPU (price per unit) calculator to a new `.accounting.calcs`
    as well as add in the `iter_by_dt()` clearing transaction sorted
    iterator.
    - also make some fixes to this to handle both lists of `Transaction`
      as well as `dict`s as before.

- start rename of `PpTable` -> `Account` and make a note about adding
  a `.balances` table.
- always `float()` the transaction size/price values since it seems if
  they get processed as `tomlkit.Integer` there's some suuper weird
  double negative on read-then-write to the clears table?
  - something like `cumsize = -1` -> `cumsize = --1` !?!?
- make `load_pps_from_ledger()` work again but now includes some very
  very first draft `polars` df processing from a transaction ledger.
  - use this from the `accounting.cli.disect` subcmd which is also in
    *super early draft* mode ;)
- obviously as mentioned in the `Position` section, add the new `.calcs`
  module with a `.ppu()` calculator func B)
  • Loading branch information
goodboy committed Jul 12, 2023
1 parent 745c144 commit 05af2b3
Show file tree
Hide file tree
Showing 6 changed files with 720 additions and 520 deletions.
21 changes: 3 additions & 18 deletions piker/accounting/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,10 @@
'''
from ..log import get_logger

from ._ledger import (
from .calc import (
iter_by_dt,
)
from ._ledger import (
Transaction,
TransactionLedger,
open_trade_ledger,
Expand Down Expand Up @@ -100,20 +102,3 @@ def get_likely_pair(
likely_dst = bs_mktid[:src_name_start]
if likely_dst == dst:
return bs_mktid


if __name__ == '__main__':
import sys
from pprint import pformat

args = sys.argv
assert len(args) > 1, 'Specifiy account(s) from `brokers.toml`'
args = args[1:]
for acctid in args:
broker, name = acctid.split('.')
trans, updated_pps = load_pps_from_ledger(broker, name)
print(
f'Processing transactions into pps for {broker}:{acctid}\n'
f'{pformat(trans)}\n\n'
f'{pformat(updated_pps)}'
)
8 changes: 3 additions & 5 deletions piker/accounting/_allocate.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,9 +118,9 @@ def next_order_info(
ld: int = mkt.size_tick_digits

size_unit = self.size_unit
live_size = live_pp.size
live_size = live_pp.cumsize
abs_live_size = abs(live_size)
abs_startup_size = abs(startup_pp.size)
abs_startup_size = abs(startup_pp.cumsize)

u_per_slot, currency_per_slot = self.step_sizes()

Expand Down Expand Up @@ -213,8 +213,6 @@ def next_order_info(
slots_used = self.slots_used(
Position(
mkt=mkt,
size=order_size,
ppu=price,
bs_mktid=mkt.bs_mktid,
)
)
Expand All @@ -241,7 +239,7 @@ def slots_used(
Calc and return the number of slots used by this ``Position``.
'''
abs_pp_size = abs(pp.size)
abs_pp_size = abs(pp.cumsize)

if self.size_unit == 'currency':
# live_currency_size = size or (abs_pp_size * pp.ppu)
Expand Down
64 changes: 15 additions & 49 deletions piker/accounting/_ledger.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,22 +25,22 @@
from typing import (
Any,
Callable,
Iterator,
Union,
Generator
)

from pendulum import (
datetime,
DateTime,
from_timestamp,
parse,
)
import tomli_w # for fast ledger writing

from .. import config
from ..data.types import Struct
from ..log import get_logger
from .calc import (
iter_by_dt,
)
from ._mktinfo import (
Symbol, # legacy
MktPair,
Expand All @@ -56,13 +56,14 @@ class Transaction(Struct, frozen=True):
# once we have that as a required field,
# we don't really need the fqme any more..
fqme: str

tid: Union[str, int] # unique transaction id
tid: str | int # unique transaction id
size: float
price: float
cost: float # commisions or other additional costs
dt: datetime

etype: str = 'clear'

# TODO: we can drop this right since we
# can instead expect the backend to provide this
# via the `MktPair`?
Expand Down Expand Up @@ -159,9 +160,9 @@ def iter_trans(
# and instead call it for each entry incrementally:
# normer = mod.norm_trade_record(txdict)

# TODO: use tx_sort here yah?
# datetime-sort and pack into txs
for txdict in self.tx_sort(self.data.values()):
# for tid, txdict in self.data.items():

# special field handling for datetimes
# to ensure pendulum is used!
tid: str = txdict['tid']
Expand All @@ -186,6 +187,7 @@ def iter_trans(
# TODO: change to .sys!
sym=mkt,
expiry=parse(expiry) if expiry else None,
etype='clear',
)
yield tid, tx

Expand All @@ -208,62 +210,26 @@ def write_config(
Render the self.data ledger dict to it's TOML file form.
'''
cpy = self.data.copy()
towrite: dict[str, Any] = {}
for tid, trans in cpy.items():
for tid, txdict in self.tx_sort(self.data.copy()):

# drop key for non-expiring assets
txdict = towrite[tid] = self.data[tid]
# write blank-str expiry for non-expiring assets
if (
'expiry' in txdict
and txdict['expiry'] is None
):
txdict.pop('expiry')
txdict['expiry'] = ''

# re-write old acro-key
fqme = txdict.get('fqsn')
if fqme:
if fqme := txdict.get('fqsn'):
txdict['fqme'] = fqme

towrite[tid] = txdict

with self.file_path.open(mode='wb') as fp:
tomli_w.dump(towrite, fp)


def iter_by_dt(
records: dict[str, dict[str, Any]] | list[dict],

# NOTE: parsers are looked up in the insert order
# so if you know that the record stats show some field
# is more common then others, stick it at the top B)
parsers: dict[tuple[str], Callable] = {
'dt': None, # parity case
'datetime': parse, # datetime-str
'time': from_timestamp, # float epoch
},
key: Callable | None = None,

) -> Iterator[tuple[str, dict]]:
'''
Iterate entries of a ``records: dict`` table sorted by entry recorded
datetime presumably set at the ``'dt'`` field in each entry.
'''
def dyn_parse_to_dt(txdict: dict[str, Any]) -> DateTime:
k, v, parser = next(
(k, txdict[k], parsers[k]) for k in parsers if k in txdict
)
return parser(v) if parser else v

if isinstance(records, dict):
records = records.values()

for entry in sorted(
records,
key=key or dyn_parse_to_dt,
):
yield entry


def load_ledger(
brokername: str,
acctid: str,
Expand Down
Loading

0 comments on commit 05af2b3

Please sign in to comment.