Not a member of Pastebin yet? Sign up — it unlocks many cool features!
# This is provided at absolutely zero guarantee of correctness, obligation or warranty. Use at your own risk.
# ***Before using*** You'll need the csv from https://eth2.tax/ put it in the same folder as this script.

# RPL minipool handling. E.g. if you have 2 minipools, one at 5% commission, one at 15%, these arrays would look like
# [{'index': '123456', 'rate': 0.15}, {'index': '234561', 'rate': 0.05}]
# Your commission rate can be found by plugging your validator index into beaconcha.in then
# clicking on the rocketpool tab. E.g: https://beaconcha.in/validator/123456#rocketpool
# ETH calculations on RPL minipools do: ETH * (0.5 + 0.5 * commission_rate).
# That is, 50% of the block reward is yours + your commission rate of the remaining 50%
rpl_minipool_validator_rates = [{'index': '123456', 'rate': 0.15}]

# Per-date aggregation tables, keyed by "YYYY-MM-DD" date strings.
# Filled by parse_lines_fill_table(); optionally rewritten by fiat_range_condense_table().
date_table_income_fiat = {}  # date -> summed fiat income for that day
date_price_table = {}        # date -> fiat ETH price (first row seen for that day)
date_table_income_eth = {}   # date -> summed ETH income for that day
def main():
    """Convert the eth2.tax rewards CSV into a Cointracking-style income CSV."""
    csv_lines = read_file("combinedRewardsTable.csv")
    parse_lines_fill_table(csv_lines)
    # Optional: merge rewards with similar fiat prices to shrink the report.
    # Read the "Fiat Range Condenser" comment block thoroughly before enabling.
    #fiat_range_condense_table()
    table_to_csv_file("cointrackingIncome.csv")
# Cointracking.info expected CSV order:
# "Type", "Buy", "Cur.", "Sell", "Cur.", "Fee", "Cur.", "Exchange", "Group", "Comment", "Date"
# "Income", "0.1106600", "ETH", "", "", "", "", "Validator Stake Reward", "", "", "19.06.2022 13:44:55"
# Update to whatever your tax reporting software expects.
def table_to_csv_file(filename):
    """Write the per-date ETH income table to `filename` as a
    Cointracking.info import CSV, then print summary totals."""
    keys = sorted(date_table_income_eth.keys())
    lineOutput = ""  # kept after the loop so the summary print can show a sample row
    # 'with' closes the file even on error; the original also called a
    # redundant file.close() afterwards, which is dropped here.
    with open(filename, "w") as file:
        file.write('"Type", "Buy", "Cur.", "Sell", "Cur.", "Fee", "Cur.", "Exchange", "Group", "Comment", "Date"')
        file.write('\n')
        for k in keys:
            # Keys are "YYYY-MM-DD"; Cointracking wants "DD.MM.YYYY HH:MM:SS".
            year, month, day = k.split("-")
            ct_date = f'{day}.{month}.{year} 00:00:00'  # daily totals, so time is always midnight
            lineOutput = f'"Income","{date_table_income_eth[k]}","ETH","","","","","Validator Stake Reward","","","{ct_date}"'
            file.write(lineOutput)
            file.write('\n')
    # Summary totals for a quick eyeball check against the source data.
    totalETHvalue = sum(date_table_income_eth[k] for k in keys)
    totalFiatIncome = sum(date_table_income_fiat[k] for k in keys)
    # Fixed: the summary previously printed a garbled placeholder instead of the filename.
    print(f'\n*** Wrote {len(keys)} lines that look like this: {lineOutput}\n to {filename}. Total ETH: {totalETHvalue} / Total Fiat: {totalFiatIncome}')
def read_file(filename):
    """Return all lines of `filename` (trailing newlines preserved) as a list.

    Fixed: the original opened the file without ever closing it; the
    'with' block guarantees the handle is released.
    """
    with open(filename, encoding="utf8") as f:
        return list(f)
def rpl_minipool_rate(validator_index):
    """Return the owner's reward multiplier (0.5 + half the commission rate)
    for an RPL minipool validator, or 0 if the index is not a known minipool."""
    matches = (
        0.5 + 0.5 * pool['rate']
        for pool in rpl_minipool_validator_rates
        if pool['index'] == validator_index
    )
    return next(matches, 0)
def parse_lines_fill_table(lines):
    """Parse eth2.tax CSV rows and accumulate per-date income into the
    module-level tables (fiat income, ETH income, daily ETH price)."""
    # Expected row layout (semicolon-delimited despite the .csv name):
    # date ; validator index ; end-of-day balance ; income eth ; price fiat ; income fiat
    # "2022-02-16";"123456";"32.1234";"0.0039123456";"1234.566789";"12.34567890"
    lines.pop(0)  # discard the header row
    for raw in lines:
        fields = raw.replace('"', '').split(";")
        date = fields[0]
        validator_index = fields[1]  # kept as a string for rate lookups
        income_eth = float(fields[3])
        price_fiat = float(fields[4])
        income_fiat = float(fields[5])
        # RPL minipool initial 'matching' is reported strangely (e.g. +/-16 ETH
        # and ~16 ETH rows around deposit time); drop anything outside the
        # plausible daily-reward range.
        if not 0 < income_eth < 15:
            continue
        rate = rpl_minipool_rate(validator_index)
        if rate != 0:
            # Minipool owner only keeps part of the reward.
            income_fiat *= rate
            income_eth *= rate
        if date not in date_table_income_fiat:
            date_table_income_fiat[date] = income_fiat
            date_price_table[date] = price_fiat  # first price seen wins for the day
            date_table_income_eth[date] = income_eth
        else:
            date_table_income_fiat[date] += income_fiat
            date_table_income_eth[date] += income_eth
#
# Fiat Range Condenser
# This will attempt to condense similar validator rewards in a given FIAT price range.
# Example:
#   0.1 ETH reward on 2021-05-01 for $100 USD
#   0.1 ETH reward on 2022-02-30 for $102 USD
#   Merged result: 0.2 ETH reward on 2021-05-01 for $101 USD.
#
# It will do this twice, once with a smaller range, and then again with a larger range.
# You can change this, or even keep condensing — see the fiat_double_condense
# section inside fiat_range_condense_table().
#
# Reasoning: the example above is not too uncommon, and so many near-identical
# transactions needlessly bloat a tax report.
#
# **NOTE**: There's some half-decent logic to not condense block proposals, to avoid this:
#   0.1 ETH reward on 2021-05-01 for $100
#   0.5 ETH reward on 2022-05-03 for $120
#   Merged result = 0.6 ETH on 2021-05-01 for $110
# You would be overpaying on the 0.1 reward in this example (or underpaying, the other way).
#
# The detection is fairly simple: take the median (not mean) ETH value, times it by 3;
# if either value is above that, don't merge. [In the example above the median would be 0.3 ETH.]
# (x2 would probably be fine, but since merging adds ETH values together, x3 is safer.)
# Adjust the medianEthValueTimes3 calculation in fiat_range_condense_table() to change
# this (e.g. set a very high value to turn the check off).
#
# There are two vars you can adjust here.
fiat_condense_tolerance = 20.0  # Merge window in fiat units. Default = $20.
# If you DON'T want it to double condense, set this to anything other than 0, and double
# the value above (since it's halved for the first condense pass — sorry, unusual flow).
fiat_double_condense = 0
# Set to non-zero if you don't care about the per-merge debug output;
# leave at 0 if you want assurance the condenser worked.
condense_sanity_check = 0
def fiat_range_condense_table():
    """Merge consecutive (price-sorted) daily rewards whose fiat prices fall
    within a tolerance, then rewrite the module-level income tables in place.
    See the "Fiat Range Condenser" comment block above for the rationale.
    """
    # Parallel working arrays, index-aligned: one entry per (remaining) date.
    eth_values = []
    fiat_prices = []
    dates = []
    fiatIncome = []
    medianEthValueTimes3 = 0  # anti-merge threshold; set below, read by condense()
    # Output arrays, refilled by each condense() pass.
    condensed_eth = []
    condensed_dates = []
    condensed_prices = []
    condensed_income = []

    def condense(tolerance):
        # One pass over the price-sorted arrays: merge each entry with its
        # immediate neighbour when their fiat prices differ by less than
        # `tolerance` (and neither looks like an outlier/block proposal).
        condensed_eth.clear()
        condensed_dates.clear()
        condensed_prices.clear()
        condensed_income.clear()
        i = 0
        while i < len(fiat_prices):
            price = fiat_prices[i]
            eth1 = eth_values[i]
            date1 = dates[i]
            income = fiatIncome[i]
            if (condense_sanity_check == 0):
                print(f'{i} = {price} on {date1}')
            if i + 1 < len(fiat_prices):
                j = i + 1
                next_price = fiat_prices[j]
                # Prices are sorted ascending, so next_price - price >= 0.
                if (next_price - price) < tolerance:
                    eth2 = eth_values[j]
                    # Don't merge if either amount exceeds 3x the median daily
                    # reward (likely a block proposal — see header comment).
                    if eth2 < medianEthValueTimes3 and eth1 < medianEthValueTimes3:
                        midPrice = (price + next_price) / 2
                        if (condense_sanity_check == 0):
                            print(
                                f'Rolling: {next_price} on {dates[j]} into {price} on {date1} as {midPrice} - ETH: {eth1} + {eth2} = {eth1 + eth2}')
                        eth1 += eth2
                        price = midPrice
                        income = price * eth1  # recompute fiat from the averaged price
                        i += 1  # skip this on next pass.
                    else:
                        if (condense_sanity_check == 0):
                            print(f'SKIPPING MERGE as ETH values {eth1} or {eth2} are higher than the expected median: {medianEthValueTimes3}')
            # Emit the (possibly merged) entry.
            condensed_prices.append(price)
            condensed_eth.append(eth1)
            condensed_dates.append(date1)
            condensed_income.append(income)
            i += 1
        return

    # Flatten the per-date tables into the parallel arrays, sorted by fiat price.
    keys = date_table_income_fiat.keys()
    for k in keys:
        fiat_prices.append(date_price_table[k])
        dates.append(k)
    fiat_prices, dates = (list(t) for t in zip(*sorted(zip(fiat_prices, dates))))
    for d in dates:
        fiatIncome.append(date_table_income_fiat[d])
        eth_values.append(date_table_income_eth[d])
    sortedEthValues = sorted(eth_values)
    #for e in sortedEthValues:
    #    print(f"SORTED E: {e}")
    # Median daily ETH reward x3: entries above this are never merged.
    medianEthValueTimes3 = sortedEthValues[int(len(eth_values) / 2)] * 3
    # Keep references to the pre-condense data for the sanity totals below.
    orig_prices = fiat_prices
    orig_dates = dates
    # First pass at half tolerance; condense() sees the rebound arrays via closure.
    condense(fiat_condense_tolerance / 2)
    eth_values = condensed_eth.copy()
    fiat_prices = condensed_prices.copy()
    dates = condensed_dates.copy()
    fiatIncome = condensed_income.copy()
    firstCondenseText = f"First Condense: {len(eth_values)} normal: {len(orig_prices)}"
    if (condense_sanity_check == 0):
        print(firstCondenseText)
    if (fiat_double_condense == 0):
        # Second pass at full tolerance.
        # Copy paste the next 5 lines if you want to keep condensing.
        condense(fiat_condense_tolerance)
        eth_values = condensed_eth.copy()
        fiat_prices = condensed_prices.copy()
        dates = condensed_dates.copy()
        fiatIncome = condensed_income.copy()
    # i.e. another pass would look like this:
    # condense(fiat_condense_tolerance)
    #
    #eth_values = condensed_eth.copy()
    #fiat_prices = condensed_prices.copy()
    #dates = condensed_dates.copy()
    #fiatIncome = condensed_income.copy()
    if (condense_sanity_check == 0):
        print('\n')
        print(firstCondenseText)
        print(f"Second Condense: {len(eth_values)} normal: {len(orig_prices)}")
    # Totals over the original (pre-condense) tables, for comparison.
    totalETHvalue = 0.0
    totalFiatIncome = 0.0
    for d in orig_dates:
        totalETHvalue += date_table_income_eth[d]
        totalFiatIncome += date_table_income_fiat[d]
    condensedTotalValue = 0.0
    condensedTotalFiatValue = 0.0
    # Rewrite the module-level tables with the condensed results.
    date_table_income_eth.clear()
    date_table_income_fiat.clear()
    index = 0
    while index < len(condensed_eth):
        date_table_income_eth[dates[index]] = eth_values[index]
        date_table_income_fiat[dates[index]] = fiatIncome[index]
        condensedTotalValue += eth_values[index]
        condensedTotalFiatValue += fiatIncome[index]
        index += 1
    # If you want more sanity checking, compare the output file to the input file.
    if (condense_sanity_check == 0):
        print('\n')
        print(f"Fiat total pre-condense {totalFiatIncome} after: {condensedTotalFiatValue} -- ETH total (this should NOT have changed) pre-condense: {totalETHvalue} after: {condensedTotalValue}")
        print(f'The fiat difference should be fairly minor, if not, your fiat tolerance is probably too high.')
    return
# Script entry point: run the CSV conversion when executed directly.
if __name__ == '__main__':
    main()
Add Comment
Please, Sign In to add comment