|
| 1 | +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) |
| 2 | +# Copyright (c) 2025 Drakkar-Software, All rights reserved. |
| 3 | +# |
| 4 | +# OctoBot is free software; you can redistribute it and/or |
| 5 | +# modify it under the terms of the GNU General Public License |
| 6 | +# as published by the Free Software Foundation; either |
| 7 | +# version 3.0 of the License, or (at your option) any later version. |
| 8 | +# |
| 9 | +# OctoBot is distributed in the hope that it will be useful, |
| 10 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 12 | +# General Public License for more details. |
| 13 | +# |
| 14 | +# You should have received a copy of the GNU General Public |
| 15 | +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. |
| 16 | +import math |
| 17 | +import time |
| 18 | +import mock |
| 19 | +import pytest |
| 20 | +import asyncio |
| 21 | +import pyiceberg.table |
| 22 | + |
| 23 | +from additional_tests.historical_backend_tests import iceberg_client |
| 24 | +import octobot.community.history_backend.iceberg_historical_backend_client as iceberg_historical_backend_client |
| 25 | +import octobot.community.history_backend.util as history_backend_util |
| 26 | + |
| 27 | +import octobot_commons.enums as commons_enums |
| 28 | +import octobot_commons.constants as commons_constants |
| 29 | + |
| 30 | + |
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio


# Fixed exchange / trading pair / time frame shared by every test in this module.
EXCHANGE = "binance"
SYMBOL = "BTC/USDC"
SHORT_TIME_FRAME = commons_enums.TimeFrames.FIFTEEN_MINUTES
| 38 | + |
| 39 | + |
async def test_fetch_candles_history_range(iceberg_client):
    """Range lookup returns (0, 0) for unknown data and a sane time window otherwise."""
    # an exchange with no stored candles: both bounds collapse to 0
    unknown_min, unknown_max = await iceberg_client.fetch_candles_history_range(
        f"{EXCHANGE}plop", SYMBOL, SHORT_TIME_FRAME
    )
    assert unknown_min == 0
    assert unknown_max == 0

    # an exchange with stored candles: strictly increasing bounds, both in the past
    known_min, known_max = await iceberg_client.fetch_candles_history_range(
        EXCHANGE, SYMBOL, SHORT_TIME_FRAME
    )
    assert 0 < known_min < known_max < time.time()
| 52 | + |
| 53 | + |
async def test_fetch_candles_history(iceberg_client):
    """Fetched candles are sorted, unique, complete over the range and UTC-aligned."""
    start_time = 1718785679
    end_time = 1721377495
    time_frame_seconds = commons_enums.TimeFramesMinutes[SHORT_TIME_FRAME] * 60
    candles_count = math.floor((end_time - start_time) / time_frame_seconds)
    # large enough that the backend has to perform multiple fetches
    assert candles_count == 2879
    candles = await iceberg_client.fetch_candles_history(
        EXCHANGE, SYMBOL, SHORT_TIME_FRAME, start_time, end_time
    )
    # candles come back in ascending open-time order
    assert candles == sorted(candles, key=lambda candle: candle[0])
    expected_len = candles_count + 1
    assert len(candles) == expected_len
    first_candle = candles[0]
    # will fail if parsed time is not UTC
    assert first_candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] == 1718785800
    # each consecutive pair of OHLCV fields of the first candle holds distinct values
    # (same adjacent-pair semantics as a chained != comparison)
    ordered_indexes = [
        commons_enums.PriceIndexes.IND_PRICE_TIME.value,
        commons_enums.PriceIndexes.IND_PRICE_OPEN.value,
        commons_enums.PriceIndexes.IND_PRICE_HIGH.value,
        commons_enums.PriceIndexes.IND_PRICE_LOW.value,
        commons_enums.PriceIndexes.IND_PRICE_CLOSE.value,
        commons_enums.PriceIndexes.IND_PRICE_VOL.value,
    ]
    for left_index, right_index in zip(ordered_indexes, ordered_indexes[1:]):
        assert first_candle[left_index] != first_candle[right_index]
    # open times are unique across the whole fetch
    assert len({candle[0] for candle in candles}) == expected_len
| 80 | + |
| 81 | + |
async def test_fetch_candles_history_asynchronousness(iceberg_client):
    """Check that 3 concurrent fetches overlap their blocking pyiceberg work.

    The client's get_or_create_table is wrapped so that every table.scan and
    every scan(...).to_arrow call records a timestamp; the test then asserts
    that all to_arrow calls start before any of them returns, i.e. the three
    fetches run concurrently rather than serially.
    """
    start_time = 1718785679
    end_time_1 = 1721377495
    # progressively larger ranges so the three fetches take increasing time
    end_time_2 = 1721377495 + 2 * commons_constants.DAYS_TO_SECONDS
    end_time_3 = 1721377495 + 23 * commons_constants.DAYS_TO_SECONDS

    # timestamps recorded by the instrumented wrappers below
    scan_call_times = []
    _to_arrow_call_times = []
    _to_arrow_return_times = []

    def _get_or_create_table(*args, **kwargs):
        # delegate to the real implementation, then instrument the returned table
        table = original_get_or_create_table(*args, **kwargs)
        original_scan = table.scan


        def _scan(*args, **kwargs):
            # record when the scan is requested, then instrument its to_arrow
            scan_call_times.append(time.time())
            scan_result = original_scan(*args, **kwargs)
            original_to_arrow = scan_result.to_arrow

            def _to_arrow(*args, **kwargs):
                _to_arrow_call_times.append(time.time())
                try:
                    return original_to_arrow(*args, **kwargs)
                finally:
                    # record the return time even if to_arrow raises
                    _to_arrow_return_times.append(time.time())

            scan_result.to_arrow = _to_arrow
            return scan_result

        table.scan = mock.Mock(side_effect=_scan)
        return table

    # keep a reference to the real method before it gets patched
    original_get_or_create_table = iceberg_client.get_or_create_table
    with (
        mock.patch.object(iceberg_client, "get_or_create_table", mock.Mock(side_effect=_get_or_create_table)) as get_or_create_table_mock
    ):
        # fire the three fetches concurrently
        candles_1, candles_2, candles_3 = await asyncio.gather(
            iceberg_client.fetch_candles_history(
                EXCHANGE, SYMBOL, SHORT_TIME_FRAME, start_time, end_time_1
            ),
            iceberg_client.fetch_candles_history(
                EXCHANGE, SYMBOL, SHORT_TIME_FRAME, start_time, end_time_2
            ),
            iceberg_client.fetch_candles_history(
                EXCHANGE, SYMBOL, SHORT_TIME_FRAME, start_time, end_time_3
            ),
        )
        # each fetch went through the instrumented path exactly once
        assert get_or_create_table_mock.call_count == 3
        assert len(scan_call_times) == 3
        assert len(_to_arrow_call_times) == 3
        assert len(_to_arrow_return_times) == 3

        # calls started in submission order; returns finished shortest-range first
        assert scan_call_times[0] <= scan_call_times[1] <= scan_call_times[2]
        assert _to_arrow_call_times[0] <= _to_arrow_call_times[1] <= _to_arrow_call_times[2]
        assert _to_arrow_return_times[0] < _to_arrow_return_times[1] < _to_arrow_return_times[2]

        # all to_arrow calls have been performed before the first to_arrow return,
        # which means they are running concurrently in this async context
        assert max(_to_arrow_call_times) < min(_to_arrow_return_times)

        # sanity-check the payloads: wider ranges yield more candles
        assert len(candles_1) > 2000
        assert len(candles_2) > len(candles_1)
        assert len(candles_3) > len(candles_2)
| 146 | + |
| 147 | + |
async def test_deduplicate(iceberg_client):
    """deduplicate() drops repeated candles while keeping the sorted order."""
    start_time = 1718785679
    end_time = 1721377495
    candles = await iceberg_client.fetch_candles_history(
        EXCHANGE, SYMBOL, SHORT_TIME_FRAME, start_time, end_time
    )
    # concatenating the list with itself duplicates every candle exactly once
    doubled = candles + candles
    assert len(doubled) == 2 * len(candles)
    # the fetched list is already sorted by open time (column 0)
    assert candles == sorted(candles, key=lambda candle: candle[0])
    deduplicated = history_backend_util.deduplicate(doubled, 0)
    # duplicates removed and the original sorted sequence preserved
    assert deduplicated == candles
0 commit comments