diff --git a/backtesting/_stats.py b/backtesting/_stats.py
index 6905d62c..243c4a24 100644
--- a/backtesting/_stats.py
+++ b/backtesting/_stats.py
@@ -97,24 +97,34 @@ def _round_timedelta(value, _period=_data_period(index)):
         resolution = getattr(_period, 'resolution_string', None) or _period.resolution
         return value.ceil(resolution)
 
-    s = pd.Series(dtype=object)
-    s.loc['Start'] = index[0]
-    s.loc['End'] = index[-1]
-    s.loc['Duration'] = s.End - s.Start
+    stat_items: list[tuple[str, object]] = []
+    start = index[0]
+    end = index[-1]
+    duration = end - start
+    stat_items.extend([
+        ('Start', start),
+        ('End', end),
+        ('Duration', duration),
+    ])
 
     have_position = np.repeat(0, len(index))
-    for t in trades_df.itertuples(index=False):
+    for t in trades_df[['EntryBar', 'ExitBar']].itertuples(index=False):
         have_position[t.EntryBar:t.ExitBar + 1] = 1
 
-    s.loc['Exposure Time [%]'] = have_position.mean() * 100  # In "n bars" time, not index time
-    s.loc['Equity Final [$]'] = equity[-1]
-    s.loc['Equity Peak [$]'] = equity.max()
+    exposure_time_pct = have_position.mean() * 100  # In "n bars" time, not index time
+    stat_items.append(('Exposure Time [%]', exposure_time_pct))
+    equity_final = equity[-1]
+    equity_peak = equity.max()
+    stat_items.append(('Equity Final [$]', equity_final))
+    stat_items.append(('Equity Peak [$]', equity_peak))
     if commissions:
-        s.loc['Commissions [$]'] = commissions
-    s.loc['Return [%]'] = (equity[-1] - equity[0]) / equity[0] * 100
+        stat_items.append(('Commissions [$]', commissions))
+    return_pct = (equity_final - equity[0]) / equity[0] * 100
+    stat_items.append(('Return [%]', return_pct))
     first_trading_bar = _indicator_warmup_nbars(strategy_instance)
     c = ohlc_data.Close.values
-    s.loc['Buy & Hold Return [%]'] = (c[-1] - c[first_trading_bar]) / c[first_trading_bar] * 100  # long-only return
+    buy_hold_return_pct = (c[-1] - c[first_trading_bar]) / c[first_trading_bar] * 100
+    stat_items.append(('Buy & Hold Return [%]', buy_hold_return_pct))  # long-only return
 
     gmean_day_return: float = 0
     day_returns = np.array(np.nan)
@@ -137,22 +147,29 @@ def _round_timedelta(value, _period=_data_period(index)):
     # Our annualized return matches `empyrical.annual_return(day_returns)` whereas
     # our risk doesn't; they use the simpler approach below.
     annualized_return = (1 + gmean_day_return)**annual_trading_days - 1
-    s.loc['Return (Ann.) [%]'] = annualized_return * 100
-    s.loc['Volatility (Ann.) [%]'] = np.sqrt((day_returns.var(ddof=int(bool(day_returns.shape))) + (1 + gmean_day_return)**2)**annual_trading_days - (1 + gmean_day_return)**(2 * annual_trading_days)) * 100  # noqa: E501
+    return_ann_pct = annualized_return * 100
+    volatility_ann_pct = np.sqrt((day_returns.var(ddof=int(bool(day_returns.shape))) + (1 + gmean_day_return)**2)**annual_trading_days - (1 + gmean_day_return)**(2 * annual_trading_days)) * 100  # noqa: E501
+    stat_items.append(('Return (Ann.) [%]', return_ann_pct))
+    stat_items.append(('Volatility (Ann.) [%]', volatility_ann_pct))
     # s.loc['Return (Ann.) [%]'] = gmean_day_return * annual_trading_days * 100
     # s.loc['Risk (Ann.) [%]'] = day_returns.std(ddof=1) * np.sqrt(annual_trading_days) * 100
     if is_datetime_index:
-        time_in_years = (s.loc['Duration'].days + s.loc['Duration'].seconds / 86400) / annual_trading_days
-        s.loc['CAGR [%]'] = ((s.loc['Equity Final [$]'] / equity[0])**(1 / time_in_years) - 1) * 100 if time_in_years else np.nan  # noqa: E501
+        time_in_years = (duration.days + duration.seconds / 86400) / annual_trading_days
+        cagr_pct = ((equity_final / equity[0])**(1 / time_in_years) - 1) * 100 if time_in_years else np.nan  # noqa: E501
+        stat_items.append(('CAGR [%]', cagr_pct))
 
     # Our Sharpe mismatches `empyrical.sharpe_ratio()` because they use arithmetic mean return
     # and simple standard deviation
-    s.loc['Sharpe Ratio'] = (s.loc['Return (Ann.) [%]'] - risk_free_rate * 100) / (s.loc['Volatility (Ann.) [%]'] or np.nan)  # noqa: E501
+    sharpe_denom = volatility_ann_pct or np.nan
+    sharpe_ratio = (return_ann_pct - risk_free_rate * 100) / sharpe_denom
+    stat_items.append(('Sharpe Ratio', sharpe_ratio))  # noqa: E501
     # Our Sortino mismatches `empyrical.sortino_ratio()` because they use arithmetic mean return
     with np.errstate(divide='ignore'):
-        s.loc['Sortino Ratio'] = (annualized_return - risk_free_rate) / (np.sqrt(np.mean(day_returns.clip(-np.inf, 0)**2)) * np.sqrt(annual_trading_days))  # noqa: E501
+        sortino_ratio = (annualized_return - risk_free_rate) / (np.sqrt(np.mean(day_returns.clip(-np.inf, 0)**2)) * np.sqrt(annual_trading_days))  # noqa: E501
+        stat_items.append(('Sortino Ratio', sortino_ratio))
     max_dd = -np.nan_to_num(dd.max())
-    s.loc['Calmar Ratio'] = annualized_return / (-max_dd or np.nan)
+    calmar_ratio = annualized_return / (-max_dd or np.nan)
+    stat_items.append(('Calmar Ratio', calmar_ratio))
     equity_log_returns = np.log(equity[1:] / equity[:-1])
     market_log_returns = np.log(c[1:] / c[:-1])
     beta = np.nan
@@ -161,29 +178,40 @@ def _round_timedelta(value, _period=_data_period(index)):
         cov_matrix = np.cov(equity_log_returns, market_log_returns)
         beta = cov_matrix[0, 1] / cov_matrix[1, 1]
     # Jensen CAPM Alpha: can be strongly positive when beta is negative and B&H Return is large
-    s.loc['Alpha [%]'] = s.loc['Return [%]'] - risk_free_rate * 100 - beta * (s.loc['Buy & Hold Return [%]'] - risk_free_rate * 100)  # noqa: E501
-    s.loc['Beta'] = beta
-    s.loc['Max. Drawdown [%]'] = max_dd * 100
-    s.loc['Avg. Drawdown [%]'] = -dd_peaks.mean() * 100
-    s.loc['Max. Drawdown Duration'] = _round_timedelta(dd_dur.max())
-    s.loc['Avg. Drawdown Duration'] = _round_timedelta(dd_dur.mean())
-    s.loc['# Trades'] = n_trades = len(trades_df)
+    alpha_pct = return_pct - risk_free_rate * 100 - beta * (buy_hold_return_pct - risk_free_rate * 100)  # noqa: E501
+    stat_items.append(('Alpha [%]', alpha_pct))
+    stat_items.append(('Beta', beta))
+    stat_items.append(('Max. Drawdown [%]', max_dd * 100))
+    stat_items.append(('Avg. Drawdown [%]', -dd_peaks.mean() * 100))
+    stat_items.append(('Max. Drawdown Duration', _round_timedelta(dd_dur.max())))
+    stat_items.append(('Avg. Drawdown Duration', _round_timedelta(dd_dur.mean())))
+    n_trades = len(trades_df)
+    stat_items.append(('# Trades', n_trades))
     win_rate = np.nan if not n_trades else (pl > 0).mean()
-    s.loc['Win Rate [%]'] = win_rate * 100
-    s.loc['Best Trade [%]'] = returns.max() * 100
-    s.loc['Worst Trade [%]'] = returns.min() * 100
+    stat_items.append(('Win Rate [%]', win_rate * 100))
+    stat_items.append(('Best Trade [%]', returns.max() * 100))
+    stat_items.append(('Worst Trade [%]', returns.min() * 100))
     mean_return = geometric_mean(returns)
-    s.loc['Avg. Trade [%]'] = mean_return * 100
-    s.loc['Max. Trade Duration'] = _round_timedelta(durations.max())
-    s.loc['Avg. Trade Duration'] = _round_timedelta(durations.mean())
-    s.loc['Profit Factor'] = returns[returns > 0].sum() / (abs(returns[returns < 0].sum()) or np.nan)  # noqa: E501
-    s.loc['Expectancy [%]'] = returns.mean() * 100
-    s.loc['SQN'] = np.sqrt(n_trades) * pl.mean() / (pl.std() or np.nan)
-    s.loc['Kelly Criterion'] = win_rate - (1 - win_rate) / (pl[pl > 0].mean() / -pl[pl < 0].mean())
-
-    s.loc['_strategy'] = strategy_instance
-    s.loc['_equity_curve'] = equity_df
-    s.loc['_trades'] = trades_df
+    stat_items.append(('Avg. Trade [%]', mean_return * 100))
+    stat_items.append(('Max. Trade Duration', _round_timedelta(durations.max())))
+    stat_items.append(('Avg. Trade Duration', _round_timedelta(durations.mean())))
+    profit_factor = returns[returns > 0].sum() / (abs(returns[returns < 0].sum()) or np.nan)  # noqa: E501
+    stat_items.append(('Profit Factor', profit_factor))
+    expectancy = returns.mean() * 100
+    stat_items.append(('Expectancy [%]', expectancy))
+    sqn = np.sqrt(n_trades) * pl.mean() / (pl.std() or np.nan)
+    stat_items.append(('SQN', sqn))
+    kelly = win_rate - (1 - win_rate) / (pl[pl > 0].mean() / -pl[pl < 0].mean())
+    stat_items.append(('Kelly Criterion', kelly))
+
+    stat_items.extend([
+        ('_strategy', strategy_instance),
+        ('_equity_curve', equity_df),
+        ('_trades', trades_df),
+    ])
+
+    labels, values = zip(*stat_items)
+    s = pd.Series(values, index=labels, dtype=object)
 
     s = _Stats(s)
     return s
diff --git a/backtesting/_util.py b/backtesting/_util.py
index 123abe4e..1edd007a 100644
--- a/backtesting/_util.py
+++ b/backtesting/_util.py
@@ -216,6 +216,11 @@ def __get_array(self, key) -> _Array:
             arr = self.__cache[key] = cast(_Array, self.__arrays[key][:self.__len])
         return arr
 
+    def _current_value(self, key: str):
+        if self.__len <= 0:
+            raise IndexError("No data available")
+        return self.__arrays[key][self.__len - 1]
+
     @property
     def Open(self) -> _Array:
         return self.__get_array('Open')
diff --git a/backtesting/backtesting.py b/backtesting/backtesting.py
index cbc21e33..725d8655 100644
--- a/backtesting/backtesting.py
+++ b/backtesting/backtesting.py
@@ -13,7 +13,7 @@
 from abc import ABCMeta, abstractmethod
 from copy import copy
 from difflib import get_close_matches
-from functools import lru_cache, partial
+from functools import cached_property, lru_cache, partial
 from itertools import chain, product, repeat
 from math import copysign
 from numbers import Number
@@ -312,7 +312,7 @@ def position(self) -> 'Position':
     @property
     def orders(self) -> 'Tuple[Order, ...]':
         """List of orders (see `Order`) waiting for execution."""
-        return _Orders(self._broker.orders)
+        return tuple(self._broker.orders)
 
     @property
     def trades(self) -> 'Tuple[Trade, ...]':
@@ -325,27 +325,6 @@ def closed_trades(self) -> 'Tuple[Trade, ...]':
         return tuple(self._broker.closed_trades)
 
 
-class _Orders(tuple):
-    """
-    TODO: remove this class. Only for deprecation.
-    """
-    def cancel(self):
-        """Cancel all non-contingent (i.e. SL/TP) orders."""
-        for order in self:
-            if not order.is_contingent:
-                order.cancel()
-
-    def __getattr__(self, item):
-        # TODO: Warn on deprecations from the previous version. Remove in the next.
-        removed_attrs = ('entry', 'set_entry', 'is_long', 'is_short',
-                         'sl', 'tp', 'set_sl', 'set_tp')
-        if item in removed_attrs:
-            raise AttributeError(f'Strategy.orders.{"/.".join(removed_attrs)} were removed in'
-                                 'Backtesting 0.2.0. '
-                                 'Use `Order` API instead. See docs.')
-        raise AttributeError(f"'tuple' object has no attribute {item!r}")
-
-
 class Position:
     """
     Currently held asset position, available as
@@ -365,17 +344,17 @@ def __bool__(self):
     @property
     def size(self) -> float:
         """Position size in units of asset. Negative if position is short."""
-        return sum(trade.size for trade in self.__broker.trades)
+        return self.__broker._open_trade_size_sum
 
     @property
     def pl(self) -> float:
         """Profit (positive) or loss (negative) of the current position in cash units."""
-        return sum(trade.pl for trade in self.__broker.trades)
+        return self.__broker.unrealized_pl
 
     @property
     def pl_pct(self) -> float:
         """Profit (positive) or loss (negative) of the current position in percent."""
-        total_invested = sum(trade.entry_price * abs(trade.size) for trade in self.__broker.trades)
+        total_invested = self.__broker._open_trade_entry_abs_value_sum
         return (self.pl / total_invested) * 100 if total_invested else 0
 
     @property
@@ -832,6 +811,30 @@ def new_order(self,
 
         return order
 
+    @cached_property
+    def _open_trade_size_sum(self) -> int:
+        return sum(int(trade.size) for trade in self.trades)
+
+    @cached_property
+    def _open_trade_entry_value_sum(self) -> float:
+        return sum(trade.size * trade.entry_price for trade in self.trades)
+
+    @cached_property
+    def _open_trade_entry_abs_value_sum(self) -> float:
+        return sum(abs(trade.size) * trade.entry_price for trade in self.trades)
+
+    def _clear_trade_caches(self) -> None:
+        self.__dict__.pop('_open_trade_size_sum', None)
+        self.__dict__.pop('_open_trade_entry_value_sum', None)
+        self.__dict__.pop('_open_trade_entry_abs_value_sum', None)
+
+    @property
+    def unrealized_pl(self) -> float:
+        if not self.trades:
+            return 0.0
+        current_price = float(self._data._current_value("Close"))
+        return current_price * self._open_trade_size_sum - self._open_trade_entry_value_sum
+
     @property
     def last_price(self) -> float:
         """ Price at the last (current) close. """
@@ -846,7 +849,7 @@ def _adjusted_price(self, size=None, price=None) -> float:
 
     @property
     def equity(self) -> float:
-        return self._cash + sum(trade.pl for trade in self.trades)
+        return self._cash + self.unrealized_pl
 
     @property
     def margin_available(self) -> float:
@@ -873,7 +876,7 @@ def next(self):
 
     def _process_orders(self):
         data = self._data
-        open, high, low = data.Open[-1], data.High[-1], data.Low[-1]
+        open, high, low = data._current_value("Open"), data._current_value("High"), data._current_value("Low")
         reprocess_orders = False
 
         # Process orders
@@ -886,7 +889,9 @@ def _process_orders(self):
             # Check if stop condition was hit
             stop_price = order.stop
             if stop_price:
-                is_stop_hit = ((high >= stop_price) if order.is_long else (low <= stop_price))
+                is_stop_hit = (
+                    high >= stop_price if order.is_long else low <= stop_price
+                )
                 if not is_stop_hit:
                     continue
 
@@ -897,7 +902,9 @@ def _process_orders(self):
             # Determine purchase price.
             # Check if limit order can be filled.
             if order.limit:
-                is_limit_hit = low <= order.limit if order.is_long else high >= order.limit
+                is_limit_hit = (
+                    low <= order.limit if order.is_long else high >= order.limit
+                )
                 # When stop and limit are hit within the same bar, we pessimistically
                 # assume limit was hit before the stop (i.e. "before it counts")
                 is_limit_hit_before_stop = (is_limit_hit and
@@ -908,14 +915,20 @@ def _process_orders(self):
                     continue
 
                 # stop_price, if set, was hit within this bar
-                price = (min(stop_price or open, order.limit)
-                         if order.is_long else
-                         max(stop_price or open, order.limit))
+                price = (
+                    min(stop_price or open, order.limit)
+                    if order.is_long
+                    else max(stop_price or open, order.limit)
+                )
             else:
                 # Market-if-touched / market order
                 # Contingent orders always on next open
                 prev_close = data.Close[-2]
-                price = prev_close if self._trade_on_close and not order.is_contingent else open
+                price = (
+                    prev_close
+                    if self._trade_on_close and not order.is_contingent
+                    else open
+                )
                 if stop_price:
                     price = max(price, stop_price) if order.is_long else min(price, stop_price)
 
@@ -1026,12 +1039,28 @@ def _process_orders(self):
                     reprocess_orders = True
                 # Order.stop and TP hit within the same bar, but SL wasn't. This case
                 # is not ambiguous, because stop and TP go in the same price direction.
-                elif stop_price and not order.limit and order.tp and (
-                        (order.is_long and order.tp <= high and (order.sl or -np.inf) < low) or
-                        (order.is_short and order.tp >= low and (order.sl or np.inf) > high)):
+                elif (
+                    stop_price
+                    and not order.limit
+                    and order.tp
+                    and (
+                        (
+                            order.is_long
+                            and order.tp <= high
+                            and (order.sl or -np.inf) < low
+                        )
+                        or (
+                            order.is_short
+                            and order.tp >= low
+                            and (order.sl or np.inf) > high
+                        )
+                    )
+                ):
                     reprocess_orders = True
-                elif (low <= (order.sl or -np.inf) <= high or
-                      low <= (order.tp or -np.inf) <= high):
+                elif (
+                    low <= (order.sl or -np.inf) <= high
+                    or low <= (order.tp or -np.inf) <= high
+                ):
                     warnings.warn(
                         f"({data.index[-1]}) A contingent SL/TP order would execute in the "
                         "same bar its parent stop/limit order was turned into a trade. "
@@ -1051,6 +1080,7 @@ def _process_orders(self):
     def _reduce_trade(self, trade: Trade, price: float, size: float, time_index: int):
         assert trade.size * size < 0
         assert abs(trade.size) >= abs(size)
+        self._clear_trade_caches()
 
         size_left = trade.size + size
         assert size_left * trade.size >= 0
@@ -1071,6 +1101,7 @@ def _reduce_trade(self, trade: Trade, price: float, size: float, time_index: int
             self._close_trade(close_trade, price, time_index)
 
     def _close_trade(self, trade: Trade, price: float, time_index: int):
+        self._clear_trade_caches()
         self.trades.remove(trade)
         if trade._sl_order:
             self.orders.remove(trade._sl_order)
@@ -1094,6 +1125,7 @@ def _open_trade(self, price: float, size: int,
         self.trades.append(trade)
         # Apply broker commission at trade open
         self._cash -= self._commission(size, price)
+        self._clear_trade_caches()
         # Create SL/TP (bracket) orders.
         if tp:
             trade.tp = tp
diff --git a/backtesting/test/_test.py b/backtesting/test/_test.py
index 2a187a79..3fe3ebf2 100644
--- a/backtesting/test/_test.py
+++ b/backtesting/test/_test.py
@@ -173,7 +173,6 @@ def next(self, _FEW_DAYS=pd.Timedelta('3 days')):  # noqa: N803
         self.position.is_long
 
         if crossover(self.sma, self.data.Close):
-            self.orders.cancel()  # cancels only non-contingent
             price = self.data.Close[-1]
             sl, tp = 1.05 * price, .9 * price
 