OSIRIS NEXUS — The Invisible Eye.
Timeframe
5m
Direction
Long Only
Stoploss
-8.0%
Trailing Stop
Yes
ROI
0m: 15.0%, 30m: 8.0%, 60m: 4.0%, 120m: 2.0%
Interface Version
3
Startup Candles
200
Indicators
7
freqtrade/freqtrade-strategies
Strategy 003 author@: Gerald Lonlas github@: https://github.com/freqtrade/freqtrade-strategies
"""
OSIRIS NEXUS STRATEGY v1.0 — The Invisible Eye
================================================================
8 Proprietary Algorithms from Market Microstructure, Information Theory,
Fractal Geometry and Statistical Physics.
FINDS WHAT NOBODY ELSE CAN SEE.
These algorithms detect invisible patterns that conventional technical
analysis (RSI, MACD, Bollinger, etc.) cannot perceive. They analyze
the STRUCTURE of price/volume data at a mathematical level that reveals
institutional activity, regime changes, and predictability windows
BEFORE they manifest in price.
PROPRIETARY ALGORITHMS (100% original, academic-grade):
1. VPIN Proxy — Informed trading pressure via bulk volume classification
Based on Easley, López de Prado & O'Hara (2012)
2. Variance Ratio Regime — Trend vs mean-reversion regime detector
Based on Lo & MacKinlay (1988)
3. Fractal Efficiency Ratio — Price path cleanness measurement
Inspired by Mandelbrot's fractal market hypothesis
4. Vol-of-Vol Pulse — 2nd-order volatility regime detector
Detects when volatility ITSELF becomes volatile
5. Microstructure Noise Ratio — Parkinson/CC vol reveals hidden flow
Based on realized volatility literature (Andersen & Bollerslev)
6. Volume Surprise Index — Log-normal volume anomaly detection
Mandelbrot's insight: vol clusters predict continuation
7. Autocorrelation Cascade — Fibonacci-lag momentum persistence map
Nobody else maps autocorrelation PATTERNS across lag structures
8. Entropy Pulse Detector — Shannon entropy predictability window
When the market becomes predictable = ENTRY
ADAPTIVE EXIT SYSTEM:
- Dynamic ATR-based stop (tunable multiplier)
- R:R target take-profit (default 3:1)
- Progressive trailing: break-even at 1R → lock at 1.5R → TP at 3R
- Time-based safety exits
REGIME-ADAPTIVE:
- Variance Ratio > 1: MOMENTUM mode (trade breakouts)
- Variance Ratio < 1: MEAN-REVERSION mode (fade extremes)
- Variance Ratio ≈ 1: Random walk → no trade
TARGET: 10 trades/day | 80% WR | 3:1 R:R
100% proprietary. Developed exclusively for OSIRIS.
The eye that sees the invisible.
"""
import logging
import numpy as np
from pandas import DataFrame
from typing import Optional
from freqtrade.strategy import IStrategy, merge_informative_pair
from freqtrade.strategy import CategoricalParameter, DecimalParameter, IntParameter
from freqtrade.persistence import Trade
import talib.abstract as ta
try:
    from freqtrade.strategy import stoploss_from_open
except ImportError:
    # Fallback shim for freqtrade versions that do not export the helper.
    def stoploss_from_open(open_relative_stop, current_profit, is_short=False):
        """Convert a stop level expressed relative to the OPEN price into a
        stoploss value relative to the CURRENT rate.

        :param open_relative_stop: desired stop distance from the open price
            (e.g. 0.05 locks in 5% above open for a long).
        :param current_profit: current profit ratio of the trade.
        :param is_short: True for short trades (mirrored formula).
        :return: stoploss ratio relative to the current rate.
        """
        # The formula divides by (1 + current_profit) for longs and
        # (1 - current_profit) for shorts, so the only degenerate inputs
        # are current_profit == -1 (long) and current_profit == 1 (short).
        # (A previous revision guarded current_profit == 0, which wrongly
        # discarded a perfectly valid break-even computation.)
        if (current_profit == -1 and not is_short) or (
            is_short and current_profit == 1
        ):
            return 1
        if is_short:
            return -1 + ((1 - open_relative_stop) / (1 - current_profit))
        return 1 - ((1 + open_relative_stop) / (1 + current_profit))


logger = logging.getLogger(__name__)
class OsirisNexusStrategy(IStrategy):
    """
    OSIRIS NEXUS — The Invisible Eye.

    8 proprietary algorithms based on market microstructure theory,
    information theory, fractal geometry and statistical physics.
    Detects patterns invisible to conventional technical analysis.

    Regime-adaptive: automatically switches between momentum and
    mean-reversion modes based on Variance Ratio analysis.
    """

    INTERFACE_VERSION = 3
    can_short = False
    timeframe = "5m"

    # ROI safety net (hyperopt tunes)
    minimal_roi = {
        "0": 0.15,
        "30": 0.08,
        "60": 0.04,
        "120": 0.02,
    }

    # Hard stoploss safety (hyperopt tunes)
    stoploss = -0.08

    # Trailing (hyperopt tunes)
    trailing_stop = True
    trailing_stop_positive = 0.01
    trailing_stop_positive_offset = 0.025
    trailing_only_offset_is_reached = True

    # Enable ATR-based progressive stoploss
    use_custom_stoploss = True

    # Largest lookback used below is the 100-candle rolling means
    # (vvp/mnr) on top of shorter windows; 200 gives headroom.
    startup_candle_count = 200
    process_only_new_candles = True

    # ===================================================================
    # HYPEROPT PARAMETERS — BUY (16 parameters)
    # ===================================================================
    # Core score threshold (max possible ~23)
    buy_score_min = IntParameter(2, 16, default=6, space="buy", optimize=True)
    # ATR multiplier for dynamic stop distance
    buy_stop_atr = DecimalParameter(
        0.5, 3.0, default=1.5, decimals=1, space="buy", optimize=True
    )
    # R:R target multiplier (TP = stop_distance * this)
    buy_rr_target = DecimalParameter(
        2.0, 5.0, default=3.0, decimals=1, space="buy", optimize=True
    )
    # VPIN: Informed trading intensity threshold
    buy_vpin_min = DecimalParameter(
        0.1, 0.8, default=0.4, decimals=2, space="buy", optimize=True
    )
    # Variance Ratio: deviation from 1.0 (regime strength)
    buy_vr_dev = DecimalParameter(
        0.03, 0.5, default=0.15, decimals=2, space="buy", optimize=True
    )
    # Fractal Efficiency: min cleanness for momentum entry
    buy_fer_min = DecimalParameter(
        0.1, 0.8, default=0.4, decimals=1, space="buy", optimize=True
    )
    # Vol-of-Vol pulse threshold
    buy_vvp_min = DecimalParameter(
        0.3, 3.0, default=1.0, decimals=1, space="buy", optimize=True
    )
    # Microstructure Noise Ratio threshold
    buy_mnr_min = DecimalParameter(
        0.8, 3.0, default=1.3, decimals=1, space="buy", optimize=True
    )
    # Volume Surprise threshold
    buy_vsi_min = DecimalParameter(
        0.3, 3.0, default=1.0, decimals=1, space="buy", optimize=True
    )
    # Autocorrelation Cascade threshold
    buy_acc_min = DecimalParameter(
        -0.1, 0.5, default=0.05, decimals=2, space="buy", optimize=True
    )
    # Entropy: max normalized entropy (lower = more predictable)
    buy_epd_max = DecimalParameter(
        0.3, 1.0, default=0.75, decimals=2, space="buy", optimize=True
    )
    # RSI boundaries
    buy_rsi_min = IntParameter(15, 50, default=25, space="buy", optimize=True)
    buy_rsi_max = IntParameter(55, 85, default=75, space="buy", optimize=True)
    # Allow mean-reversion mode entries
    buy_allow_meanrev = CategoricalParameter(
        [True, False], default=True, space="buy", optimize=True
    )
    # Max trade duration hours (used by custom_exit time management)
    buy_max_hours = IntParameter(2, 24, default=8, space="buy", optimize=True)
    # Require EMA trend alignment
    buy_ema_filter = CategoricalParameter(
        [True, False], default=False, space="buy", optimize=True
    )

    # ===================================================================
    # HYPEROPT PARAMETERS — SELL (2 parameters)
    # ===================================================================
    sell_score_min = IntParameter(2, 8, default=4, space="sell", optimize=True)
    sell_rsi_exit = IntParameter(65, 92, default=78, space="sell", optimize=True)

    # ===================================================================
    # INFORMATIVE PAIRS
    # ===================================================================
    def informative_pairs(self):
        """Request 15m and 1h informative candles for every whitelisted pair.

        The timeframes listed here must match the ones merged in
        populate_indicators().

        :return: list of (pair, timeframe) tuples.
        """
        pairs = self.dp.current_whitelist()
        informative = []
        for pair in pairs:
            informative.append((pair, "15m"))
            informative.append((pair, "1h"))
        return informative

    # ===================================================================
    # ALGORITHM 1: VPIN PROXY
    # Volume-Synchronized Probability of Informed Trading
    # Easley, López de Prado & O'Hara (2012)
    #
    # Bulk Volume Classification: each candle's volume is classified as
    # buy/sell using the close position within the High-Low range.
    # High VPIN = informed traders are active = follow them.
    # ===================================================================
    def _calc_vpin(self, df: DataFrame) -> DataFrame:
        """Add VPIN-proxy columns: ``vpin`` (absolute order-flow imbalance
        over a 50-candle bucket), ``vpin_dir`` (signed imbalance) and
        ``vpin_accel`` (10-candle change in vpin)."""
        # Replace zero-range candles to avoid division by zero.
        hl_range = (df["high"] - df["low"]).replace(0, 0.0001)
        # Close position in the H-L range: 1 = closed at high, 0 = at low.
        close_pos = (df["close"] - df["low"]) / hl_range
        buy_vol = close_pos * df["volume"]
        sell_vol = (1 - close_pos) * df["volume"]
        bucket = 50
        buy_sum = buy_vol.rolling(bucket).sum()
        sell_sum = sell_vol.rolling(bucket).sum()
        # Guard empty buckets (zero total volume) against division by zero.
        total = (buy_sum + sell_sum).replace(0, 1)
        df["vpin"] = (buy_sum - sell_sum).abs() / total
        df["vpin_dir"] = (buy_sum - sell_sum) / total
        df["vpin_accel"] = df["vpin"] - df["vpin"].shift(10)
        return df

    # ===================================================================
    # ALGORITHM 2: VARIANCE RATIO REGIME CLASSIFIER
    # Lo & MacKinlay (1988)
    #
    # If returns are random walk, Var(q-period) = q * Var(1-period).
    # VR > 1 → trending (momentum works)
    # VR < 1 → mean-reverting (fade moves)
    # VR ≈ 1 → random walk (DON'T trade)
    # ===================================================================
    def _calc_variance_ratio(self, df: DataFrame) -> DataFrame:
        """Add variance-ratio columns: ``vr`` (5-period/1-period variance
        ratio over a 50-candle window), ``vr_dev`` (|vr - 1|),
        ``vr_trending`` (0/1 flag) and ``vr_smooth`` (EMA-10 of vr)."""
        returns = df["close"].pct_change()
        # Floor the 1-period variance to avoid division by zero.
        var_1 = returns.rolling(50).var().replace(0, 1e-10)
        ret_5 = df["close"].pct_change(5)
        var_5 = ret_5.rolling(50).var()
        df["vr"] = var_5 / (5 * var_1)
        df["vr_dev"] = (df["vr"] - 1.0).abs()
        df["vr_trending"] = (df["vr"] > 1.0).astype(int)
        df["vr_smooth"] = ta.EMA(df["vr"], timeperiod=10)
        return df

    # ===================================================================
    # ALGORITHM 3: FRACTAL EFFICIENCY RATIO (FER)
    # Inspired by Mandelbrot's fractal market hypothesis.
    #
    # Efficiency = displacement / path_length
    # 1.0 = perfectly straight move (maximum efficiency)
    # 0.0 = going nowhere (pure noise)
    # ===================================================================
    def _calc_fractal_efficiency(self, df: DataFrame) -> DataFrame:
        """Add ``fer`` (20-candle displacement / path length, in [0, 1])
        and ``fer_dir`` (fer signed by the direction of the net move)."""
        window = 20
        # Sum of absolute candle-to-candle moves = total path travelled.
        path_length = df["close"].diff().abs().rolling(window).sum()
        displacement = (df["close"] - df["close"].shift(window)).abs()
        df["fer"] = displacement / path_length.replace(0, 0.0001)
        net_change = df["close"] - df["close"].shift(window)
        df["fer_dir"] = np.sign(net_change) * df["fer"]
        return df

    # ===================================================================
    # ALGORITHM 4: VOL-OF-VOL PULSE (VVP)
    # 2nd-order volatility signal.
    #
    # When volatility ITSELF becomes volatile, a large directional
    # move is imminent. This is the derivative of risk.
    # ===================================================================
    def _calc_vol_of_vol(self, df: DataFrame) -> DataFrame:
        """Add ``vvp`` (std of normalized ATR vs its 100-candle mean) and
        ``vvp_dir`` (vvp signed by 10-candle price momentum).

        Requires ``df["atr"]`` — populated by _calc_standard(), which
        populate_indicators() runs first.
        """
        natr = df["atr"] / df["close"].replace(0, 1) * 100
        vov = natr.rolling(20).std()
        vov_mean = vov.rolling(100).mean().replace(0, 0.0001)
        df["vvp"] = vov / vov_mean
        price_mom = df["close"].pct_change(10)
        df["vvp_dir"] = df["vvp"] * np.sign(price_mom)
        return df

    # ===================================================================
    # ALGORITHM 5: MICROSTRUCTURE NOISE RATIO (MNR)
    # Based on Andersen & Bollerslev realized volatility research.
    #
    # Parkinson volatility (H-L) captures intra-bar movement.
    # Close-Close volatility only sees endpoints.
    # Ratio reveals HIDDEN information flow within candles.
    # High ratio = institutional HFT activity within bars.
    # ===================================================================
    def _calc_noise_ratio(self, df: DataFrame) -> DataFrame:
        """Add ``mnr`` (Parkinson vol / close-close vol over 30 candles)
        and ``mnr_z`` (z-score of mnr against its 100-candle history)."""
        window = 30
        cc_vol = df["close"].pct_change().rolling(window).std().replace(0, 1e-10)
        log_hl = np.log(df["high"] / df["low"].replace(0, 0.0001))
        # Parkinson (1980) range-based volatility estimator.
        parkinson_vol = np.sqrt(
            (log_hl ** 2).rolling(window).mean() / (4 * np.log(2))
        )
        df["mnr"] = parkinson_vol / cc_vol
        mnr_mean = df["mnr"].rolling(100).mean()
        mnr_std = df["mnr"].rolling(100).std().replace(0, 0.0001)
        df["mnr_z"] = (df["mnr"] - mnr_mean) / mnr_std
        return df

    # ===================================================================
    # ALGORITHM 6: VOLUME SURPRISE INDEX (VSI)
    # Based on Mandelbrot's insight: volatility/volume clusters.
    #
    # Models volume as log-normal distribution.
    # Z-score in log space = "surprise" factor.
    # Volume surprises predict price continuation.
    # ===================================================================
    def _calc_volume_surprise(self, df: DataFrame) -> DataFrame:
        """Add ``vsi`` (z-score of log-volume over 50 candles), ``vsi_dir``
        (vsi signed by candle direction) and ``vsi_cluster`` (count of
        vsi > 1 candles in the last 5)."""
        log_vol = np.log(df["volume"].replace(0, 1))
        log_mean = log_vol.rolling(50).mean()
        log_std = log_vol.rolling(50).std().replace(0, 0.0001)
        df["vsi"] = (log_vol - log_mean) / log_std
        price_dir = np.sign(df["close"] - df["open"])
        df["vsi_dir"] = df["vsi"] * price_dir
        df["vsi_cluster"] = (df["vsi"] > 1.0).astype(int).rolling(5).sum()
        return df

    # ===================================================================
    # ALGORITHM 7: AUTOCORRELATION CASCADE (ACC)
    # NOBODY else looks at the PATTERN of autocorrelation across lags.
    #
    # Computes return autocorrelation at Fibonacci lags: 1,2,3,5,8,13
    # Positive cascade = momentum persistence (ride the trend)
    # Negative cascade = mean-reversion (fade the move)
    # The STRUCTURE of correlation across lags is the key insight.
    # ===================================================================
    def _calc_autocorrelation_cascade(self, df: DataFrame) -> DataFrame:
        """Add ``acc`` (mean rolling autocorrelation of returns across
        Fibonacci lags) and ``acc_change`` (5-candle change in acc)."""
        returns = df["close"].pct_change()
        window = 50
        lags = [1, 2, 3, 5, 8, 13]
        cascade_sum = np.zeros(len(df))
        for lag in lags:
            shifted = returns.shift(lag)
            corr = returns.rolling(window).corr(shifted)
            # NaNs (warm-up rows, zero-variance windows) count as zero.
            cascade_sum += corr.fillna(0).values
        df["acc"] = cascade_sum / len(lags)
        df["acc_change"] = df["acc"] - df["acc"].shift(5)
        return df

    # ===================================================================
    # ALGORITHM 8: ENTROPY PULSE DETECTOR (EPD)
    # Shannon entropy applied to discretized price returns.
    #
    # High entropy = maximum uncertainty = market is RANDOM = AVOID
    # Low entropy = market is PREDICTABLE = TRADE
    # Entropy DROP = transition from chaos to order = ENTRY WINDOW
    #
    # This is information theory applied to markets — measures the
    # "bits of surprise" per candle.
    # ===================================================================
    def _calc_entropy_pulse(self, df: DataFrame) -> DataFrame:
        """Add ``epd`` (Shannon entropy of 10-binned returns over a
        30-candle window, in bits), ``epd_norm`` (epd / log2(10), so
        roughly in [0, 1]) and ``epd_drop`` (5-candle entropy decrease)."""
        returns = df["close"].pct_change().values
        window = 30
        n_bins = 10
        # Maximum possible entropy for n_bins equiprobable bins.
        max_entropy = np.log2(n_bins)
        entropy_values = np.full(len(returns), np.nan)
        # Python-level loop: np.histogram has no rolling equivalent.
        for i in range(window, len(returns)):
            w = returns[i - window: i]
            if np.any(np.isnan(w)):
                continue
            hist = np.histogram(w, bins=n_bins)[0]
            total = hist.sum()
            if total == 0:
                continue
            probs = hist / total
            # Only positive probabilities contribute (0 * log 0 := 0).
            mask = probs > 0
            entropy_values[i] = -np.sum(probs[mask] * np.log2(probs[mask]))
        df["epd"] = entropy_values
        df["epd_norm"] = df["epd"] / max_entropy
        df["epd_drop"] = df["epd_norm"].shift(5) - df["epd_norm"]
        return df

    # ===================================================================
    # STANDARD TECHNICAL INDICATORS
    # ===================================================================
    def _calc_standard(self, df: DataFrame) -> DataFrame:
        """Add the conventional TA columns used by the scoring system:
        RSI-14, MACD(12,26,9), EMA 8/21/50 + alignment flag, ADX-14,
        ATR-14 and a 50-candle rolling VWAP."""
        df["rsi"] = ta.RSI(df, timeperiod=14)
        macd = ta.MACD(df, fastperiod=12, slowperiod=26, signalperiod=9)
        df["macd"] = macd["macd"]
        df["macd_signal"] = macd["macdsignal"]
        df["macd_hist"] = macd["macdhist"]
        df["ema_8"] = ta.EMA(df, timeperiod=8)
        df["ema_21"] = ta.EMA(df, timeperiod=21)
        df["ema_50"] = ta.EMA(df, timeperiod=50)
        # 1 when the fast/mid/slow EMAs are stacked bullishly.
        df["ema_aligned"] = (
            (df["ema_8"] > df["ema_21"]) & (df["ema_21"] > df["ema_50"])
        ).astype(int)
        df["adx"] = ta.ADX(df, timeperiod=14)
        df["atr"] = ta.ATR(df, timeperiod=14)
        # Rolling VWAP from the typical price; volume floored to avoid /0.
        tp = (df["high"] + df["low"] + df["close"]) / 3
        vwap_vol = df["volume"].rolling(50).sum().replace(0, 1)
        df["vwap"] = (tp * df["volume"]).rolling(50).sum() / vwap_vol
        return df

    # ===================================================================
    # POPULATE INDICATORS
    # ===================================================================
    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Compute all indicators: standard TA first (the proprietary
        algorithms read ``atr``), then merged 15m/1h informative data,
        then the 8 proprietary algorithms."""
        # Standard TA first (RSI, ADX, ATR needed by proprietary algos)
        dataframe = self._calc_standard(dataframe)
        # Multi-timeframe informative data
        if self.dp:
            inf_15m = self.dp.get_pair_dataframe(
                pair=metadata["pair"], timeframe="15m"
            )
            if not inf_15m.empty:
                inf_15m["rsi"] = ta.RSI(inf_15m, timeperiod=14)
                inf_15m["ema_21"] = ta.EMA(inf_15m, timeperiod=21)
                dataframe = merge_informative_pair(
                    dataframe, inf_15m, self.timeframe, "15m", ffill=True
                )
            inf_1h = self.dp.get_pair_dataframe(
                pair=metadata["pair"], timeframe="1h"
            )
            if not inf_1h.empty:
                inf_1h["rsi"] = ta.RSI(inf_1h, timeperiod=14)
                inf_1h["ema_21"] = ta.EMA(inf_1h, timeperiod=21)
                dataframe = merge_informative_pair(
                    dataframe, inf_1h, self.timeframe, "1h", ffill=True
                )
        # 8 Proprietary Algorithms — The Invisible Eye
        dataframe = self._calc_vpin(dataframe)
        dataframe = self._calc_variance_ratio(dataframe)
        dataframe = self._calc_fractal_efficiency(dataframe)
        dataframe = self._calc_vol_of_vol(dataframe)
        dataframe = self._calc_noise_ratio(dataframe)
        dataframe = self._calc_volume_surprise(dataframe)
        dataframe = self._calc_autocorrelation_cascade(dataframe)
        dataframe = self._calc_entropy_pulse(dataframe)
        return dataframe

    # ===================================================================
    # ENTRY SCORING SYSTEM (max ~23 pts)
    # Regime-adaptive: Variance Ratio determines mode
    # ===================================================================
    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Vectorized per-candle long-entry scoring.

        Each of the 8 proprietary signals plus bonus TA filters adds a
        weighted point value; ``enter_long`` is set where the total
        reaches buy_score_min and the candle traded volume.
        """
        score = np.zeros(len(dataframe))
        # Regime split on smoothed Variance Ratio; NaN warm-up rows are
        # treated as 1.0 (random walk) and fall into neither regime.
        is_trending = dataframe["vr_smooth"].fillna(1.0) > 1.0
        is_meanrev = dataframe["vr_smooth"].fillna(1.0) < 1.0
        # 1. VPIN: Informed buy pressure detected (3 pts)
        score += np.where(
            (dataframe["vpin"] > self.buy_vpin_min.value)
            & (dataframe["vpin_dir"] > 0),
            3, 0,
        )
        # 2. VARIANCE RATIO: Strong regime signal (2 pts)
        score += np.where(
            dataframe["vr_dev"] > self.buy_vr_dev.value, 2, 0
        )
        # 3A. FRACTAL EFFICIENCY: Clean up-trend — MOMENTUM MODE (3 pts)
        score += np.where(
            is_trending
            & (dataframe["fer"] > self.buy_fer_min.value)
            & (dataframe["fer_dir"] > 0),
            3, 0,
        )
        # 3B. FRACTAL EFFICIENCY: Choppy+oversold — MEAN-REV MODE (3 pts)
        if self.buy_allow_meanrev.value:
            score += np.where(
                is_meanrev
                & (dataframe["fer"] < 0.3)
                & (dataframe["rsi"] < 35),
                3, 0,
            )
        # 4. VOL-OF-VOL: Volatility regime change + bullish (2 pts)
        score += np.where(
            (dataframe["vvp"] > self.buy_vvp_min.value)
            & (dataframe["vvp_dir"] > 0),
            2, 0,
        )
        # 5. MICROSTRUCTURE NOISE: Institutional activity (2 pts)
        score += np.where(
            dataframe["mnr"] > self.buy_mnr_min.value, 2, 0
        )
        # 6. VOLUME SURPRISE: Anomalous buy volume (2 pts)
        score += np.where(
            (dataframe["vsi"] > self.buy_vsi_min.value)
            & (dataframe["vsi_dir"] > 0),
            2, 0,
        )
        # 7. AUTOCORRELATION: Momentum persistence (2 pts)
        score += np.where(
            dataframe["acc"] > self.buy_acc_min.value, 2, 0
        )
        # 8. ENTROPY: Market becoming predictable (2 pts)
        # NaN warm-up rows fill as 1.0 (max entropy) so they never score.
        score += np.where(
            dataframe["epd_norm"].fillna(1.0) < self.buy_epd_max.value, 2, 0
        )
        # BONUS: RSI in zone (1 pt)
        score += np.where(
            (dataframe["rsi"] > self.buy_rsi_min.value)
            & (dataframe["rsi"] < self.buy_rsi_max.value),
            1, 0,
        )
        # BONUS: MACD bullish (1 pt)
        score += np.where(dataframe["macd_hist"] > 0, 1, 0)
        # BONUS: Price above VWAP (1 pt)
        score += np.where(dataframe["close"] > dataframe["vwap"], 1, 0)
        # BONUS: VPIN accelerating (1 pt)
        score += np.where(dataframe["vpin_accel"] > 0.05, 1, 0)
        # CONDITIONAL: EMA alignment (1 pt)
        if self.buy_ema_filter.value:
            score += np.where(dataframe["ema_aligned"] == 1, 1, 0)
        # === ENTRY SIGNAL ===
        dataframe.loc[
            (score >= self.buy_score_min.value) & (dataframe["volume"] > 0),
            "enter_long",
        ] = 1
        return dataframe

    # ===================================================================
    # EXIT SCORING SYSTEM (max ~14 pts)
    # ===================================================================
    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Vectorized per-candle exit scoring mirroring the entry system;
        sets ``exit_long`` when the bearish score reaches sell_score_min
        and the candle traded volume."""
        score = np.zeros(len(dataframe))
        # 1. VPIN sell pressure (2 pts)
        score += np.where(
            (dataframe["vpin"] > 0.4) & (dataframe["vpin_dir"] < -0.1),
            2, 0,
        )
        # 2. Entropy spike — chaos returning (2 pts)
        # NaN warm-up fills as 0 here so warm-up rows never trigger exits.
        score += np.where(
            dataframe["epd_norm"].fillna(0) > 0.9, 2, 0
        )
        # 3. Volume surprise sell-side (2 pts)
        score += np.where(
            (dataframe["vsi"] > 1.5) & (dataframe["vsi_dir"] < 0),
            2, 0,
        )
        # 4. Autocorrelation turning negative — momentum dying (2 pts)
        score += np.where(dataframe["acc"] < -0.1, 2, 0)
        # 5. RSI overbought (1 pt)
        score += np.where(dataframe["rsi"] > self.sell_rsi_exit.value, 1, 0)
        # 6. MACD bearish (1 pt)
        score += np.where(dataframe["macd_hist"] < 0, 1, 0)
        # 7. Fractal Efficiency flip bearish (2 pts)
        score += np.where(dataframe["fer_dir"] < -0.3, 2, 0)
        # === EXIT SIGNAL ===
        dataframe.loc[
            (score >= self.sell_score_min.value) & (dataframe["volume"] > 0),
            "exit_long",
        ] = 1
        return dataframe

    # ===================================================================
    # CUSTOM STOPLOSS — ATR-Based Dynamic + Progressive R-Multiple Trail
    # ===================================================================
    def custom_stoploss(
        self,
        pair: str,
        trade: Trade,
        current_time,
        current_rate: float,
        current_profit: float,
        **kwargs,
    ) -> float:
        """ATR-based dynamic stop with progressive R-multiple trailing.

        1R = ATR * buy_stop_atr as a fraction of the open rate.
        >= 1R profit locks break-even, >= 1.5R locks 0.5R, >= 2.5R
        locks 1.5R; otherwise the raw ATR stop (capped at -10%) applies.

        NOTE(review): the -0.10 fallback/cap is wider than the class
        ``stoploss = -0.08``, so the hard stop should fire first —
        confirm this is intentional.
        NOTE(review): stoploss_from_open() sign conventions changed
        across freqtrade versions; verify against the deployed version.
        """
        dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
        if len(dataframe) == 0:
            return -0.10
        last = dataframe.iloc[-1]
        atr = last.get("atr", 0)
        if atr == 0 or trade.open_rate == 0:
            return -0.10
        # Stop distance as a fraction of the entry price (1R).
        stop_dist_pct = (atr * self.buy_stop_atr.value) / trade.open_rate
        # Progressive R-multiple trailing
        if stop_dist_pct > 0 and current_profit > 0:
            r_mult = current_profit / stop_dist_pct
            if r_mult >= 2.5:
                # Lock 1.5R profit — tight trail to TP
                return stoploss_from_open(
                    1.5 * stop_dist_pct, current_profit, is_short=False
                )
            elif r_mult >= 1.5:
                # Lock 0.5R profit
                return stoploss_from_open(
                    0.5 * stop_dist_pct, current_profit, is_short=False
                )
            elif r_mult >= 1.0:
                # Break-even (tiny offset keeps the stop just above open)
                return stoploss_from_open(
                    0.001, current_profit, is_short=False
                )
        # Default: ATR-based dynamic stop
        return max(-stop_dist_pct, -0.10)

    # ===================================================================
    # CUSTOM EXIT — R:R Target + Time Management
    # ===================================================================
    def custom_exit(
        self,
        pair: str,
        trade: Trade,
        current_time,
        current_rate: float,
        current_profit: float,
        **kwargs,
    ) -> Optional[str]:
        """Take-profit at the R:R target plus time-based safety exits.

        :return: exit reason tag, or None to keep the trade open.
        """
        dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
        if len(dataframe) == 0:
            return None
        last = dataframe.iloc[-1]
        atr = last.get("atr", 0)
        if atr == 0 or trade.open_rate == 0:
            return None
        # TP target = 1R stop distance times the R:R multiplier.
        stop_dist_pct = (atr * self.buy_stop_atr.value) / trade.open_rate
        target_pct = stop_dist_pct * self.buy_rr_target.value
        # R:R target hit — TAKE PROFIT
        if current_profit >= target_pct:
            return "nexus_tp_target"
        # Time management
        hours = (current_time - trade.open_date_utc).total_seconds() / 3600
        max_hours = self.buy_max_hours.value
        # After max_hours: exit if profitable
        if hours > max_hours and current_profit > 0.003:
            return "nexus_time_profit"
        # Hard time limit: 2x max_hours
        if hours > max_hours * 2:
            return "nexus_time_force"
        return None