OSIRIS PROMETHEUS — Binary Information Physics Engine.
Timeframe
5m
Direction
Long Only
Stoploss
-0.8%
Trailing Stop
Yes
ROI
0m: 1.5%, 15m: 1.0%, 30m: 0.6%, 60m: 0.3%
Interface Version
3
Startup Candles
50
Indicators
3
freqtrade/freqtrade-strategies
Strategy 003 — author: Gerald Lonlas, GitHub: https://github.com/freqtrade/freqtrade-strategies
# pragma pylint: disable=missing-docstring, invalid-name, pointless-string-statement
"""
OSIRIS PROMETHEUS — Binary Information Physics Strategy
========================================================
A COMPLETELY NOVEL trading strategy that uses ZERO traditional indicators.
Every signal is derived from:
1. Binary N-gram Pattern Matching (mined from raw OHLCV)
2. Bayesian Consensus across 4 independent encoding schemes
3. Information Entropy Gradient (order emerging from chaos)
4. Kolmogorov-proxy Compressibility (regime detection)
The strategy loads a pattern library mined by osiris_prometheus_miner.py
and matches current market states against historically profitable patterns.
Target: 80% WR | 2-3:1 R:R | 10+ trades/day
"""
import numpy as np
import pandas as pd
import pickle
import lzma
from pathlib import Path
from typing import Dict
from datetime import datetime
from freqtrade.strategy.interface import IStrategy
from freqtrade.strategy import IntParameter, DecimalParameter
# ╔══════════════════════════════════════════════════════════════════╗
# ║ BINARY ENCODING FUNCTIONS ║
# ║ Identical to miner. Pure raw OHLCV → binary state. ║
# ║ NO moving averages. NO RSI. NO MACD. NO Bollinger. ║
# ╚══════════════════════════════════════════════════════════════════╝
def encode_2bit(close, open_, high, low, volume):
    """2-bit: direction + momentum → 4 states."""
    # high/low/volume are unused here but keep the uniform encoder signature.
    size = len(close)
    # Bit 0: candle closed above its open (direction).
    direction = np.greater(close, open_).astype(np.uint8)
    # Bit 1: close rose versus the previous close (momentum); first bar is 0.
    momentum = np.zeros(size, dtype=np.uint8)
    momentum[1:] = close[1:] > close[:-1]
    return direction | (momentum << 1)
def encode_4bit(close, open_, high, low, volume):
    """4-bit: direction + momentum + vol_change + volume_change → 16 states."""

    def rose(series):
        # 1 where the series increased vs. the previous bar; first bar is 0.
        flag = np.zeros(len(series), dtype=np.uint8)
        flag[1:] = series[1:] > series[:-1]
        return flag

    candle_range = high - low
    # Bit 0: candle closed above its open.
    direction = (close > open_).astype(np.uint8)
    return (
        direction
        | (rose(close) << 1)         # momentum
        | (rose(candle_range) << 2)  # range expansion
        | (rose(volume) << 3)        # volume expansion
    )
def encode_6bit(close, open_, high, low, volume):
    """6-bit: direction + momentum + full_higher + close_pos + range_exp + vol → 64 states."""

    def rose(series):
        # 1 where the series increased vs. the previous bar; first bar is 0.
        flag = np.zeros(len(series), dtype=np.uint8)
        flag[1:] = series[1:] > series[:-1]
        return flag

    candle_range = high - low
    # Bit 0: candle closed above its open.
    direction = (close > open_).astype(np.uint8)
    # Bit 2: both the high and the low stepped up vs. the previous bar.
    full_higher = np.zeros(len(close), dtype=np.uint8)
    full_higher[1:] = (high[1:] > high[:-1]) & (low[1:] > low[:-1])
    # Bit 3: close sits in the upper half of the candle's range.
    close_pos = ((close - low) > (high - close)).astype(np.uint8)
    return (
        direction
        | (rose(close) << 1)         # momentum
        | (full_higher << 2)
        | (close_pos << 3)
        | (rose(candle_range) << 4)  # range expansion
        | (rose(volume) << 5)        # volume expansion
    )
def encode_8bit(close, open_, high, low, volume):
    """8-bit: all features → 256 states."""

    def rose(series):
        # 1 where the series increased vs. the previous bar; first bar is 0.
        flag = np.zeros(len(series), dtype=np.uint8)
        flag[1:] = series[1:] > series[:-1]
        return flag

    candle_range = high - low
    body = np.abs(close - open_)
    # Substitute a tiny epsilon on zero-range (doji) candles so the body
    # comparison below never divides/compares against exactly zero range.
    safe_range = np.where(candle_range > 0, candle_range, 1e-10)
    # Close sits in the upper half of the candle's range.
    close_pos = ((close - low) > (high - close)).astype(np.uint8)
    # Candle body covers more than half of the full range.
    strong_body = (body > 0.5 * safe_range).astype(np.uint8)
    return (
        (close > open_).astype(np.uint8)  # direction
        | (rose(close) << 1)              # momentum
        | (rose(candle_range) << 2)       # range expansion
        | (rose(volume) << 3)             # volume expansion
        | (close_pos << 4)
        | (rose(high) << 5)               # higher high
        | (rose(low) << 6)                # higher low
        | (strong_body << 7)
    )
def ngram_key(states, start, n):
    """Convert N consecutive states into a single integer key."""
    # Treat the N states as digits of a base-256 number, most
    # significant digit first.
    key = 0
    for state in states[start:start + n]:
        key = key * 256 + int(state)
    return key
def rolling_binary_entropy(states, window=30):
    """Shannon entropy of binary state distribution in a sliding window.

    Parameters
    ----------
    states : np.ndarray
        Integer state codes (non-negative, e.g. output of an encode_* fn).
    window : int
        Number of trailing states per window; the first `window` entries
        of the result are NaN (not enough history).

    Returns
    -------
    np.ndarray
        Entropy normalized to [0, 1] by the maximum possible entropy
        log2(n_states); NaN where the window is incomplete.
    """
    n = len(states)
    entropy = np.full(n, np.nan)
    if n == 0:
        # Fix: states.max() on an empty array raises ValueError.
        return entropy
    n_states = int(states.max()) + 1
    # Hoisted out of the loop: max_H depends only on n_states.
    max_H = np.log2(n_states)
    for i in range(window, n):
        # Window is the `window` states strictly before index i (no lookahead).
        w = states[i - window : i]
        counts = np.bincount(w, minlength=n_states).astype(np.float64)
        probs = counts / window
        probs = probs[probs > 0]
        H = -np.sum(probs * np.log2(probs))
        # max_H == 0 only when a single state exists; entropy is 0 then.
        entropy[i] = H / max_H if max_H > 0 else 0
    return entropy
# ╔══════════════════════════════════════════════════════════════════╗
# ║ THE STRATEGY ║
# ╚══════════════════════════════════════════════════════════════════╝
class OsirisPrometheusStrategy(IStrategy):
    """
    OSIRIS PROMETHEUS — Binary Information Physics Engine.

    Uses pattern matching across 4 independent binary encoding schemes
    with Bayesian consensus to achieve high win-rate entries.

    Entry signals come from N-gram patterns mined offline (see the
    module docstring); exits are handled entirely by the trailing stop
    and the time-based ROI table.
    """

    INTERFACE_VERSION = 3
    can_short = False
    timeframe = "5m"

    # ── Hyperopt Parameters ──
    # Consensus scoring
    buy_score_min = DecimalParameter(0.1, 5.0, default=0.3, decimals=1, space="buy")
    buy_diversity_min = IntParameter(1, 4, default=1, space="buy")
    buy_max_wr_min = DecimalParameter(0.55, 0.90, default=0.65, decimals=2, space="buy")
    buy_count_min = IntParameter(1, 8, default=1, space="buy")
    # Entropy filter
    buy_entropy_max = DecimalParameter(0.50, 0.98, default=0.95, decimals=2, space="buy")

    # Stoploss: ~1R for 5m crypto (0.3-0.8%)
    stoploss = -0.008

    # Trailing stop: lock profits after 1R move
    trailing_stop = True
    trailing_stop_positive = 0.003
    trailing_stop_positive_offset = 0.006
    trailing_only_offset_is_reached = True

    # ROI: time-based take profit (2-3R targets)
    # R ≈ 0.3-0.5% → 2R ≈ 0.6-1.0% → 3R ≈ 0.9-1.5%
    minimal_roi = {
        "0": 0.015,   # 1.5% = ~3R (best case)
        "15": 0.010,  # 1.0% = ~2R (after 15 min)
        "30": 0.006,  # 0.6% = ~1.5R (after 30 min)
        "60": 0.003,  # 0.3% = ~1R (after 1 hour)
        "120": 0.001, # Near breakeven (2 hours)
        "240": 0,     # Breakeven at 4 hours
    }
    use_custom_stoploss = False

    # Protections
    process_only_new_candles = True
    startup_candle_count = 50

    # ── Pattern Library (populated by _load_pattern_library) ──
    _pattern_lib = None   # {enc_name: {n: {key: stats}}}
    _meta = None          # miner metadata dict
    _thresholds = None    # miner threshold dict
    _best_r_mult = 2.0    # reward:risk multiple the patterns were mined at

    def __init__(self, config: dict) -> None:
        super().__init__(config)
        self._load_pattern_library()

    def _load_pattern_library(self):
        """Load mined pattern library from pickle.

        Raises FileNotFoundError when the miner output is missing.
        NOTE: pickle.load executes arbitrary code on malicious files —
        only load the locally mined, trusted artifact.
        """
        pkl_path = Path(__file__).parent / "prometheus_patterns.pkl"
        if not pkl_path.exists():
            raise FileNotFoundError(
                f"Pattern library not found: {pkl_path}. "
                "Run osiris_prometheus_miner.py first."
            )
        with open(pkl_path, "rb") as f:
            data = pickle.load(f)
        self._pattern_lib = data["library"]
        self._meta = data.get("meta", {})
        self._thresholds = data.get("thresholds", {})
        self._best_r_mult = self._meta.get("best_r_mult", 2.0)
        # Count total patterns across all encodings and N-gram lengths.
        total = sum(
            len(patterns)
            for enc in self._pattern_lib.values()
            for patterns in enc.values()
        )
        self._total_patterns = total
        print(f"PROMETHEUS: Loaded {total} patterns (R:R={self._best_r_mult}:1)")

    # ── ENCODING CONFIG ──
    # Each encoding has: name, encoder function, N-gram lengths to check
    ENCODING_CONFIG = [
        ("2bit", encode_2bit, [5, 6, 7, 8]),
        ("4bit", encode_4bit, [3, 4, 5, 6]),
        ("6bit", encode_6bit, [3, 4, 5]),
        ("8bit", encode_8bit, [2, 3, 4]),
    ]

    def populate_indicators(self, dataframe: pd.DataFrame, metadata: dict) -> pd.DataFrame:
        """
        Compute binary encodings, pattern consensus scores, and
        information physics metrics. ZERO traditional indicators.
        """
        close = dataframe["close"].values.astype(np.float64)
        open_ = dataframe["open"].values.astype(np.float64)
        high = dataframe["high"].values.astype(np.float64)
        low = dataframe["low"].values.astype(np.float64)
        volume = dataframe["volume"].values.astype(np.float64)
        n_candles = len(dataframe)

        # ── Compute binary states (vectorized) ──
        all_states = {
            "2bit": encode_2bit(close, open_, high, low, volume),
            "4bit": encode_4bit(close, open_, high, low, volume),
            "6bit": encode_6bit(close, open_, high, low, volume),
            "8bit": encode_8bit(close, open_, high, low, volume),
        }

        # ── Pattern matching with Bayesian consensus ──
        total_score = np.zeros(n_candles)
        match_count = np.zeros(n_candles, dtype=np.int32)
        max_wr = np.zeros(n_candles)
        # Track which encodings matched per candle (one column per encoding).
        enc_matched = np.zeros((n_candles, 4), dtype=bool)
        base_wr = 0.335  # Baseline WR at 2:1 R:R

        for enc_idx, (enc_name, _encoder, n_values) in enumerate(self.ENCODING_CONFIG):
            enc_patterns = self._pattern_lib.get(enc_name, {})
            states = all_states[enc_name]
            for n in n_values:
                # Library keys may be ints or strings depending on miner version.
                n_patterns = enc_patterns.get(n, enc_patterns.get(str(n), {}))
                if not n_patterns:
                    continue
                for i in range(n - 1, n_candles):
                    key = ngram_key(states, i - n + 1, n)
                    if key in n_patterns:
                        p = n_patterns[key]
                        wr = p.get("combined_wr", p.get("wr", 0.5))
                        total = p.get("combined_total", p.get("total", 1))
                        # Score: edge over baseline × reliability weight
                        edge = wr - base_wr
                        weight = np.sqrt(total)
                        total_score[i] += edge * weight
                        match_count[i] += 1
                        max_wr[i] = max(max_wr[i], wr)
                        enc_matched[i, enc_idx] = True

        # Encoding diversity: number of distinct encodings that matched.
        enc_diversity = enc_matched.sum(axis=1)
        dataframe["prom_score"] = total_score
        dataframe["prom_count"] = match_count
        dataframe["prom_max_wr"] = max_wr
        dataframe["prom_diversity"] = enc_diversity

        # ── Information Entropy Gradient ──
        # Uses 4-bit encoding (most balanced state count)
        ent = rolling_binary_entropy(all_states["4bit"], window=30)
        dataframe["binary_entropy"] = ent
        # Entropy gradient: negative = order forming from chaos
        ent_series = pd.Series(ent)
        dataframe["entropy_grad"] = ent_series - ent_series.shift(5)

        # ── R unit: median candle range (NOT a traditional indicator) ──
        rng = high - low
        dataframe["median_r"] = pd.Series(rng).rolling(20).median()
        dataframe["median_r"] = dataframe["median_r"].bfill()
        dataframe["r_pct"] = dataframe["median_r"] / close

        # ── Diagnostics ──
        # Mirrors the entry conditions (minus entropy/volume) for logging only.
        has_match = match_count > 0
        entry_sigs = (
            (total_score >= self.buy_score_min.value)
            & (enc_diversity >= self.buy_diversity_min.value)
            & (max_wr >= self.buy_max_wr_min.value)
            & (match_count >= self.buy_count_min.value)
        )
        pair = metadata.get('pair', '?')
        if has_match.any():
            print(
                f" {pair}: {has_match.sum():,} candles with matches, "
                f"{entry_sigs.sum():,} entry signals, "
                f"avg_score={total_score[has_match].mean():.2f} "
                f"avg_wr={max_wr[has_match].mean():.3f} "
                f"avg_count={match_count[has_match].mean():.1f} "
                f"avg_diversity={enc_diversity[has_match].mean():.1f}"
            )
        else:
            # Guard: .mean() over an empty mask would print NaN and emit
            # a RuntimeWarning.
            print(f" {pair}: no pattern matches in {n_candles:,} candles")
        return dataframe

    def populate_entry_trend(self, dataframe: pd.DataFrame, metadata: dict) -> pd.DataFrame:
        """
        Enter long when Bayesian consensus is high enough.
        Requires multiple independent encoding schemes to agree.
        """
        dataframe.loc[
            (dataframe["prom_score"] >= self.buy_score_min.value)
            & (dataframe["prom_diversity"] >= self.buy_diversity_min.value)
            & (dataframe["prom_max_wr"] >= self.buy_max_wr_min.value)
            & (dataframe["prom_count"] >= self.buy_count_min.value)
            & (dataframe["binary_entropy"] <= self.buy_entropy_max.value)
            & (dataframe["volume"] > 0),
            "enter_long",
        ] = 1
        return dataframe

    def populate_exit_trend(self, dataframe: pd.DataFrame, metadata: dict) -> pd.DataFrame:
        """Exits handled by trailing stop + ROI table."""
        return dataframe