Compare commits
No commits in common. "e248f214d717aad518a306b1428a2d1c6a7c9387" and "da50b667da622366e8bdf724679bae9df4893496" have entirely different histories.
e248f214d7 ... da50b667da
49 changed files with 2 additions and 1753 deletions
@@ -1 +1,2 @@
# Forex-Bot
Binary files not shown (27 files).
env/trading_env.py (vendored, 104 lines deleted)
@@ -1,104 +0,0 @@
import pandas as pd
import numpy as np
import os

# Load 2021 EURUSD 1-minute data
df = pd.read_excel("data/DAT_XLSX_EURUSD_M1_2021.xlsx", header=None, names=[
    'timestamp', 'open', 'high', 'low', 'close', 'volume'
])
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.set_index('timestamp', inplace=True)

# Resample to 15-minute intervals
df = df.resample('15min').agg({
    'open': 'first',
    'high': 'max',
    'low': 'min',
    'close': 'last',
    'volume': 'sum'
}).dropna()

# Create "perfect" actions from a fixed lookahead
lookahead = 4  # ~1 hour of 15-minute bars
profit_threshold = 0.001
loss_threshold = 0.001

labels = []
for i in range(len(df) - lookahead):
    current_close = df.iloc[i]['close']
    future_window = df.iloc[i + 1:i + 1 + lookahead]
    max_future_price = future_window['high'].max()
    min_future_price = future_window['low'].min()

    if max_future_price >= current_close * (1 + profit_threshold):
        labels.append(1)
    elif min_future_price <= current_close * (1 - loss_threshold):
        labels.append(-1)
    else:
        labels.append(0)

# Pad the final bars (no lookahead available) with "hold"
labels += [0] * lookahead
df['perfect_action'] = labels

# Simulate the perfect trader
balance = 10000.0
position = 0.0
entry_price = 0.0
win_count = 0
loss_count = 0
trades_count = 0
trades = []

for i in range(len(df)):
    price = df.iloc[i]['close']
    action = df.iloc[i]['perfect_action']

    if action == 1 and position == 0:
        position = balance / price
        entry_price = price
        trades.append({
            "timestamp": df.index[i],
            "action": "buy",
            "price": price,
            "balance": balance
        })
        balance = 0
        trades_count += 1

    elif action == -1 and position > 0:
        exit_value = position * price
        pnl = exit_value - (position * entry_price)
        if pnl > 0:
            win_count += 1
        else:
            loss_count += 1
        balance = exit_value
        trades.append({
            "timestamp": df.index[i],
            "action": "sell",
            "price": price,
            "balance": balance
        })
        position = 0
        entry_price = 0

# Finalize any open position
if position > 0:
    balance = position * df.iloc[-1]['close']

# Results
print("\n📊 Perfect Trader Simulation:")
print("🟢 Starting Balance: $10,000.00")
print(f"🔚 Ending Balance: ${balance:,.2f}")
print(f"💼 Total Trades: {trades_count}")
print(f"✅ Wins: {win_count} | ❌ Losses: {loss_count}")
if trades_count > 0:
    print(f"📈 Win Rate: {win_count / trades_count * 100:.2f}%")

# Save trade log
output_path = "ml/perfect_trades.csv"
os.makedirs("ml", exist_ok=True)

trades_df = pd.DataFrame(trades)
trades_df.to_csv(output_path, index=False)
print(f"📁 Perfect trades saved to: {output_path}")
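Not part of the diff: a minimal sanity-check sketch for the trade log written above, assuming ml/perfect_trades.csv contains alternating buy/sell rows as produced by this script.

# check_perfect_trades.py (hypothetical helper, not in the repo)
import pandas as pd

trades = pd.read_csv("ml/perfect_trades.csv", parse_dates=["timestamp"])
buys = trades[trades["action"] == "buy"].reset_index(drop=True)
sells = trades[trades["action"] == "sell"].reset_index(drop=True)
paired = min(len(buys), len(sells))
# Per-round-trip return: each sell price relative to the preceding buy price
returns = sells["price"].iloc[:paired].values / buys["price"].iloc[:paired].values - 1
print(f"round trips: {paired}, mean return per trade: {returns.mean():.4%}")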
main.py (8 lines deleted)
@@ -1,8 +0,0 @@
from backtest import run_backtest
from ai_backtest import run_ai_backtest

if __name__ == "__main__":

    # run_backtest()    # ← SMA strategy
    run_ai_backtest()   # ← AI model strategy
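ai_backtest.py itself does not appear in this diff, so the call above cannot be traced here. Below is a hypothetical sketch of what run_ai_backtest could look like, assuming the PyTorchAIModel strategy shown later in this diff and a CSV export of 15-minute EURUSD bars; the file path and strategy module name are assumptions, not repo contents.

# ai_backtest.py (hypothetical sketch)
import backtrader as bt
from strategy import PyTorchAIModel  # assumed module name; the real import path is not shown in this diff

def run_ai_backtest():
    cerebro = bt.Cerebro()
    data = bt.feeds.GenericCSVData(
        dataname="data/eurusd_15m.csv",   # assumed export of the resampled 15-minute bars
        dtformat="%Y-%m-%d %H:%M:%S",
        datetime=0, open=1, high=2, low=3, close=4, volume=5, openinterest=-1,
        timeframe=bt.TimeFrame.Minutes, compression=15,
    )
    cerebro.adddata(data)
    cerebro.addstrategy(PyTorchAIModel)
    cerebro.broker.setcash(10000.0)
    cerebro.run()
    print(f"Final portfolio value: {cerebro.broker.getvalue():,.2f}")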
Binary files not shown (3 files).
@@ -1,19 +0,0 @@
open
high
low
close
volume
sma_10
sma_30
rsi_14
momentum
price_delta
vol_rolling
bollinger_b
macd
day_of_week
hour
minute
sim_balance
buy_amount
pnl
Binary files not shown (3 files).
@@ -1,16 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

class ForexMLP(nn.Module):
    def __init__(self, input_size):
        super(ForexMLP, self).__init__()
        self.fc1 = nn.Linear(input_size, 64)
        self.fc2 = nn.Linear(64, 32)
        self.out = nn.Linear(32, 2)  # [buy_score, sell_score]

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = torch.sigmoid(self.out(x))  # independent probabilities in [0, 1]
        return x
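Not part of the diff: a quick shape check for ForexMLP, assuming the 18-column feature set built by ml/preprocessing.py.

import torch
from ml.models.forex_mlp import ForexMLP

model = ForexMLP(input_size=18)     # 18 = number of features returned by load_and_preprocess_data
scores = model(torch.randn(4, 18))  # a dummy batch of 4 rows
print(scores.shape)                 # torch.Size([4, 2]) -> [buy_score, sell_score] per row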
File diff suppressed because it is too large.
ml/preprocessing.py (65 lines deleted)
@@ -1,65 +0,0 @@
# ml/preprocessing.py

import pandas as pd
import numpy as np

def load_and_preprocess_data(path):
    df = pd.read_excel(path, header=None, names=[
        'timestamp', 'open', 'high', 'low', 'close', 'volume'
    ])
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df.set_index('timestamp', inplace=True)

    # Resample to 15-minute intervals
    df = df.resample('15min').agg({
        'open': 'first',
        'high': 'max',
        'low': 'min',
        'close': 'last',
        'volume': 'sum'
    }).dropna()

    # Add features
    df['sma_10'] = df['close'].rolling(10).mean()
    df['sma_30'] = df['close'].rolling(30).mean()
    # Note: simplified momentum-style proxy, not the textbook Wilder RSI
    df['rsi_14'] = 100 - (100 / (1 + df['close'].pct_change().add(1).rolling(14).mean()))
    df['momentum'] = df['close'] - df['close'].shift(4)
    df['price_delta'] = df['close'] - df['open']
    df['vol_rolling'] = df['volume'].rolling(10).mean()

    # Bollinger %B
    rolling_mean = df['close'].rolling(20).mean()
    rolling_std = df['close'].rolling(20).std()
    df['bollinger_b'] = (df['close'] - rolling_mean) / (2 * rolling_std)

    # MACD
    ema12 = df['close'].ewm(span=12, adjust=False).mean()
    ema26 = df['close'].ewm(span=26, adjust=False).mean()
    df['macd'] = ema12 - ema26

    # Timestamp-based features
    df['hour'] = df.index.hour
    df['weekday'] = df.index.weekday

    # Simulated portfolio balance and buy-in value (placeholders for now)
    df['balance'] = 10000.0                            # placeholder: could be dynamic in real time
    df['buy_in'] = df['close'].shift(1)                # simulated buy price
    df['pnl_per_trade'] = df['close'] - df['buy_in']   # placeholder PnL calculation

    # Target: will price rise by the threshold within the next N intervals?
    future_window = 4
    threshold = 0.001
    df['future_max'] = df['close'].shift(-future_window).rolling(future_window).max()
    df['target'] = np.where(df['future_max'] > df['close'] * (1 + threshold), 1, 0)

    df.dropna(inplace=True)

    # Define feature set
    features = [
        'open', 'high', 'low', 'close', 'volume',
        'sma_10', 'sma_30', 'rsi_14', 'momentum',
        'price_delta', 'vol_rolling', 'bollinger_b', 'macd',
        'hour', 'weekday', 'balance', 'buy_in', 'pnl_per_trade'
    ]

    return df[features], df['target']
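Not part of the diff: a minimal usage sketch for load_and_preprocess_data, assuming the same Excel export used elsewhere in the repo.

from ml.preprocessing import load_and_preprocess_data

X, y = load_and_preprocess_data("data/DAT_XLSX_EURUSD_M1_2021.xlsx")
print(X.shape)                          # (n_samples, 18) feature matrix
print(y.value_counts(normalize=True))   # class balance of the 0/1 target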
ml/scaler.pkl
Binary file not shown.

ml/train_mlp.py (106 lines deleted)
@@ -1,106 +0,0 @@
# ml/train_mlp.py

import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from ml.models.forex_mlp import ForexMLP
from ml.preprocessing import load_and_preprocess_data
import numpy as np
import joblib
import pandas as pd


# Load + preprocess
features_df, labels = load_and_preprocess_data("data/DAT_XLSX_EURUSD_M1_2021.xlsx")

# Encode targets into two-element vectors: [Buy, Sell]
def encode_targets(labels):
    encoded = []
    for val in labels:
        if val == 1:
            encoded.append([1, 0])
        elif val == -1:
            encoded.append([0, 1])
        else:
            encoded.append([0, 0])
    return torch.tensor(encoded, dtype=torch.float32)

# Train-test split (chronological, no shuffling)
X_train, X_test, y_train, y_test = train_test_split(features_df, labels, test_size=0.3, shuffle=False)

# Scale (superseded below: the final scaler is re-fit on the rebalanced training set)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Rebalance the training set before fitting the final scaler
# 🟢 Convert to DataFrame for easier manipulation
train_df = X_train.copy()
train_df['target'] = y_train.values

# 🧪 Split into minority and majority classes
minority = train_df[train_df['target'] == 1]
majority = train_df[train_df['target'] == 0]

# 🔁 Oversample minority class to match majority count
minority_oversampled = minority.sample(len(majority), replace=True, random_state=42)

# 🔄 Combine + shuffle
balanced_df = pd.concat([majority, minority_oversampled]).sample(frac=1, random_state=42)

# ✅ Re-split into features and labels
X_balanced = balanced_df.drop(columns=['target'])
y_balanced = balanced_df['target']

# 🔢 Scale (fit on the rebalanced training set only)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X_balanced)
X_test_scaled = scaler.transform(X_test)  # use the original, untouched test set!

# 💾 Save scaler
os.makedirs("ml", exist_ok=True)
joblib.dump(scaler, "ml/scaler.pkl")

# 📦 Wrap in TensorDataset with encoded 2D targets
y_encoded = encode_targets(y_balanced.values)
train_dataset = TensorDataset(torch.tensor(X_scaled, dtype=torch.float32), y_encoded)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)


# Init model
model = ForexMLP(input_size=X_train.shape[1])
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training loop
epochs = 100
for epoch in range(epochs):
    total_loss = 0.0
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"📉 Epoch {epoch + 1} | Loss: {total_loss / len(train_loader):.4f}")

# Save model
torch.save(model.state_dict(), "ml/models/forex_mlp.pt")
print("✅ Trained model saved to ml/models/forex_mlp.pt")

# Evaluation
model.eval()
with torch.no_grad():
    preds = model(torch.tensor(X_test_scaled, dtype=torch.float32))
    preds_bin = (preds > 0.5).int()
    y_test_encoded = encode_targets(y_test.values).int()
    print("📊 Evaluation Report:")
    print(classification_report(y_test_encoded, preds_bin))
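Not part of the diff: an inference sketch that reloads the artifacts saved by the training script above, assuming ml/scaler.pkl and ml/models/forex_mlp.pt exist.

import joblib
import torch
from ml.models.forex_mlp import ForexMLP
from ml.preprocessing import load_and_preprocess_data

scaler = joblib.load("ml/scaler.pkl")
model = ForexMLP(input_size=scaler.n_features_in_)   # must match the feature count used in training
model.load_state_dict(torch.load("ml/models/forex_mlp.pt", map_location="cpu"))
model.eval()

X, _ = load_and_preprocess_data("data/DAT_XLSX_EURUSD_M1_2021.xlsx")
row_scaled = scaler.transform(X.iloc[[-1]])          # most recent feature row
with torch.no_grad():
    scores = model(torch.tensor(row_scaled, dtype=torch.float32))[0].numpy()
print(f"buy={scores[0]:.3f} sell={scores[1]:.3f}")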
@@ -1,22 +0,0 @@
backtrader
pandas
# numpy==2.2.4
numpy<2
matplotlib
requests
openpyxl
ta
scikit-learn
xgboost
joblib
matplotlib
seaborn
torch==2.1.0
torchvision==0.16.0
torchaudio==2.1.0
scikit-learn
pandas
matplotlib
seaborn
tqdm
tensorboard
@@ -1,41 +0,0 @@
import backtrader as bt
import torch
import numpy as np
import pandas as pd
from ml.models.forex_mlp import ForexMLP

class PyTorchAIModel(bt.Strategy):
    def __init__(self):
        # ForexMLP requires an input_size; the network architecture must match the
        # saved weights. Here only the 5 raw OHLCV values built in next() are fed
        # in, with no scaling applied.
        self.model = ForexMLP(input_size=5)
        self.model.load_state_dict(torch.load("ml/models/forex_mlp.pt", map_location=torch.device("cpu")))
        self.model.eval()

        self.buy_threshold = 0.7
        self.sell_threshold = 0.6

    def next(self):
        # Skip early bars (warm-up room for indicators, if any are added)
        if len(self.datas[0]) < 30:
            return

        # Create the feature vector for the current candle
        features = np.array([[
            self.data.open[0],
            self.data.high[0],
            self.data.low[0],
            self.data.close[0],
            self.data.volume[0]
        ]], dtype=np.float32)

        inputs = torch.tensor(features)
        with torch.no_grad():
            output = self.model(inputs)
            buy_score, sell_score = output[0].numpy()

        print(f"[AI] Buy: {buy_score:.3f}, Sell: {sell_score:.3f}")

        # Trade logic
        if buy_score > self.buy_threshold and not self.position:
            self.buy()
        elif sell_score > self.sell_threshold and self.position:
            self.close()
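Not part of the diff: a small consistency guard, assuming ml/scaler.pkl from the training script. The strategy above feeds 5 unscaled OHLCV values into a network whose saved weights were trained on the 18 scaled features from ml/preprocessing.py, so a check like this would fail loudly instead of silently mis-predicting.

import joblib

scaler = joblib.load("ml/scaler.pkl")
n_strategy_features = 5  # open, high, low, close, volume as built in next()
assert scaler.n_features_in_ == n_strategy_features, (
    f"model/scaler expect {scaler.n_features_in_} features, "
    f"but the strategy supplies {n_strategy_features}"
)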