import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Load historical OHLCV data
data = pd.read_csv('historical_data.csv')

# Preprocess data
data = data.dropna()
data['log_return'] = np.log(data['Close'] / data['Close'].shift(1))
# RSI (14-period, simple-moving-average variant)
delta = data['Close'].diff()
gain = delta.clip(lower=0).rolling(window=14).mean()
loss = (-delta.clip(upper=0)).rolling(window=14).mean()
data['rsi'] = 100 - 100 / (1 + gain / loss)
# Bollinger Bands: 20-period moving average +/- 2 standard deviations
data['middle_band'] = data['Close'].rolling(window=20).mean()
rolling_std = data['Close'].rolling(window=20).std()
data['upper_band'] = data['middle_band'] + 2 * rolling_std
data['lower_band'] = data['middle_band'] - 2 * rolling_std
# Target: direction of the next candle (1 = up, -1 = down, 0 = unchanged)
data['target'] = np.sign(data['Close'].shift(-1) - data['Close'])

# Feature engineering
data['ma_50'] = data['Close'].rolling(window=50).mean()
data['ma_200'] = data['Close'].rolling(window=200).mean()
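# Optional, not in the original script: lagged log returns are a common extra
# input for direction classifiers; shown here only as an illustrative sketch.
for lag in (1, 2, 3):
    data[f'log_return_lag_{lag}'] = data['log_return'].shift(lag)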

# Model training
# Drop the rows made NaN by the rolling windows and by the shifted target
data = data.dropna()
# Keep only numeric columns as features (a date/timestamp column, if present, is excluded)
X = data.drop(columns=['Close', 'target']).select_dtypes(include=[np.number])
y = data['target']
# shuffle=False preserves chronological order so the test set never precedes the training data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
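# Optional diagnostic, not in the original script: feature_importances_ is a
# standard attribute of a fitted RandomForestClassifier and shows which of the
# engineered features the forest actually leans on.
importances = pd.Series(model.feature_importances_, index=X_train.columns).sort_values(ascending=False)
print(importances)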

# Model evaluation
y_pred = model.predict(X_test)
print('Accuracy:', accuracy_score(y_test, y_pred))
# The target can take up to three values (-1, 0, 1), so use a multiclass average
print('Precision:', precision_score(y_test, y_pred, average='macro', zero_division=0))
print('Recall:', recall_score(y_test, y_pred, average='macro', zero_division=0))
print('F1-score:', f1_score(y_test, y_pred, average='macro', zero_division=0))
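# Optional diagnostic, not in the original script: a confusion matrix makes it
# easy to see whether the model is simply predicting the majority direction.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))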

# Prediction
latest_features = X_test.iloc[[-1]]  # double brackets keep a DataFrame, so feature names are preserved
next_candle_color = model.predict(latest_features)[0]
print('Next candlestick color:', 'up' if next_candle_color == 1 else ('down' if next_candle_color == -1 else 'unchanged'))
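
# Sketch, not part of the original script: a walk-forward evaluation with
# TimeSeriesSplit is usually a more realistic check than a single hold-out
# split for market data, because every fold is trained only on past candles.
from sklearn.base import clone
from sklearn.model_selection import TimeSeriesSplit

tscv = TimeSeriesSplit(n_splits=5)
fold_scores = []
for train_idx, test_idx in tscv.split(X):
    fold_model = clone(model)
    fold_model.fit(X.iloc[train_idx], y.iloc[train_idx])
    fold_scores.append(accuracy_score(y.iloc[test_idx], fold_model.predict(X.iloc[test_idx])))
print('Walk-forward accuracy per fold:', fold_scores)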