Compare commits

5 Commits

Author SHA1 Message Date
fa46ed472f designed a test which works! 2024-03-19 13:29:08 +01:00
fffd4b9e58 fixed some hierarchial stuff 2024-03-16 20:45:20 +01:00
8d8d102a14 moved things to a new place ! 2024-03-15 20:15:46 +01:00
5f8cea85ce added notebooks 2024-03-15 20:13:43 +01:00
4c657bb998 finalized docker infrastructure 2023-12-28 22:28:11 +02:00
47 changed files with 102698 additions and 243 deletions

0
docker-compose.yml Normal file
View File

21
dockerfiles/Dockerfile Normal file
View File

@@ -0,0 +1,21 @@
FROM python:3.11-buster
RUN curl -sSL https://install.python-poetry.org | POETRY_VERSION=1.7.1 python3 -
ENV POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_IN_PROJECT=1 \
POETRY_VIRTUALENVS_CREATE=1 \
POETRY_CACHE_DIR=/tmp/poetry_cache
WORKDIR /app
COPY pyproject.toml poetry.lock /app/
COPY market_trade /app/market_trade/
COPY tools /app/tools
RUN mkdir --parents --mode 0700 ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts
RUN --mount=type=ssh --mount=type=cache,target=$POETRY_CACHE_DIR $HOME/.local/bin/poetry install --without dev
ENV VIRTUAL_ENV=/app/.venv \
PATH="/app/.venv/bin:$PATH"

View File

@@ -1,23 +1,3 @@
FROM python:3.11-buster
FROM registry.karmaxplan.ru/market_trade:0.2.0
RUN curl -sSL https://install.python-poetry.org | POETRY_VERSION=1.7.1 python3 -
ENV POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_IN_PROJECT=1 \
POETRY_VIRTUALENVS_CREATE=1 \
POETRY_CACHE_DIR=/tmp/poetry_cache
WORKDIR /app
COPY pyproject.toml poetry.lock /app/
COPY market_trade /app/market_trade/
COPY tools /app/tools
RUN mkdir --parents --mode 0700 ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts
RUN --mount=type=ssh --mount=type=cache,target=$POETRY_CACHE_DIR $HOME/.local/bin/poetry install --without dev
ENV VIRTUAL_ENV=/app/.venv \
PATH="/app/.venv/bin:$PATH"
ENTRYPOINT ["python", "tools/save_currencies_data.py"]
ENTRYPOINT ["python", "tools/save_currencies_data.py"]

View File

@@ -15,7 +15,7 @@ import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import mplfinance as mpf
#import mplfinance as mpf
import plotly
#import plotly.plotly as py
@@ -79,9 +79,10 @@ class corePlt():
class coreDraw():
def __init__(self, data=[],needShow=False):
def __init__(self, data=[],needShow=False,subplot_titles={}):
self.data=self.getPlts(data)
self.needShow=needShow
self.subplot_titles=subplot_titles
self.ans=self.getAns()
@@ -156,11 +157,11 @@ class coreDraw():
rows=maxRow,
cols=maxCol,
shared_xaxes=True,
vertical_spacing=0.02,
vertical_spacing=0.1,
shared_yaxes=True,
horizontal_spacing=0.02,
#horizontal_spacing=0.02,
#column_widths=[]
subplot_titles=self.subplot_titles
)
@@ -188,7 +189,7 @@ class coreDraw():
except:
colorType='normal'
colors=self.getBarColorList(i.df[j],colorType)
fig.add_trace(go.Bar(x=i.df['date'], y=i.df[j],name=j,marker_color=colors))
fig.add_trace(go.Bar(x=i.df['date'], y=i.df[j],name=j,marker_color=colors),row=i.row, col=i.col)
@@ -196,4 +197,4 @@ class coreDraw():
ans=fig
if self.needShow:
plotly.offline.iplot(fig)
return ans
return ans
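
For reference, a minimal standalone sketch of the plotly pattern these coreDraw changes adjust (per-subplot titles, the wider 0.1 vertical spacing, and routing bar traces to an explicit row/col); the sample data and titles below are invented for illustration:

```python
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Two stacked subplots sharing one x-axis; titles are forwarded the same way
# coreDraw now forwards self.subplot_titles into make_subplots.
fig = make_subplots(
    rows=2, cols=1,
    shared_xaxes=True,
    vertical_spacing=0.1,
    subplot_titles=("price", "volume"),
)
dates = ["2024-01-01", "2024-01-02", "2024-01-03"]
fig.add_trace(go.Scatter(x=dates, y=[1.08, 1.09, 1.07], name="close"), row=1, col=1)
# Passing row/col explicitly is what sends the bars to their own subplot.
fig.add_trace(go.Bar(x=dates, y=[120, 90, 150], name="volume"), row=2, col=1)
fig.show()
```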

View File

@@ -1,93 +1,102 @@
import pandas as pd
import datetime
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import math
import scipy
import random
import statistics
import datetime
class CoreMath:
def __init__(self, base_df, params=None):
"""
Этот класс нужен для того, чтобы проводить операции над датафреймами
:param base_df: pandas.DataFrame , датафрейм, над которым будут проведены математические операции
:param params: словарь, который определяет какие данные пришли в класс, и что с ними нужно делать, и как
"""
if params is None:
params = {
'dataType': 'ohcl',
'action': None,
'actionOptions': {}
}
# reset the index, because nothing is known about how the incoming data is indexed
self.base_df = base_df.reset_index(drop=True)
self.params = params
# This part selects which slice of the data the calculations run on
def __init__(self, base_df, params={
'dataType':'ohcl',
'action': None,
'actionOptions':{}
}
):
self.base_df=base_df.reset_index(drop=True)
self.params=params
if self.params['dataType']=='ohcl':
self.col=self.base_df[self.params['actionOptions']['valueType']]
elif self.params['dataType']=='series':
self.col=self.base_df
# actually run the calculation
self.ans=self.getAns()
def getAns(self):
"""
Эта функция занимается собственно рутингом вычислений, в зависимости от параметров
:return: ans, неопределенный тип данных, в заивисимости от action
"""
ans=None
# в зависимости от параметра action производятся соответсвующие действия
if self.params['action']=='findExt':
ans = self.getExtremumValue()
elif self.params['action']=='findMean':
ans = self.getMeanValue()
elif self.params['action']=='findSTD':
ans = self.getSTD()
ans=self.getSTD()
return ans
def getExtremumValue(self):
"""
Эта функция возвращает экстремум произвольного типа внутри одного столбца
Тип контролируется разделом внутри словаря параметров `self.params` по ключу `actionOptions`:
'extremumtype': -- тип экстремума
ans=None
'''
actionOptions:
'extremumtype':
'min'
'max'
:return: ans, the requested extremum
"""
ans=None
'valueType':
'open'
'close'
'high'
'low'
'''
if self.params['actionOptions']['extremumtype']=='max':
ans=max(self.col)
if self.params['actionOptions']['extremumtype']=='min':
ans=min(self.col)
return ans
def getMeanValue(self):
"""
Божественный код
Эта функция возвращает среднее значение одного из следующих типов.
Для определения типа используется словарь `self.params`, по ключу `actionOptions`, релевантные ключи выглядят
так:
'''
actionOptions:
'MeanType':
'MA' -- среднее по всему столбцу
'SMA' -- скользящее среднее
'EMA' -- экспоненциальное скользящее среднее
'WMA' -- взвешенное скользящее среднее
'window' -- размер окна
'span' -- >=1 , аналог окна для экспоненциального среднего, чем он больше тем меньше коэффициент сглаживания
'weights' -- numpy.ndarray, список размером в параметр `window`, конкретные веса для каждого элемента
"""
'MA'
'SMA'
'EMA'
'WMA'
--'SMMA'
'valueType':
'open'
'close'
'high'
'low'
'window'
'span'
'weights'
'''
ans=None
if self.params['actionOptions']['MeanType']=='MA':
ans = self.col.mean()
if self.params['actionOptions']['MeanType']=='SMA':
ans=np.convolve(self.col, np.ones(self.params['actionOptions']['window']), 'valid') / self.params['actionOptions']['window']
#ans=self.col.rolling(window=self.params['actionOptions']['window']).mean().to_list()
if self.params['actionOptions']['MeanType']=='EMA':
ans=self.col.ewm(span=self.params['actionOptions']['span'], adjust=False).mean().to_list()
if self.params['actionOptions']['MeanType']=='WMA':
@@ -97,8 +106,10 @@ class CoreMath:
weights=np.arange(1,self.params['actionOptions']['window']+1)
ans=self.col.rolling(window=self.params['actionOptions']['window']).apply(lambda x: np.sum(weights*x) / weights.sum(), raw=False).to_list()
return ans
return(ans)
def getSTD(self):
'''
actionOptions:
@@ -109,7 +120,7 @@ class CoreMath:
'''
ans=None
@@ -117,11 +128,11 @@ class CoreMath:
window=self.params['actionOptions']['window']
ans=np.asarray([])
for i in range(len(self.col)-window+1):
ans=np.append(ans, np.std(self.col[i:i+window], ddof=1))
ans=np.append(ans,np.std(self.col[i:i+window], ddof=1))
except:
#window = len(self.col)
ans=np.std(self.col, ddof=1)
return ans
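
A usage sketch for the CoreMath routing shown above, assuming the market_trade.core.CoreTraidMath module path used elsewhere in this changeset and a candle DataFrame that has a 'close' column:

```python
import pandas as pd
from market_trade.core.CoreTraidMath import CoreMath

# Hypothetical candles; only the 'close' column is touched here.
df = pd.DataFrame({"close": [1.00, 1.20, 1.10, 1.30, 1.40, 1.20]})

# Simple moving average over a 3-candle window via the params-dict API.
op_sma = {
    "dataType": "ohcl",
    "action": "findMean",
    "actionOptions": {"MeanType": "SMA", "valueType": "close", "window": 3},
}
sma = CoreMath(df, op_sma).ans   # array of len(df) - window + 1 values

# Rolling sample standard deviation with the same window.
op_std = {
    "dataType": "ohcl",
    "action": "findSTD",
    "actionOptions": {"valueType": "close", "window": 3},
}
std = CoreMath(df, op_std).ans
```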

View File

@@ -46,7 +46,7 @@ class Alligator:
'valueType':self.options['valueType'],
'window':self.options[keyAns]['window']}
}
ans=CoreTraidMath.CoreMath(self.base_df,op).ans
ans=market_trade.core.CoreTraidMath.CoreMath(self.base_df,op).ans
return ans

View File

@@ -64,7 +64,7 @@ class Envelopes:
}
if dictResp['MeanType']=='SMA':
y=CoreTraidMath.CoreMath(self.base_df,op).ans
y=market_trade.core.CoreTraidMath.CoreMath(self.base_df,op).ans
ans['MainEnv']=y[:len(y)-self.options['shift']]
ans['PlusEnv']=ans['MainEnv']*(1+self.options['kProc']/100)
ans['MinusEnv']=ans['MainEnv']*(1-self.options['kProc']/100)
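
The Envelopes bands above are just the (shifted) moving average scaled by plus/minus kProc percent; a tiny numeric sketch of that arithmetic with made-up values:

```python
import numpy as np

sma = np.array([1.00, 1.02, 1.05])    # hypothetical moving-average values
kProc = 0.5                            # band width in percent
plus_env = sma * (1 + kProc / 100)     # upper band: [1.005, 1.0251, 1.05525]
minus_env = sma * (1 - kProc / 100)    # lower band: [0.995, 1.0149, 1.04475]
```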

View File

@@ -23,7 +23,7 @@ import plotly.graph_objs as go
# these two lines allow your code to show up in a notebook
from plotly.offline import init_notebook_mode, iplot
from plotly.subplots import make_subplots
import CoreDraw
import market_trade.core.CoreDraw
init_notebook_mode()
import market_trade.core.CoreTraidMath
import plotly.express as px

View File

@@ -69,7 +69,7 @@ class Stochastic:
'action':'findMean',
'actionOptions':{'MeanType':'SMA','window':self.options['windowSMA']}
}
ans=np.asarray(CoreTraidMath.CoreMath(ser,op).ans)
ans=np.asarray(market_trade.core.CoreTraidMath.CoreMath(ser,op).ans)
return ans
#return np.convolve(col, np.ones(self.options['windowSMA']), 'valid') /self.options['windowSMA']

View File

@@ -0,0 +1,49 @@
import pandas as pd
import datetime
import numpy as np
import uuid
class DealManager():
def __init__(self):
#self.commission=0.04
self.columns=['uuid','figi','amount','startPrice']
self.deals = pd.DataFrame(columns=self.columns)
self.deals = self.deals.set_index('uuid')
def findDealByPriceAndFig(self,price,figi):
ans=None
for i in range(self.deals.shape[0]):
if self.deals.iloc[i].startPrice == price and self.deals.iloc[i].figi == figi:
ans = self.deals.iloc[i].name
break
return ans
def openDeal(self,figi,startPrice,amount=1):
desiredDeal=self.findDealByPriceAndFig(startPrice,figi)
if desiredDeal == None:
newDealDict={
'uuid':[str(uuid.uuid4())],
'figi':[figi],
'startPrice':[startPrice],
'amount':[amount]
}
#newDealDict['profit']=[startPrice*pow(1+self.commission,2)]
newDeal=pd.DataFrame.from_dict(newDealDict).set_index('uuid')
self.deals=pd.concat([self.deals, newDeal])
else:
self.deals.at[desiredDeal,'amount'] += amount
def closeDeal(self,uuid,amount):
desiredDeal=self.deals.loc[uuid]
if desiredDeal.amount - amount == 0:
self.deals = self.deals.drop(labels = [uuid],axis = 0)
else:
self.deals.at[uuid,'amount'] -= amount
#self.deals.loc[uuid].amount = desiredDeal.amount - amount
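
A short usage sketch for the new DealManager; the figi and prices are hypothetical, and the import path follows the market_trade.core.dealManager module used by imports later in this changeset. Opening the same figi at the same price twice increments the amount instead of adding a second row:

```python
from market_trade.core.dealManager import DealManager

dm = DealManager()
dm.openDeal(figi="BBG000BLNNH6", startPrice=1.0850, amount=1)
dm.openDeal(figi="BBG000BLNNH6", startPrice=1.0850, amount=2)   # same deal, amount -> 3

deal_id = dm.findDealByPriceAndFig(1.0850, "BBG000BLNNH6")      # look up by price + figi
dm.closeDeal(deal_id, amount=1)                                 # amount -> 2
dm.closeDeal(deal_id, amount=2)                                 # amount hits 0, row is dropped
```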

View File

@@ -2,9 +2,9 @@ project:: #bibasCopy
# Import
First, import everything we need
```python
from decisionManager_v2 import *
from indicators_v2 import *
from signals_v2 import *
from market_trade.core.decisionManager_v2 import *
from market_trade.core.indicators_v2 import *
from market_trade.core.signals_v2 import *
```
Prepare the data the model is built from. The model is a dictionary in the following format:
@@ -174,9 +174,9 @@ test.generateMatrixProbabilityFromDict(retroAns)
```
## Final code
```python
from decisionManager_v2 import *
from indicators_v2 import *
from signals_v2 import *
from market_trade.core.decisionManager_v2 import *
from market_trade.core.indicators_v2 import *
from market_trade.core.signals_v2 import *
import pandas as pd

View File

@@ -0,0 +1,116 @@
import pandas as pd
import datetime
import numpy as np
import pickle
from signals import *
from dealManager import *
from trandeVoter import *
from riskManager import riskManager
class decsionManager():
def __init__(self,name):
self.name = name
self.RM = riskManager()
self.DM = DealManager()
self.TV = trandeVoter(name)
self.SA = signalAgrigator()
pass
# signal test method pulled out of signalAgrigator
def getSignalTest(self,data: pd.DataFrame(),reqSig: dict, batchSize=30, dataType='candel') -> dict:
self.SA.mode = 'retroFast'
self.SA.createSingnalInstances(
data = data,
dictAgrigSignal = reqSig,
dataType='candel',
batchSize=30
)
ans = self.SA.getAns(data)
return ans
# method for generating the probability matrix.
def generateMatrixProbability(self,
data: pd.DataFrame(),
reqSig: dict,
target: str,
batchSize=30,
#dataType='candel'
):
data=data.reset_index(drop=True)
self.SA.createSingnalInstances(
data = data,
dictAgrigSignal = reqSig,
dataType='candel',
batchSize=batchSize
)
self.TV.createMatrixAmounts(reqSig.keys())
for i in range(data.shape[0]-batchSize-1):
sigAns=self.SA.getAns(data[i:i+batchSize])
rightAns=self.getRetroStepAns(data[target][i],data[target][i+1])
self.TV.setDecisionBySignals(self.KostilEbaniy(sigAns),rightAns)
self.TV.generateMatrixProbability()
# no comments
def KostilEbaniy(self,d):
ans={}
for i in d.keys():
if d[i] == 0:
ans[i] = 'none'
elif d[i] == 1:
ans[i] = 'up'
elif d[i] == -1:
ans[i] = 'down'
return ans
# likewise self-explanatory
def getRetroStepAns(self, value1,value2):
if value1 == value2:
ans = 'none'
elif value1 < value2:
ans = 'up'
else:
ans = 'down'
return ans
# method for getting an online decision from the signals
def getSignal(self,data: pd.DataFrame(),reqSig: dict, dataType='candel') -> dict:
data=data.reset_index(drop=True)
self.SA.mode = 'online'
self.SA.createSingnalInstances(
data = data,
dictAgrigSignal = reqSig,
dataType='candel',
batchSize=30
)
ans = self.SA.getAns(data)
return ans
# Create the signals. Call this before getOnlineAns
def crateSignals(self,data: pd.DataFrame(),reqSig: dict, dataType='candel'):
data=data.reset_index(drop=True)
self.SA.mode = 'online'
self.SA.createSingnalInstances(
data = data,
dictAgrigSignal = reqSig,
dataType='candel',
batchSize=30
)
def getOnlineAns(self,data: pd.DataFrame(),price):
sigAns = self.SA.getAns(data)
prob = self.TV.getDecisionBySignals(sigAns)
ans = self.RM.getDecision(sigAns,prob,price)
return ans
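
A rough usage sketch of the retro flow this class wires together, assuming the reqSig layout used by signalAgrigator (className / indParams / signalParams / batchSize), the flat imports at the top of this module, and a candle CSV whose path is invented here:

```python
import pandas as pd

df_candle = pd.read_csv("data/EURUSD_price_candlestick.csv")   # hypothetical path
df_candle.rename(columns={"timestamp": "date"}, inplace=True)

reqSig = {
    "BB1": {
        "className": signal_BB,
        "indParams": {"MeanType": "SMA", "window": 15, "valueType": "close", "kDev": 2.5},
        "signalParams": {"source": "close", "target": "close"},
        "batchSize": 15,
    },
}

dm = decsionManager("demo")
# Slides a window over the history, collects each signal's vote, compares the
# target column at step i against step i+1 and tallies the outcome into the voter,
# then normalises the tallies into the probability matrix.
dm.generateMatrixProbability(df_candle[:2000], reqSig, target="close", batchSize=30)
```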

View File

@@ -0,0 +1,161 @@
import os
import pandas as pd
import datetime
import numpy as np
from tqdm import tqdm
from market_trade.core.indicators_v2 import *
from market_trade.core.signals_v2 import *
from market_trade.core.dealManager import *
from market_trade.core.trandeVoter import *
from market_trade.core.riskManager import *
import pickle
class decsionManager:
'''
sigAgrReq = {
'sig_BB':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5}
}
}
},
'sig_BB_2':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2}
}
}
}
}
sigAgrData = {
'sig_BB':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
},
'sig_BB_2':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
}
}
sigAgrRetroTemplate = {
'sig_BB':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
},
'sig_BB_2':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
}
}
'''
def __init__(self,name, sigDict: dict):
self.RM = riskManager()
self.DM = DealManager()
self.TV = trandeVoter(name)
self.SA = signalsAgrigator(sigDict)
self.sigDict = sigDict
def getOnlineAns(self, signalsAns: dict, price: float) -> dict:
probabilityDecsion = self.TV.getDecisionBySignals(self.getSignalsAns(signalsAns))
RMD = self.RM.getDecision(probabilityDecision=probabilityDecsion, price=price, deals = self.DM.deals)
return RMD
def getSignalsAns(self, signalsDataDict: dict) -> dict:
return self.SA.getAns(signalsDataDict)
def getRightAns(self,value_1, value_2):
ans=''
if value_1 > value_2:
ans = 'down'
elif value_1 < value_2:
ans = 'up'
else:
ans = 'none'
return ans
def getRetroTrendAns(self, retroTemplateDict: dict, data: pd.DataFrame(), window: int) -> list:
reqSig={}
ans = {
'signalsAns':[],
'rightAns':[]
}
target = ''
for k in tqdm(range(data.shape[0]-window-1)):
for i in retroTemplateDict.keys():
reqSig[i] = {'signalData': data[k:k+window], 'indicatorData':{}}
target = self.SA.signals[i].params['target']
for j in retroTemplateDict[i]['indicatorData'].keys():
reqSig[i]['indicatorData'][j] = data[k:k+window]
sigAns = self.getSignalsAns(reqSig)
rightAns = self.getRightAns(data[target][k], data[target][k+1])
ans['signalsAns'].append(sigAns)
ans['rightAns'].append(rightAns)
return ans
def generateMatrixProbabilityFromDict(self, dictSignals: dict) -> dict:
self.TV.createMatrixAmounts(dictSignals['signalsAns'][0].keys())
for i in range(len(dictSignals['signalsAns'])):
self.TV.setDecisionBySignals(signalDecisions = dictSignals['signalsAns'][i],
trande = dictSignals['rightAns'][i])
self.TV.generateMatrixProbability()
def createDump(self,postfix='') -> str:
dataDict = {
'RM':self.RM,
'DM':self.DM,
'TV':self.TV,
'SA':self.SA,
'sigDict':self.sigDict
}
fileName='data_'+postfix+'.pickle'
with open(fileName, 'wb') as f:
pickle.dump(dataDict, f)
return os.path.abspath(fileName)
def loadDump(self,path: str) -> None:
with open(path, 'rb') as f:
dataDict = pickle.load(f)
self.RM = dataDict['RM']
self.DM = dataDict['DM']
self.TV = dataDict['TV']
self.SA = dataDict['SA']
self.sigDict = dataDict['sigDict']
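
A small sketch of the dump/load round trip added here, using the sigAgrReq layout from the docstring above; the postfix is arbitrary:

```python
dm = decsionManager("Pipa", sigAgrReq)      # sigAgrReq as shown in the class docstring
path = dm.createDump(postfix="session1")    # writes data_session1.pickle, returns its absolute path

dm_restored = decsionManager("Pipa", sigAgrReq)
dm_restored.loadDump(path)                  # RM / DM / TV / SA and sigDict come back from the pickle
```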

View File

@@ -2,8 +2,8 @@ import pandas as pd
import datetime
import numpy as np
import market_trade.core.CoreTraidMath as CoreTraidMath
import market_trade.core.CoreDraw as CoreDraw
import market_trade.core.CoreTraidMath
import market_trade.core.CoreDraw
class coreIndicator():
def __init__(self,
@@ -31,7 +31,7 @@ class coreIndicator():
self.getFig()
return self.ans
def getFig(self,row=1):
CoreDraw.coreDraw(self.figDict,True)
market_trade.core.CoreDraw.coreDraw(self.figDict,True)
def getCalculate(self):
return "Error"
def getFigDict(self):
@@ -79,7 +79,7 @@ class indicatorAgrigator():
req[0].append(i)
else:
req.append([i])
CoreDraw.agrigateFig(req,True)
market_trade.core.CoreDraw.agrigateFig(req,True)
def runAll(self,indList,df,needDraw=False):
self.createIndFromList(indList)
self.calculateInd(df)
@@ -99,12 +99,12 @@ class ind_BB(coreIndicator):
'window':self.options['window']
}
}
ans['BB']=CoreTraidMath.CoreMath(self.data,opMA).ans
ans['BB']=market_trade.core.CoreTraidMath.CoreMath(self.data,opMA).ans
opSTD={'dataType':'ohcl',
'action':'findSTD',
'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']}
}
ans['STD']=CoreTraidMath.CoreMath(self.data,opSTD).ans
ans['STD']=market_trade.core.CoreTraidMath.CoreMath(self.data,opSTD).ans
ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev']
ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev']
ans['x']=np.array(self.data['date'][self.options['window']-1:].to_list())

View File

@@ -0,0 +1,89 @@
import pandas as pd
import datetime
import numpy as np
import market_trade.core.CoreTraidMath
class coreIndicator():
def __init__(self,options: dict, dataType: str = None, predictType: str = None, name: str = None):
self.options = options
self.dataType = dataType #ochl
self.predictType = predictType #trend
def getAns(self, data: pd.DataFrame() ):
return "ERROR"
class indicatorsAgrigator:
"""
indicators = {
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}
}
}
dataDic={
'ind_BB':df_candle[:1000]
}
"""
def __init__ (self,indDict={}):
self.indDict = indDict
self.indInst = {}
self.ans={}
self.createIndicatorsInstance()
def createIndicatorsInstance(self):
for i in self.indDict.keys():
self.indInst[i]=self.indDict[i]['className'](self.indDict[i]['params'])
def getAns(self,dataDict={}):
ans={}
for i in dataDict.keys():
ans[i] = self.indInst[i].getAns(dataDict[i])
return ans
class ind_BB(coreIndicator):
"""
options
MeanType -> SMA
window -> int
valueType -> str: low, high, open, close
kDev -> float
"""
def __init__(self,options: dict,name = None):
super().__init__(
options = options,
dataType = 'ochl',
predictType = 'trend',
name = name
)
def getAns(self, data: pd.DataFrame()):
data=data.reset_index(drop=True)
ans={}
opMA={'dataType':'ohcl',
'action':'findMean',
'actionOptions':{
'MeanType':self.options['MeanType'],
'valueType':self.options['valueType'],
'window':self.options['window']
}
}
ans['BB']=market_trade.core.CoreTraidMath.CoreMath(data,opMA).ans
opSTD={'dataType':'ohcl',
'action':'findSTD',
'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']}
}
ans['STD']=market_trade.core.CoreTraidMath.CoreMath(data,opSTD).ans
ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev']
ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev']
ans['x']=np.array(data['date'][self.options['window']-1:].to_list())
self.ans= ans
return ans
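
A usage sketch for the v2 indicator aggregator above, following the dict layout in its docstring; the CSV path is hypothetical and the import path matches the market_trade.core.indicators_v2 imports used elsewhere in this changeset:

```python
import pandas as pd
from market_trade.core.indicators_v2 import ind_BB, indicatorsAgrigator

df_candle = pd.read_csv("data/EURUSD_price_candlestick.csv")       # hypothetical path
df_candle = df_candle.rename(columns={"timestamp": "date"})

indicators = {
    "ind_BB": {
        "className": ind_BB,
        "params": {"MeanType": "SMA", "window": 15, "valueType": "close", "kDev": 2.5},
    }
}
agg = indicatorsAgrigator(indicators)
ans = agg.getAns({"ind_BB": df_candle[:1000]})
# ans["ind_BB"] carries the band arrays: BB (the SMA), pSTD, mSTD and the matching x dates.
```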

View File

@@ -0,0 +1,29 @@
import pandas as pd
import datetime
import numpy as np
import random
class riskManager:
def __init__(self,commision=0.04):
self.commision = commision
pass
def getDecision(self,probabilityDecision, price, deals=None) -> dict:
ans = {}
ans['decision'] = 'none'
if probabilityDecision['trande'] == 'up':
ans['decision'] = 'buy'
ans['amount'] = 1
elif probabilityDecision['trande'] == 'none':
ans['decision'] = 'none'
elif probabilityDecision['trande'] == 'down':
for i in range(deals.shape[0]):
ans['decision'] = 'None'
ans['deals'] = []
row = deals.iloc[i]
if row.startPrice < price*pow(1+self.commision,2):
ans['decision'] = 'sell'
ans['deals'].append(row.name)
return ans
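
The sell branch above compares a deal's entry price against the current price grossed up by the commission twice (once for the buy, once for the sell). A tiny numeric sketch of that check with invented numbers:

```python
commission = 0.04                              # default from riskManager.__init__
price = 1.10                                   # hypothetical current price
threshold = price * pow(1 + commission, 2)     # 1.10 * 1.0816 = 1.18976

start_price = 1.15
would_sell = start_price < threshold           # True -> this deal gets flagged 'sell'
```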

View File

@@ -2,13 +2,12 @@ import pandas as pd
import datetime
import numpy as np
import market_trade.core.CoreTraidMath as CoreTraidMath
import market_trade.core.CoreDraw as CoreDraw
import market_trade.core.CoreTraidMath
import market_trade.core.CoreDraw
from tqdm import tqdm
from market_trade.core.indicators import *
class coreSignalTrande():
def __init__(self,
data=pd.DataFrame(),
@@ -17,140 +16,133 @@ class coreSignalTrande():
batchSize=None,
indParams=None,
signalParams=None,
# needFig=False,
# showOnlyIndex=False,
# drawFig=False,
# equalityGap=0
):
self.data = data.reset_index(drop=True)
self.onlineData = data.reset_index(drop=True)
self.dataType = dataType
self.mode = mode
self.ans = None
self.softAnalizList = np.asarray([])
self.hardAnalizList = np.asarray([])
self.analizMetrics = {}
self.indParams = indParams
self.signalParams = signalParams
self.batchSize = batchSize
# self.needFig=needFig
# self.showOnlyIndex=showOnlyIndex
# self.drawFig=drawFig
# self.equalityGap=equalityGap
# Router for producing the answer
def getAns(self, data):
# ans='Error: unknown Mode!'
ans = None
#needFig=False,
#showOnlyIndex=False,
#drawFig=False,
#equalityGap=0
):
self.data=data.reset_index(drop=True)
self.onlineData=data.reset_index(drop=True)
self.dataType=dataType
self.mode=mode
self.ans=None
self.softAnalizList=np.asarray([])
self.hardAnalizList=np.asarray([])
self.analizMetrics={}
self.indParams=indParams
self.signalParams=signalParams
self.batchSize=batchSize
#self.needFig=needFig
#self.showOnlyIndex=showOnlyIndex
#self.drawFig=drawFig
#self.equalityGap=equalityGap
#Router for producing the answer
def getAns(self,data):
#ans='Error: unknown Mode!'
ans=None
print("Start processing...")
if self.mode == 'online':
ans = self.getOnlineAns(data.reset_index(drop=True))
ans=self.getOnlineAns(data.reset_index(drop=True))
elif self.mode == 'retro':
ans = self.getRetroAns(data)
ans=self.getRetroAns(data)
elif self.mode == 'retroFast':
ans = self.getRetroFastAns(data)
ans=self.getRetroFastAns(data)
print("Processing DONE!")
return ans
# Retro mode where the window grows by adding new elements
def getRetroAns(self, data):
ans = np.asarray([])
for i in tqdm(range(self.batchSize, len(data) - 1)):
# self.onlineData=self.data[0:i]
#Retro mode where the window grows by adding new elements
def getRetroAns(self,data):
ans=np.asarray([])
for i in tqdm(range(self.batchSize,len(data)-1)):
#self.onlineData=self.data[0:i]
window_data = data[0:i]
window_data.reset_index(drop=True)
ans = np.append(ans, (self.getOnlineAns(window_data)))
self.ans = ans
ans=np.append(ans,(self.getOnlineAns(window_data)))
self.ans=ans
self.getAnaliz()
self.getMetrix()
return ans
# Retro mode where the window slides
def getRetroFastAns(self, data):
# print('d - ',data)
ans = np.asarray([])
for i in tqdm(range(len(data) - 1 - self.batchSize)):
# self.onlineData=self.data[i:i+self.batchSize]
window_data = data[i:i + self.batchSize]
# print('win - ',window_data)
#Retro mode where the window slides
def getRetroFastAns(self,data):
#print('d - ',data)
ans=np.asarray([])
for i in tqdm(range(len(data)-1-self.batchSize)):
#self.onlineData=self.data[i:i+self.batchSize]
window_data = data[i:i+self.batchSize]
#print('win - ',window_data)
window_data.reset_index(drop=True)
# print('win - ',window_data)
ans = np.append(ans, (self.getOnlineAns(window_data)))
self.ans = ans
#print('win - ',window_data)
ans=np.append(ans,(self.getOnlineAns(window_data)))
self.ans=ans
self.getAnaliz()
self.getMetrix()
return ans
# Method that every child class overrides
#Method that every child class overrides
def getOnlineAns(self):
return 'Error'
def getAnaliz(self):
print("Start analiz...")
for i in (range(len(self.ans))):
sourceValue = self.data[self.signalParams['source']][i + self.batchSize]
targetValue = self.data[self.signalParams['target']][i + self.batchSize + 1]
if (targetValue) > sourceValue:
if self.ans[i] == 1:
self.softAnalizList = np.append(self.softAnalizList, 1)
self.hardAnalizList = np.append(self.hardAnalizList, 1)
elif self.ans[i] == -1:
self.softAnalizList = np.append(self.softAnalizList, -1)
self.hardAnalizList = np.append(self.hardAnalizList, -1)
sourceValue=self.data[self.signalParams['source']][i+self.batchSize]
targetValue=self.data[self.signalParams['target']][i+self.batchSize + 1]
if (targetValue)>sourceValue:
if self.ans[i]==1:
self.softAnalizList=np.append(self.softAnalizList,1)
self.hardAnalizList=np.append(self.hardAnalizList,1)
elif self.ans[i]==-1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
else:
self.softAnalizList = np.append(self.softAnalizList, 0)
self.hardAnalizList = np.append(self.hardAnalizList, -1)
elif (targetValue) < sourceValue:
if self.ans[i] == 1:
self.softAnalizList = np.append(self.softAnalizList, -1)
self.hardAnalizList = np.append(self.hardAnalizList, -1)
elif self.ans[i] == -1:
self.softAnalizList = np.append(self.softAnalizList, 1)
self.hardAnalizList = np.append(self.hardAnalizList, 1)
self.softAnalizList=np.append(self.softAnalizList,0)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
elif (targetValue)<sourceValue:
if self.ans[i]==1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
elif self.ans[i]==-1:
self.softAnalizList=np.append(self.softAnalizList,1)
self.hardAnalizList=np.append(self.hardAnalizList,1)
else:
self.softAnalizList = np.append(self.softAnalizList, 0)
self.hardAnalizList = np.append(self.hardAnalizList, -1)
self.softAnalizList=np.append(self.softAnalizList,0)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
else:
if self.ans[i] == 1:
self.softAnalizList = np.append(self.softAnalizList, -1)
self.hardAnalizList = np.append(self.hardAnalizList, -1)
elif self.ans[i] == -1:
self.softAnalizList = np.append(self.softAnalizList, -1)
self.hardAnalizList = np.append(self.hardAnalizList, -1)
if self.ans[i]==1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
elif self.ans[i]==-1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
else:
self.softAnalizList = np.append(self.softAnalizList, 0)
self.hardAnalizList = np.append(self.hardAnalizList, 1)
self.softAnalizList=np.append(self.softAnalizList,0)
self.hardAnalizList=np.append(self.hardAnalizList,1)
print("Analiz DONE!")
return 0
def getMeteixDict(self, d):
def getMeteixDict(self,d):
'''
1 - (confirmed + contradicted) / (confirmed + contradicted + zeros)
2 - (confirmed - contradicted) / (confirmed + contradicted + zeros)
'''
return {
'1': (d['1'] + d['-1']) / (d['1'] + d['-1'] + d['0']),
'2': (d['1'] - d['-1']) / (d['1'] + d['-1'] + d['0']),
'1':(d['1'] + d['-1']) / (d['1'] + d['-1'] + d['0']),
'2':(d['1'] - d['-1']) / (d['1'] + d['-1'] + d['0']),
}
def getMetrix(self):
softAnalizCount = {'-1': 0, '0': 0, '1': 0}
hardAnalizCount = {'-1': 0, '0': 0, '1': 0}
softAnalizCount = {'-1':0,'0':0,'1':0}
hardAnalizCount = {'-1':0,'0':0,'1':0}
for i in range(len(self.softAnalizList)):
softAnalizCount[str(int(self.softAnalizList[i]))] += 1
hardAnalizCount[str(int(self.hardAnalizList[i]))] += 1
self.analizMetrics = {'softAnaliz': self.getMeteixDict(softAnalizCount),
'hardAnaliz': self.getMeteixDict(hardAnalizCount)
}
softAnalizCount[str(int(self.softAnalizList[i]))]+=1
hardAnalizCount[str(int(self.hardAnalizList[i]))]+=1
self.analizMetrics = {'softAnaliz':self.getMeteixDict(softAnalizCount),
'hardAnaliz':self.getMeteixDict(hardAnalizCount)
}
class signal_BB(coreSignalTrande):
def __init__(self,
data=pd.DataFrame(),
dataType='candel',
@@ -158,37 +150,101 @@ class signal_BB(coreSignalTrande):
batchSize=None,
indParams=None,
signalParams=None,
):
):
super().__init__(
data=data,
dataType=dataType,
mode=mode,
batchSize=batchSize,
indParams=indParams,
signalParams=signalParams,
)
data=data,
dataType=dataType,
mode=mode,
batchSize=batchSize,
indParams=indParams,
signalParams=signalParams,
)
if self.indParams == None:
indParams = {'MeanType': 'SMA', 'window': 15, 'valueType': 'low', 'kDev': 2}
indParams={'MeanType':'SMA','window':15,'valueType':'low','kDev':2}
else:
indParams = self.indParams
self.BB = ind_BB(
indParams=self.indParams
self.BB=ind_BB(
data=data,
options=indParams,
)
def getOnlineAns(self, data):
ans = 0
# print(data)
def getOnlineAns(self,data):
ans=0
#print(data)
self.BB.getAns(data)
# print(BB)
lastValue = data[self.signalParams['source']].to_list()[-1]
if lastValue > self.BB.ans['pSTD'][-1]:
ans = -1
elif lastValue < self.BB.ans['mSTD'][-1]:
ans = +1
#print(BB)
lastValue=data[self.signalParams['source']].to_list()[-1]
if lastValue>self.BB.ans['pSTD'][-1]:
ans=-1
elif lastValue<self.BB.ans['mSTD'][-1]:
ans=+1
else:
ans = 0
ans=0
return ans
class signalAgrigator:
"""
dictAgrigSignal
key - name str
value - dict
className - class
indParams - dict
signalParams - dict
batchSize - int
"""
def __init__(self,
data=pd.DataFrame(),
dictAgrigSignal={},
mode='online',
dataType='candel',
batchSize=None
):
self.createSingnalInstances(
data,
dictAgrigSignal,
dataType,
batchSize
)
self.mode=mode
def createSingnalInstances(
self,
data,
dictAgrigSignal,
dataType,
batchSize
):
ans={}
for i in dictAgrigSignal:
ans[i]=dictAgrigSignal[i]['className'](
data=data,
dataType=dataType,
batchSize=batchSize,
indParams=dictAgrigSignal[i]['indParams'],
signalParams=dictAgrigSignal[i]['signalParams'],
mode=self.mode
)
self.signalsInstances = ans
return ans
def getAns(self, data):
ans={}
if self.mode == 'online':
for i in self.signalsInstances:
ans[i]=(self.signalsInstances[i].getAns(data))
elif self.mode == 'retroFast' or self.mode == 'retro':
for i in self.signalsInstances:
self.signalsInstances[i].getAns(data)
ans[i]=self.signalsInstances[i].analizMetrics
return ans
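
For the analysis metrics above: roughly, each retro window contributes a +1 (call confirmed by the next candle), -1 (call contradicted) or 0 (no call), and getMeteixDict folds the tallies into two ratios. A small numeric sketch with invented counts:

```python
counts = {"1": 60, "-1": 25, "0": 15}   # hypothetical tallies over 100 windows

metric_1 = (counts["1"] + counts["-1"]) / (counts["1"] + counts["-1"] + counts["0"])
# 85 / 100 = 0.85 -> share of windows where the signal made a call at all
metric_2 = (counts["1"] - counts["-1"]) / (counts["1"] + counts["-1"] + counts["0"])
# 35 / 100 = 0.35 -> net edge: confirmed minus contradicted calls, over all windows
```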

View File

@@ -0,0 +1,112 @@
import pandas as pd
import datetime
import numpy as np
import market_trade.core.CoreTraidMath
#import market_trade.core.CoreDraw
from tqdm import tqdm
from market_trade.core.indicators_v2 import *
class coreSignalTrande:
def __init__(self, name: str, req: dict, dataType: str):
self.name = name
self.agrigateInds = self.createIndicatorsInstance(req)
self.params = req['params']
self.dataType = dataType
def createIndicatorsInstance(self,req: dict) -> dict:
return indicatorsAgrigator(req['indicators'])
def getIndAns(self, dataDict: dict) -> dict:
return self.agrigateInds.getAns(dataDict)
def getAns(self, data: pd.DataFrame(), indDataDict: dict) -> dict:
return self.getSigAns(data, self.getIndAns(indDataDict))
class sig_BB(coreSignalTrande):
"""
ind keys:
ind_BB
"""
def __init__(self, name: str, req:dict):
super().__init__(name, req, 'ochl')
def getSigAns(self, data: pd.DataFrame(), indAnsDict: dict) -> dict:
lastValue = data[self.params['source']].to_list()[-1]
if lastValue>indAnsDict['ind_BB']['pSTD'][-1]:
ans='down'
elif lastValue<indAnsDict['ind_BB']['mSTD'][-1]:
ans='up'
else:
ans='none'
return ans
class signalsAgrigator:
"""
sigAgrReq = {
'sig_BB':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}
}
}
},
'sig_BB_2':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2}
}
}
}
}
sigAgrData = {
'sig_BB':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
},
'sig_BB_2':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
}
}
"""
def __init__ (self,req:dict):
self.signals = self.createSignalsInstance(req)
def createSignalsInstance(self, siganlsDict: dict) -> dict:
ans = {}
for i in siganlsDict.keys():
ans[i]=siganlsDict[i]['className'](name = i, req = siganlsDict[i])
return ans
def getAns(self, dataDict: dict) -> dict:
ans = {}
for i in dataDict.keys():
ans[i] = self.signals[i].getAns(data = dataDict[i]['signalData'],
indDataDict = dataDict[i]['indicatorData'])
return ans

View File

@@ -0,0 +1,84 @@
import pandas as pd
import datetime
import numpy as np
#import random
class trandeVoter():
def __init__(self,name):
self.name = name # just a name
self.trandeValuesList = ['up','none','down'] # list of trend values
self.matrixAmounts = None # matrix of counts
self.keysMatrixAmounts = None # keys of the counts matrix, technical field
self.matrixProbability = None # probability matrix
# builds a df with the given columns and index; the index holds every unique combination of trend values
def createDFbyNames(self, namesIndex, namesColoms,defaultValue=0.0):
df = pd.DataFrame(dict.fromkeys(namesColoms, [defaultValue]*pow(3,len(namesIndex))),
index=pd.MultiIndex.from_product([self.trandeValuesList]*len(namesIndex), names=namesIndex)
#,columns=namesColoms
)
return(df)
# create the counts matrix with a default value
def createMatrixAmounts(self,namesIndex: list) -> pd.DataFrame():
self.matrixAmounts = self.createDFbyNames(namesIndex,self.trandeValuesList,0)
self.keysMatrixAmounts = self.matrixAmounts.to_dict('tight')['index_names']
self.createMatrixProbability(namesIndex)
return(self.matrixAmounts)
# create the probability matrix with a default value
def createMatrixProbability(self,namesIndex: list) -> pd.DataFrame():
self.matrixProbability = self.createDFbyNames(namesIndex,self.trandeValuesList)
return(self.matrixProbability)
# record a decision in the counts matrix. signalDecisions - indicator votes as key:value; trande - the real outcome
def setDecisionBySignals(self,signalDecisions: dict,trande: str) -> None:
buff=[]
for i in self.keysMatrixAmounts:
buff.append(signalDecisions[i])
self.matrixAmounts.loc[tuple(buff),trande] += 1
# fill the probability matrix with values computed from the counts matrix
def generateMatrixProbability(self) -> None:
for i in range(self.matrixAmounts.shape[0]):
print(self.matrixAmounts)
rowSum=sum(self.matrixAmounts.iloc[i]) + 1
self.matrixProbability.iloc[i]['up'] = self.matrixAmounts.iloc[i]['up'] / rowSum
self.matrixProbability.iloc[i]['none'] = self.matrixAmounts.iloc[i]['none'] / rowSum
self.matrixProbability.iloc[i]['down'] = self.matrixAmounts.iloc[i]['down'] / rowSum
# get a decision from the probability matrix for the given signal votes
def getDecisionBySignals(self,signalDecisions: dict) -> dict:
ans = {}
spliceSearch =self.matrixProbability.xs(tuple(signalDecisions.values()),
level=list(signalDecisions.keys())
)
ans['probability'] = spliceSearch.to_dict('records')[0]
ans['trande'] = spliceSearch.iloc[0].idxmax()
return ans
# get the probability and counts matrices as dicts
def getMatrixDict(self) -> dict:
ans={}
ans['amounts'] = self.matrixAmounts.to_dict('tight')
ans['probability'] = self.matrixProbability.to_dict('tight')
return ans
# set the probability and counts matrices from dicts
def setMatrixDict(self,matrixDict: dict) -> dict:
if matrixDict['amounts'] != None:
self.matrixAmounts = pd.DataFrame.from_dict(matrixDict['amounts'], orient='tight')
if matrixDict['probability'] != None:
self.matrixProbability = pd.DataFrame.from_dict(matrixDict['probability'], orient='tight')
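
The voter above keeps a 3^n-row MultiIndex frame, one row per combination of the n signals' votes, and derives per-row outcome probabilities from the counts. A minimal standalone sketch of that structure, assuming two hypothetical signals named BB1 and BB2:

```python
import pandas as pd

trends = ["up", "none", "down"]
signals = ["BB1", "BB2"]               # hypothetical signal names

# 3^2 = 9 rows, one per combination of the two signals' votes.
amounts = pd.DataFrame(
    0,
    index=pd.MultiIndex.from_product([trends] * len(signals), names=signals),
    columns=trends,
)
amounts.loc[("up", "down"), "up"] += 1   # BB1 said up, BB2 said down, market actually went up

# Row-wise outcome probabilities (the class adds 1 to the denominator to avoid division by zero).
probabilities = amounts.div(amounts.sum(axis=1) + 1, axis=0)

# Look up the row for a given vote combination, the way getDecisionBySignals uses .xs().
row = probabilities.xs(("up", "down"), level=signals)
decision = row.iloc[0].idxmax()          # most likely outcome for that combination
```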

View File

@@ -22,7 +22,7 @@ class DukaMTInterface:
# droppnig old timestamp index
self.duka_dataset.reset_index(inplace=True, drop=True)
print(self.duka_dataset)
# adding bids
self.bid_candlesticks = self.duka_dataset['bid'].copy()
self.bid_candlesticks['date'] = self.duka_dataset['date']

View File

@@ -4,7 +4,7 @@ import pandas as pd
if __name__ == '__main__':
df_candle = pd.read_csv(market_trade.src.constants.TEST_CANDLESTICKS_PATH)
df_candle = pd.read_csv(market_trade.constants.TEST_CANDLESTICKS_PATH)
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
ind_params = {'MeanType': 'SMA', 'window': 15, 'valueType': 'close', 'kDev': 2.5}
signalParams = {'source': 'close', 'target': 'close'}

View File

@@ -3,11 +3,11 @@ import market_trade.constants
def test_dataloader(data_path):
duka_interface = (market_trade.src.dataloader.DukaMTInterface(data_path))
duka_interface = (market_trade.data.dataloader.DukaMTInterface(data_path))
print(duka_interface.ask_candlesticks)
if __name__ == '__main__':
candlesticks_filepaths = [filepath for filepath in market_trade.src.constants.CANDLESTICK_DATASETS_PATH.iterdir()]
candlesticks_filepaths = [filepath for filepath in market_trade.constants.CANDLESTICK_DATASETS_PATH.iterdir()]
candlesticks_filepath = candlesticks_filepaths[0]
test_dataloader(candlesticks_filepath)

View File

@@ -0,0 +1,63 @@
from market_trade.core.decisionManager_v2 import *
from market_trade.core.indicators_v2 import *
from market_trade.core.signals_v2 import *
import market_trade.data.dataloader
sigAgrReq = {
'sig_BB':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5}
}
}
},
'sig_BB_2':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2}
}
}
}
}
test = decsionManager('Pipa', sigAgrReq)
import pandas as pd
df_candle = pd.read_csv("../../data/EURUSD_price_candlestick.csv")
df_candle["date"] = df_candle["timestamp"]
sigAgrRetroTemplate = {
'sig_BB':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
},
'sig_BB_2':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
}
}
retroAns = test.getRetroTrendAns(sigAgrRetroTemplate,df_candle[5000:6000].reset_index(drop=True),40)
test.generateMatrixProbabilityFromDict(retroAns)
sigAgrData = {
'sig_BB':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
},
'sig_BB_2':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
}
}
test.getOnlineAns(sigAgrData, 0.0)

8929
notebooks/Indicators.ipynb Normal file

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,78 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "ad08f522",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import datetime\n",
"import numpy as np\n",
"import random"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b9b18667",
"metadata": {},
"outputs": [],
"source": [
"class riskManager:\n",
" \n",
" def __init__(self,commision=0.04):\n",
" self.commision = commision\n",
" pass\n",
" def getDecision(self,signalDecision,probabilityDecision, price, deals=None) -> dict:\n",
" ans = {}\n",
" if probabilityDecision['trande'] == 'up':\n",
" ans['decision'] = 'buy'\n",
" ans['amount'] = 1\n",
" elif probabilityDecision['trande'] == 'none':\n",
" ans['decision'] = 'none'\n",
" elif probabilityDecision['trande'] == 'down': \n",
" for i in deals.shape[0]:\n",
" ans['decision'] = 'None'\n",
" ans['deals'] = []\n",
" row = deals.iloc[i]\n",
" if row.startPrice < price*pow(1+self.commission,2):\n",
" ans['decision'] = 'sell'\n",
" ans['deals'].append(row.name)\n",
" return ans\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f5dd64e",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

736
notebooks/Signals.ipynb Normal file

File diff suppressed because one or more lines are too long

1961
notebooks/Signals_v2.ipynb Normal file

File diff suppressed because one or more lines are too long

1656
notebooks/TrandVoter.ipynb Normal file

File diff suppressed because one or more lines are too long

1509
notebooks/Voter_ne_tot.ipynb Normal file

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,267 @@
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import datetime
import numpy as np
#import plotly as pl
#import plotly.graph_objs as go
#from plotly.offline import init_notebook_mode, iplot
#from plotly.subplots import make_subplots
#init_notebook_mode()
import CoreTraidMath
import CoreDraw
# In[3]:
class coreIndicator():
def __init__(self,
data=pd.DataFrame(),
options={},
showMode='None',
):
'''
showMode = None/Ind/PartOf
'''
self.data=data
self.showMode=showMode
self.options=options
self.overlayInd=None #True/False
self.ans=None
self.figDict=None
def getAns(self,data=None):
if type(data)!=type(None):
self.data=data
self.ans=self.getCalculate()
if self.showMode=='Ind' or self.showMode=='PartOf':
self.figDict=self.getFigDict()
if self.showMode=='Ind':
self.getFig()
return self.ans
def getFig(self,row=1):
CoreDraw.coreDraw(self.figDict,True)
def getCalculate(self):
return "Error"
def getFigDict(self):
return "Error"
class indicatorAgrigator():
'''
Purely a debugging helper
jj=indicatorAgrigator().runAll([o1,o2],df_candle[:30])
#jj.createIndFromList([o1,o2])
#jj.calculateInd(df_candle[:30])
'''
def __init__(self):
self.indList=None
self.data=None
def createInd(self,classDict):
return classDict['name'](
options=classDict['params'],
showMode=classDict['showMode']
)
def createIndFromList(self,indList):
self.indList=indList
ans=[]
for i in self.indList:
ans.append(self.createInd(i))
self.indList=ans
return ans
def calculateInd(self,data):
self.data=data
for i in self.indList:
#i.getAns(data)
i.data=self.data
i.ans=i.getCalculate()
i.figDict=i.getFigDict()
#i.getFig()
def agrigateFig(self):
req=[[]]
for i in self.indList:
if i.overlayInd==True:
req[0].append(i)
else:
req.append([i])
CoreDraw.agrigateFig(req,True)
def runAll(self,indList,df,needDraw=False):
self.createIndFromList(indList)
self.calculateInd(df)
if needDraw:
self.agrigateFig()
# In[4]:
class ind_BB(coreIndicator):
def getCalculate(self):
self.overlayInd=True
ans={}
opMA={'dataType':'ohcl',
'action':'findMean',
'actionOptions':{
'MeanType':self.options['MeanType'],
'valueType':self.options['valueType'],
'window':self.options['window']
}
}
ans['BB']=CoreTraidMath.CoreMath(self.data,opMA).ans
opSTD={'dataType':'ohcl',
'action':'findSTD',
'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']}
}
ans['STD']=CoreTraidMath.CoreMath(self.data,opSTD).ans
ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev']
ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev']
ans['x']=np.array(self.data['date'][self.options['window']-1:].to_list())
return ans
def getFigDict(self,row=1):
req=[]
req.append({
'vtype':'Scatter',
'df':pd.DataFrame(
{'value':self.ans['BB'],'date':self.ans['x']}) ,
'row':row,
'col':1,
'name':'BB'
})
req.append({
'vtype':'Scatter',
'df':pd.DataFrame(
{'value':self.ans['pSTD'],'date':self.ans['x']}) ,
'row':row,
'col':1,
'name':'pSTD'
})
req.append({
'vtype':'Scatter',
'df':pd.DataFrame(
{'value':self.ans['mSTD'],'date':self.ans['x']}) ,
'row':row,
'col':1,
'name':'mSTD'
})
return req
# In[5]:
class ind_OCHL(coreIndicator):
def getCalculate(self):
self.overlayInd=True
def getFigDict(self,row=1):
req=[]
req.append({
'vtype':'OCHL',
'df':self.data,
'row':1,
'col':1,
'name':'OHCL'
})
return req
# In[7]:
df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv")
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
df_candle
# In[8]:
o1={
'name':ind_OCHL,
'params':{},
'showMode':'PartOf',
}
o2={
'name':ind_BB,
'params':{'MeanType':'SMA','window':25,'valueType':'low','kDev':2},
'showMode':'PartOf',
}
jj=indicatorAgrigator().runAll([o1,o2],df_candle[:300],True)
#jj.createIndFromList([o1,o2])
#jj.calculateInd(df_candle[:30])
# In[9]:
op={'MeanType':'SMA','window':5,'valueType':'low','kDev':2}
a=ind_BB(df_candle[:100],op,'PartOf')
# In[10]:
a.getAns()
# In[11]:
b=ind_OCHL(df_candle[:30],{},'Ind')
b.getAns(df_candle[:100])
# In[12]:
opc={'MeanType':'SMA','window':20,'valueType':'low','kDev':2}
c=ind_BB(df_candle[:100],opc,'PartOf')
c.getAns()
# In[13]:
hhh = CoreDraw.agrigateFig([[b,a,c]],True)
# In[14]:
import indicators
# In[15]:
op_1={'MeanType':'SMA','window':5,'valueType':'low','kDev':2}
test_1=indicators.ind_BB(df_candle[:100],op)
test_1.getAns()
# In[ ]:

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
import numpy as np
import random
# In[2]:
class riskManager:
def __init__(self,commision=0.04):
self.commision = commision
pass
def getDecision(self,signalDecision,probabilityDecision, price, deals=None) -> dict:
ans = {}
if probabilityDecision['trande'] == 'up':
ans['decision'] = 'buy'
ans['amount'] = 1
elif probabilityDecision['trande'] == 'none':
ans['decision'] = 'none'
elif probabilityDecision['trande'] == 'down':
for i in range(deals.shape[0]):
ans['decision'] = 'None'
ans['deals'] = []
row = deals.iloc[i]
if row.startPrice < price*pow(1+self.commision,2):
ans['decision'] = 'sell'
ans['deals'].append(row.name)
return ans
# In[ ]:

View File

@@ -0,0 +1,390 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
import numpy as np
import CoreTraidMath
import CoreDraw
from tqdm import tqdm
from indicators import *
# In[2]:
df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv")
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
df_candle
# In[3]:
class coreSignalTrande():
def __init__(self,
data=pd.DataFrame(),
dataType='candel',
mode='online',
batchSize=None,
indParams=None,
signalParams=None,
#needFig=False,
#showOnlyIndex=False,
#drawFig=False,
#equalityGap=0
):
self.data=data.reset_index(drop=True)
self.onlineData=data.reset_index(drop=True)
self.dataType=dataType
self.mode=mode
self.ans=None
self.softAnalizList=np.asarray([])
self.hardAnalizList=np.asarray([])
self.analizMetrics={}
self.indParams=indParams
self.signalParams=signalParams
self.batchSize=batchSize
#self.needFig=needFig
#self.showOnlyIndex=showOnlyIndex
#self.drawFig=drawFig
#self.equalityGap=equalityGap
#Router for producing the answer
def getAns(self,data):
#ans='Error: unknown Mode!'
ans=None
print("Start processing...")
if self.mode == 'online':
ans=self.getOnlineAns(data.reset_index(drop=True))
elif self.mode == 'retro':
ans=self.getRetroAns(data)
elif self.mode == 'retroFast':
ans=self.getRetroFastAns(data)
print("Processing DONE!")
return ans
#Retro mode where the window grows by adding new elements
def getRetroAns(self,data):
ans=np.asarray([])
for i in tqdm(range(self.batchSize,len(data)-1)):
#self.onlineData=self.data[0:i]
window_data = data[0:i]
window_data.reset_index(drop=True)
ans=np.append(ans,(self.getOnlineAns(window_data)))
self.ans=ans
self.getAnaliz()
self.getMetrix()
return ans
#Retro mode where the window slides
def getRetroFastAns(self,data):
#print('d - ',data)
ans=np.asarray([])
for i in tqdm(range(len(data)-1-self.batchSize)):
#self.onlineData=self.data[i:i+self.batchSize]
window_data = data[i:i+self.batchSize]
#print('win - ',window_data)
window_data.reset_index(drop=True)
#print('win - ',window_data)
ans=np.append(ans,(self.getOnlineAns(window_data)))
self.ans=ans
self.getAnaliz()
self.getMetrix()
return ans
#Method that every child class overrides
def getOnlineAns(self):
return 'Error'
def getAnaliz(self):
print("Start analiz...")
for i in (range(len(self.ans))):
sourceValue=self.data[self.signalParams['source']][i+self.batchSize]
targetValue=self.data[self.signalParams['target']][i+self.batchSize + 1]
if (targetValue)>sourceValue:
if self.ans[i]==1:
self.softAnalizList=np.append(self.softAnalizList,1)
self.hardAnalizList=np.append(self.hardAnalizList,1)
elif self.ans[i]==-1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
else:
self.softAnalizList=np.append(self.softAnalizList,0)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
elif (targetValue)<sourceValue:
if self.ans[i]==1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
elif self.ans[i]==-1:
self.softAnalizList=np.append(self.softAnalizList,1)
self.hardAnalizList=np.append(self.hardAnalizList,1)
else:
self.softAnalizList=np.append(self.softAnalizList,0)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
else:
if self.ans[i]==1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
elif self.ans[i]==-1:
self.softAnalizList=np.append(self.softAnalizList,-1)
self.hardAnalizList=np.append(self.hardAnalizList,-1)
else:
self.softAnalizList=np.append(self.softAnalizList,0)
self.hardAnalizList=np.append(self.hardAnalizList,1)
print("Analiz DONE!")
return 0
def getMeteixDict(self,d):
'''
1 - (confirmed + contradicted) / (confirmed + contradicted + zeros)
2 - (confirmed - contradicted) / (confirmed + contradicted + zeros)
'''
return {
'1':(d['1'] + d['-1']) / (d['1'] + d['-1'] + d['0']),
'2':(d['1'] - d['-1']) / (d['1'] + d['-1'] + d['0']),
}
def getMetrix(self):
softAnalizCount = {'-1':0,'0':0,'1':0}
hardAnalizCount = {'-1':0,'0':0,'1':0}
for i in range(len(self.softAnalizList)):
softAnalizCount[str(int(self.softAnalizList[i]))]+=1
hardAnalizCount[str(int(self.hardAnalizList[i]))]+=1
self.analizMetrics = {'softAnaliz':self.getMeteixDict(softAnalizCount),
'hardAnaliz':self.getMeteixDict(hardAnalizCount)
}
# In[4]:
class signal_BB(coreSignalTrande):
def __init__(self,
data=pd.DataFrame(),
dataType='candel',
mode='online',
batchSize=None,
indParams=None,
signalParams=None,
):
super().__init__(
data=data,
dataType=dataType,
mode=mode,
batchSize=batchSize,
indParams=indParams,
signalParams=signalParams,
)
if self.indParams == None:
indParams={'MeanType':'SMA','window':15,'valueType':'low','kDev':2}
else:
indParams=self.indParams
self.BB=ind_BB(
data=data,
options=indParams,
)
def getOnlineAns(self,data):
ans=0
#print(data)
self.BB.getAns(data)
#print(BB)
lastValue=data[signalParams['source']].to_list()[-1]
if lastValue>self.BB.ans['pSTD'][-1]:
ans=-1
elif lastValue<self.BB.ans['mSTD'][-1]:
ans=+1
else:
ans=0
return ans
# In[5]:
ind_params={'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}
signalParams={'source':'close','target':'close'}
b=signal_BB(data=df_candle[:99999],
mode='retroFast',
indParams=ind_params,
signalParams=signalParams,
batchSize=15
)
# In[6]:
a=b.getAns(df_candle[:99900])
# In[7]:
b.analizMetrics
# In[ ]:
# In[ ]:
# In[ ]:
# In[8]:
from signals import *
# In[9]:
class signalAgrigator:
"""
dictAgrigSignal
key - name str
value - dict
className - class
indParams - dict
signalParams - dict
batchSize - int
"""
def __init__(self,
data=pd.DataFrame(),
dictAgrigSignal={},
mode='online',
dataType='candel',
batchSize=None
):
self.createSingnalInstances(
data,
dictAgrigSignal,
dataType,
batchSize
)
self.mode=mode
def createSingnalInstances(
self,
data,
dictAgrigSignal,
dataType,
batchSize
):
ans={}
for i in dictAgrigSignal:
ans[i]=dictAgrigSignal[i]['className'](
data=data,
dataType=dataType,
batchSize=batchSize,
indParams=dictAgrigSignal[i]['indParams'],
signalParams=dictAgrigSignal[i]['signalParams'],
mode=self.mode
)
self.signalsInstances = ans
return ans
def getAns(self, data):
ans={}
if self.mode == 'online':
for i in self.signalsInstances:
ans[i]=(self.signalsInstances[i].getAns(data))
elif self.mode == 'retroFast' or self.mode == 'retro':
for i in self.signalsInstances:
self.signalsInstances[i].getAns(data)
ans[i]=self.signalsInstances[i].analizMetrics
return ans
# In[10]:
reqSig={
'BB1':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':15
},
'BB2':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':20
}
}
# In[11]:
reqSig.values()
# In[12]:
testh=signalAgrigator(df_candle[:99999],reqSig,'online','ohcl',30)
# In[13]:
testh.signalsInstances['BB1'].__dict__
# In[ ]:
testh.getAns(df_candle[:100])
# In[ ]:
testh.signalsInstances['BB1'].__dict__
# In[ ]:
# In[ ]:

View File

@@ -0,0 +1,249 @@
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import datetime
import numpy as np
import CoreTraidMath
import CoreDraw
from tqdm import tqdm
from indicators_v2 import *
# In[3]:
df_candle = pd.read_csv(r"../data/EURUSD_price_candlestick.csv")
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
df_candle
# In[4]:
class coreSignalTrande:
def __init__(self, name: str, req: dict, dataType: str):
self.name = name
self.agrigateInds = self.createIndicatorsInstance(req)
self.params = req['params']
self.dataType = dataType
def createIndicatorsInstance(self,req: dict) -> dict:
return indicatorsAgrigator(req['indicators'])
def getIndAns(self, dataDict: dict) -> dict:
return self.agrigateInds.getAns(dataDict)
def getAns(self, data: pd.DataFrame(), indDataDict: dict) -> dict:
return self.getSigAns(data, self.getIndAns(indDataDict))
class sig_BB(coreSignalTrande):
"""
ind keys:
ind_BB
"""
def __init__(self, name: str, req:dict):
super().__init__(name, req, 'ochl')
def getSigAns(self, data: pd.DataFrame(), indAnsDict: dict) -> dict:
lastValue = data[self.params['source']].to_list()[-1]
if lastValue>indAnsDict['ind_BB']['pSTD'][-1]:
ans='down'
elif lastValue<indAnsDict['ind_BB']['mSTD'][-1]:
ans='up'
else:
ans='none'
return ans
# In[5]:
class signalsAgrigator:
def __init__ (self,req:dict):
self.signals = self.createSignalsInstance(req)
def createSignalsInstance(self, siganlsDict: dict) -> dict:
ans = {}
for i in siganlsDict.keys():
ans[i]=siganlsDict[i]['className'](name = i, req = siganlsDict[i])
return ans
def getAns(self, dataDict: dict) -> dict:
ans = {}
for i in dataDict.keys():
ans[i] = self.signals[i].getAns(data = dataDict[i]['signalData'],
indDataDict = dataDict[i]['indicatorData'])
return ans
# In[ ]:
# In[6]:
sigreq= {
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}
}
}
}
indReqDict ={'ind_BB':df_candle[:1000]}
# In[7]:
sigAgrReq = {
'sig_BB':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}
}
}
},
'sig_BB_2':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2}
}
}
}
}
sigAgrData = {
'sig_BB':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
},
'sig_BB_2':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
}
}
# In[ ]:
# In[8]:
ttt=signalsAgrigator(sigAgrReq)
# In[9]:
ttt.__dict__
# In[10]:
ttt.signals['sig_BB'].__dict__
# In[11]:
ttt.getAns(sigAgrData)
# In[ ]:
# In[ ]:
# In[12]:
list({'ttt':2}.keys())[0]
# In[13]:
test = sig_BB('sig_BB', sigreq)
# In[14]:
test.__dict__
# In[ ]:
# In[15]:
test.agrigateInds.__dict__
# In[16]:
ians = test.getIndAns(indReqDict)
ians
# In[17]:
test.getAns(df_candle[:100],indReqDict)
# In[ ]:
# In[ ]:

View File

@@ -0,0 +1,360 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
import numpy as np
import random
from signals import * # remove later
# In[2]:
class trandeVoter():
def __init__(self,name):
self.name = name # just a name
self.trandeValuesList = ['up','none','down'] # list of trend values
self.matrixAmounts = None # matrix of counts
self.keysMatrixAmounts = None # keys of the counts matrix, technical field
self.matrixProbability = None # probability matrix
# builds a df with the given columns and index; the index holds every unique combination of trend values
def createDFbyNames(self, namesIndex, namesColoms,defaultValue=0.0):
df = pd.DataFrame(dict.fromkeys(namesColoms, [defaultValue]*pow(3,len(namesIndex))),
index=pd.MultiIndex.from_product([self.trandeValuesList]*len(namesIndex), names=namesIndex)
#,columns=namesColoms
)
return(df)
# create the counts matrix with a default value
def createMatrixAmounts(self,namesIndex: list) -> pd.DataFrame():
self.matrixAmounts = self.createDFbyNames(namesIndex,self.trandeValuesList,0)
self.keysMatrixAmounts = self.matrixAmounts.to_dict('tight')['index_names']
self.createMatrixProbability(namesIndex)
return(self.matrixAmounts)
# create the probability matrix with a default value
def createMatrixProbability(self,namesIndex: list) -> pd.DataFrame():
self.matrixProbability = self.createDFbyNames(namesIndex,self.trandeValuesList)
return(self.matrixProbability)
    # increment the counts matrix; signalDecisions maps signal name -> verdict, trande is the observed outcome
def setDecisionBySignals(self,signalDecisions: dict,trande: str) -> None:
buff=[]
for i in self.keysMatrixAmounts:
buff.append(signalDecisions[i])
self.matrixAmounts.loc[tuple(buff),trande] += 1
    # fill the probability matrix with values computed from the counts matrix
def generateMatrixProbability(self) -> None:
        for i in range(self.matrixAmounts.shape[0]):
            rowSum = sum(self.matrixAmounts.iloc[i])
            # assign the whole row at once: chained .iloc[i][col] indexing may write
            # to a temporary copy instead of the underlying DataFrame
            self.matrixProbability.iloc[i] = self.matrixAmounts.iloc[i] / rowSum
    # look up a decision in the probability matrix for the given signal verdicts
def getDecisionBySignals(self,signalDecisions: dict) -> dict:
ans = {}
spliceSearch =self.matrixProbability.xs(tuple(signalDecisions.values()),
level=list(signalDecisions.keys())
)
ans['probability'] = spliceSearch.to_dict('records')[0]
ans['trande'] = spliceSearch.iloc[0].idxmax()
return ans
    # export the counts and probability matrices as dictionaries
def getMatrixDict(self) -> dict:
ans={}
ans['amounts'] = self.matrixAmounts.to_dict('tight')
ans['probability'] = self.matrixProbability.to_dict('tight')
return ans
    # load the counts and probability matrices from dictionaries
    def setMatrixDict(self, matrixDict: dict) -> None:
        if matrixDict['amounts'] is not None:
            self.matrixAmounts = pd.DataFrame.from_dict(matrixDict['amounts'], orient='tight')
        if matrixDict['probability'] is not None:
            self.matrixProbability = pd.DataFrame.from_dict(matrixDict['probability'], orient='tight')
# In[3]:
reqSig={
'BB1':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':15
},
'BB2':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':20
}
}
# In[4]:
reqDS={'BB1':'up','BB2':'none'}
# In[7]:
reqCreate=list(reqSig.keys())
reqCreate
# In[8]:
t=trandeVoter('piu')
o=t.createMatrixAmounts(['BB1', 'BB2'])
o
# In[9]:
for i in range(100000):
t.setDecisionBySignals({'BB1':random.choice(['up','down','none']),
'BB2':random.choice(['up','down','none'])},
random.choice(['up','down','none']))
# In[10]:
t.matrixAmounts
# In[11]:
t.generateMatrixProbability()
# In[577]:
t.matrixProbability
# In[14]:
t.setMatrixDict(y)
# In[15]:
t.getDecisionBySignals(reqDS)
# In[ ]:
# In[ ]:
# In[13]:
y = t.getMatrixDict()
y
# In[16]:
ddf = pd.DataFrame.from_dict(y['amounts'], orient='tight')
ddf
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[17]:
t.matrixProbability.iloc[0]['up'] = (t.matrixProbability.iloc[0]['up'] / (sum(t.matrixProbability.iloc[0])))
t.matrixProbability
# In[ ]:
# In[ ]:
# In[ ]:
# In[18]:
t.matrixProbability['trandе']
# In[19]:
random.choice(['up','down','none'])
# In[20]:
t.setDecisionBySignals(reqDS,'up')
# In[21]:
#t.matrixAmounts.at(bbb,'up')
t.matrixAmounts.iloc[0]
# In[22]:
for i in t.matrixAmounts.iloc[0]:
print (i)
# In[23]:
(t.matrixAmounts.iloc[0]).idxmax()
# In[24]:
t.matrixAmounts
# In[ ]:
# In[25]:
o.xs(('up','down'), level=['BB1','BB2'])['up'].iloc[0]
#oldValue = o.xs(('up','down'), level=['BB1','BB2'])['up']
#o=o.replace(oldValue,oldValue.iloc[0]+1)
#o.xs(('up','down'), level=['BB1','BB2'])
# In[26]:
o.xs(('up','down'), level=['BB1','BB2'], drop_level=False)#.iloc[0].loc['up']=2#.at['up']=4
# In[27]:
o.xs(('up','down'), level=['BB1','BB2']).iloc[0].at['up']
# In[28]:
o.loc['up'].loc['down']
# In[29]:
bbb=tuple(['up','down'])
bbb
# In[30]:
o.loc[bbb,]
# In[31]:
o.at[bbb, 'up']+=1
o
# In[32]:
o.loc[bbb]
# In[33]:
dict(zip(['a','b','c'], [1,2,3]))
# In[ ]:

View File

@@ -0,0 +1,427 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
import numpy as np
from signals import *  # remove this import later
# In[2]:
class voter_v2():
def __init__(self,name):
self.name=name
pass
def createPredictMatrixBySignals(self,signalsName):
pass
# In[ ]:
# In[3]:
reqSig={
'BB1':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':15
},
'BB2':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':20
}
}
# In[4]:
reqCreate=reqSig.keys()
reqCreate
# In[5]:
class Voter():
def __init__ (self, name=''):
self.name=name
self.mop={
'up': pd.DataFrame(),
'down':pd.DataFrame(),
'none':pd.DataFrame()
}
self.value={}
self.decision=''
self.real_decision=''
self.keys=[]
self.slice_dict={}
def addValue(self, dic_value):
self.value=dic_value
self.checkForNew()
self.setSlice()
self.getDecision()
def checkForNew(self):
if not (list(self.value.keys()) == self.keys):
self.createNewMop(list(self.value.keys()))
def createNewMop(self,missing_indicators):
print('reassembly mop')
new_columns= (missing_indicators)
#new_columns=new_columns.append(['value','p'])
n=len(new_columns)
start_value=-1
variator=3
new_lst=[]
buf_lst=[]
for i in range(n):
buf_lst.append(start_value)
        # enumerate all variator**n combinations of {-1, 0, 1} via base-3 counting
        for i in range(pow(variator, n)):
            new_lst.append(buf_lst.copy())
        for j in range(n):
            for i in range(len(new_lst)):
                dob_iterator = (i // pow(variator, j)) % variator
                new_lst[i][j] = new_lst[i][j] + dob_iterator
        #print (new_columns)
        self.keys = new_columns
        new_columns = new_columns + ['amount'] + ['percentage']
        # append the bookkeeping columns (amount, percentage) to every row
        for i in new_lst:
            i.extend([0, 0])
#i = i.append(0)
#print(new_lst)
#print(new_columns)
new_df=pd.DataFrame(new_lst,columns=new_columns)
self.mop['up']=pd.DataFrame.from_dict(new_df.to_dict())
self.mop['down']=pd.DataFrame.from_dict(new_df.to_dict())
self.mop['none']=pd.DataFrame.from_dict(new_df.to_dict())
def setSlice(self):
row_flg=True
self.slice_dict={}
for j in self.mop.keys():
for index, row in self.mop[j].iterrows():
for key, value in self.value.items():
if value != row[key]:
#print('fasle ',key,value,row[key])
row_flg=False
break
if row_flg:
self.slice_dict[j]=dict(row)
#print(j,dict(row))
row_flg=True
def getDecision (self):
max_value=0
for key, value in self.slice_dict.items():
if value['amount'] >= max_value:
max_value = value['amount']
self.decision = key
return self.decision
def setDecision (self,real_decision):
self.real_decision=real_decision
self.updMop()
self.slice_dict[real_decision]['amount']+=1
    def updMop(self):
        row_flg = True
        for index, row in self.mop[self.real_decision].iterrows():
            for key, value in self.value.items():
                if value != row[key]:
                    row_flg = False
                    break
            if row_flg:
                #self.slice_dict[j]=dict(row)
                # write back with .at: `row` yielded by iterrows() is a copy, so
                # mutating it would not update the underlying DataFrame
                self.mop[self.real_decision].at[index, 'amount'] += 1
            row_flg = True
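# In[ ]:


# Reference sketch (assumed illustration, not from the original run): createNewMop
# enumerates every combination of {-1, 0, 1} for the given indicators, so for two
# indicators each mop table gets 3**2 = 9 rows plus the 'amount' / 'percentage' columns.
_demo_voter = Voter('demo')
_demo_voter.createNewMop(['lupa', 'pupa'])
_demo_voter.mop['up'].head()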
# In[6]:
test_dic_value_1={'lupa':1 }
test_dic_value_2={'lupa':1 , 'pupa':1}
test_dic_value_3={'lupa':1 , 'pupa':1 , 'zalupa':1 , 'zapupa':1 }
test_dic_value_4={'lupa':1 , 'pupa':1 , 'zalupa':1 , 'zapupa':-1 }
# In[7]:
test=Voter('huita')
test.addValue(test_dic_value_2)
test.decision
test.getDecision()
# In[8]:
test.setDecision('down')
test.getDecision()
# In[9]:
test.slice_dict
# In[ ]:
# In[10]:
import pickle
# In[11]:
dictionary_data = {"a": 1, "b": 2}
a_file = open("data.pkl", "wb")
pickle.dump(dictionary_data, a_file)
a_file.close()
a_file = open("data.pkl", "rb")
output = pickle.load(a_file)
print(output)
a_file.close()
# In[ ]:
# In[ ]:
# In[12]:
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
tuples
# In[13]:
index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
# In[14]:
s = pd.DataFrame(np.random.randn(8), index=index)
s
# In[15]:
s.to_dict()
# In[16]:
s.loc[('bar', 'one')]
# In[18]:
iterables = [["up", "down", "none"], ["up", "down", "none"]]
df = pd.DataFrame({'col1': np.random.randn(9),'col2': np.random.randn(9)}, index=pd.MultiIndex.from_product(iterables, names=["first", "second"]))
df
# In[19]:
df.__dict__
# In[ ]:
# In[ ]:
# In[20]:
def createDF(namesIndex, namesColoms):
trandeValuesList = ['up','none','down']
colomsName_lvl = ['trande','amaunt','probability']
#micolumns = pd.MultiIndex.from_tuples(
#[('amaunt', 'up'), ('amaunt', 'none'), ('amaunt', 'down'), ('trande',),('probability',)], names=["lvl0", "lvl1"]
#)
df = pd.DataFrame({
'trande': [None]*pow(3,len(namesIndex)),
'amaunt': [None]*pow(3,len(namesIndex)),
'probability': [None]*pow(3,len(namesIndex))
},
index=pd.MultiIndex.from_product([trandeValuesList]*len(namesIndex), names=namesIndex)
,columns=namesColoms
)
return(df)
# In[21]:
dd=createDF( ['1','2','3'],['trande','amaunt','probability'] )
dd
# In[22]:
df.xs(('up','down'), level=['first','second'])
# In[23]:
dd['trande']
# In[24]:
tvl = ['up','none','down']
colomsName_lvl = ['trande','amaunt','probability']
# In[25]:
tuplesCol = list(zip(['amaunt']*3,tvl))
tuplesCol
# In[26]:
df.loc['up','down']
# In[27]:
df.xs(('up','down'), level=['first','second']).iloc[0]
# In[28]:
df_d=df.to_dict('tight')
df_d
# In[29]:
df_d['index_names']
# In[30]:
ddf = pd.DataFrame.from_dict(df_d, orient='tight')
ddf
# In[31]:
tuple([1,2,3])
# In[32]:
ddf.xs(('up','down'), level=['first','second']).iloc[0]
# In[ ]:

View File

@@ -0,0 +1,221 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import random
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
from plotly.subplots import make_subplots
init_notebook_mode()
#import CoreTraidMath
import plotly.express as px
# In[ ]:
# In[2]:
class agrigateFig():
def __init__(self,data=[],needDraw=False ,subplot_titles=None):
self.data=data
self.ans=self.getAgrPlt()
if needDraw:
self.subplot_titles=subplot_titles
self.fig=coreDraw(self.ans,True,self.subplot_titles)
def getAgrPlt(self):
count=0
ans=[]
for i in self.data:
count=count+1
if type(i)==list:
for g in i:
for j in g.figDict:
ans.append(j)
ans[-1]['row']=count
else:
for j in i.figDict:
ans.append(j)
ans[-1]['row']=count
return ans
# In[3]:
class corePlt():
def __init__(self, params={
'vtype':'',
'df':pd.DataFrame(),
'row':1,
'col':1,
'name':''
}):
self.vtype=params['vtype']
self.df=params['df']
self.row=params['row']
self.col=params['col']
self.name=params['name']
if 'colorType' in params.keys():
self.colorType=params['colorType']
class coreDraw():
def __init__(self, data=[],needShow=False,subplot_titles={}):
self.data=self.getPlts(data)
self.needShow=needShow
self.subplot_titles=subplot_titles
self.ans=self.getAns()
def getBarColorList(self,l,colorType):
if colorType=='diffAbs':
ans=['green']
for i in range(1,len(l)):
if abs(l[i])>abs(l[i-1]):
ans.append('green')
else:
ans.append('red')
elif colorType=='diff':
ans=['green']
for i in range(1,len(l)):
if (l[i])>(l[i-1]):
ans.append('green')
else:
ans.append('red')
elif colorType=='normal':
ans=[]
for i in range(len(l)):
ans.append('gray')
return ans
def getPlts(self, data):
ans=None
if type(data)==list:
ans=[]
for i in data:
ans.append(corePlt(i))
else:
ans=[corePlt(data)]
return ans
def getAns(self):
'''
data list
vtype
df
row=1
col=1
name
'''
ans=None
maxRow=1
maxCol=1
for i in self.data:
if i.row > maxRow:
maxRow =i.row
if i.col > maxCol:
maxCol =i.col
fig = make_subplots(
rows=maxRow,
cols=maxCol,
shared_xaxes=True,
vertical_spacing=0.1,
shared_yaxes=True,
#horizontal_spacing=0.02,
#column_widths=[]
subplot_titles=self.subplot_titles
)
fig.update_layout(xaxis_rangeslider_visible=False)
fig.update_layout(barmode='relative')
for i in self.data:
if i.vtype=='Scatter':
fig.add_trace(go.Scatter(x=i.df['date'],y=i.df['value'],name=i.name), row=i.row, col=i.col)
elif i.vtype=='OCHL':
fig.add_trace(go.Candlestick(
x=i.df['date'],
open=i.df['open'],
high=i.df['high'],
low=i.df['low'],
close=i.df['close'],
name=i.name),
row=i.row, col=i.col
)
elif i.vtype=='Bars':
for j in i.df.keys():
if j!='date':
try:
colorType=i.colorType
except:
colorType='normal'
colors=self.getBarColorList(i.df[j],colorType)
fig.add_trace(go.Bar(x=i.df['date'], y=i.df[j],name=j,marker_color=colors),row=i.row, col=i.col)
ans=fig
if self.needShow:
plotly.offline.iplot(fig)
return ans
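# In[ ]:


# Minimal usage sketch (assumed, not from the original notebooks): coreDraw consumes
# a list of dicts in the corePlt layout ('vtype', 'df', 'row', 'col', 'name'), one per
# trace. The toy OHLC frame below is invented purely for illustration.
_toy = pd.DataFrame({
    'date': pd.date_range('2024-01-01', periods=5, freq='D'),
    'open':  [1.10, 1.11, 1.12, 1.11, 1.10],
    'high':  [1.12, 1.13, 1.13, 1.12, 1.11],
    'low':   [1.09, 1.10, 1.11, 1.10, 1.09],
    'close': [1.11, 1.12, 1.11, 1.10, 1.10],
})
_plots = [
    {'vtype': 'OCHL', 'df': _toy, 'row': 1, 'col': 1, 'name': 'toy candles'},
    {'vtype': 'Scatter',
     'df': pd.DataFrame({'date': _toy['date'], 'value': _toy['close']}),
     'row': 2, 'col': 1, 'name': 'close'},
]
# build the figure without showing it; the plotly figure object ends up in .ans
_toy_fig = coreDraw(_plots, needShow=False, subplot_titles=['toy candles', 'close']).ans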
# In[ ]:

View File

@@ -0,0 +1,145 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
import numpy as np
import uuid
# In[2]:
class DealManager():
def __init__(self):
self.commission=0.04
self.columns=['uuid','figi','amount','startPrice','profit']
self.deals = pd.DataFrame(columns=self.columns)
self.deals = self.deals.set_index('uuid')
def findDealByPriceAndFig(self,price,figi):
ans=None
for i in range(self.deals.shape[0]):
if self.deals.iloc[i].startPrice == price and self.deals.iloc[i].figi == figi:
ans = self.deals.iloc[i].name
break
return ans
def openDeal(self,figi,startPrice,amount=1):
desiredDeal=self.findDealByPriceAndFig(startPrice,figi)
if desiredDeal == None:
newDealDict={
'uuid':[str(uuid.uuid4())],
'figi':[figi],
'startPrice':[startPrice],
'amount':[amount]
}
#newDealDict['profit']=[startPrice*pow(1+self.commission,2)]
newDeal=pd.DataFrame.from_dict(newDealDict).set_index('uuid')
self.deals=pd.concat([self.deals, newDeal])
else:
self.deals.at[desiredDeal,'amount'] += amount
def closeDeal(self,uuid,amount):
desiredDeal=self.deals.loc[uuid]
if desiredDeal.amount - amount == 0:
self.deals = self.deals.drop(labels = [uuid],axis = 0)
else:
self.deals.at[uuid,'amount'] -= amount
#self.deals.loc[uuid].amount = desiredDeal.amount - amount
# In[3]:
t=DealManager()
t.__dict__
# In[ ]:
# In[4]:
t.deals.shape[0]
# In[5]:
t.openDeal('huigi',100,1)
t.openDeal('huigi',100,3)
t.openDeal('huigi1',100,3)
t.openDeal('huigi1',200,3)
# In[6]:
t.deals
# In[7]:
t.deals[t.deals.figi == 'huigi1']
# In[ ]:
# In[8]:
for i in range(t.deals.shape[0]):
print(t.deals.iloc[i])
# In[9]:
t.findDealByPriceAndFig
# In[10]:
t.closeDeal('78228979-3daf-470a-9c2a-8db180c8c3b0',1)
t.deals
# In[11]:
t.deals.iloc[0].name
# In[12]:
a=2
a==None
# In[ ]:

View File

@@ -0,0 +1,358 @@
#!/usr/bin/env python
# coding: utf-8
# In[4]:
import pandas as pd
import datetime
import numpy as np
import pickle
from signals import *
from dealManager import *
from trandeVoter import *
from riskManager import riskManager
# In[5]:
df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv")
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
df_candle
# In[6]:
class decsionManager():
def __init__(self,name):
self.name = name
self.RM = riskManager()
self.DM = DealManager()
self.TV = trandeVoter(name)
self.SA = signalAgrigator()
pass
    # signal test method lifted from signalAgrigator
    def getSignalTest(self, data: pd.DataFrame, reqSig: dict, batchSize=30, dataType='candel') -> dict:
        self.SA.mode = 'retroFast'
        self.SA.createSingnalInstances(
            data=data,
            dictAgrigSignal=reqSig,
            dataType=dataType,
            batchSize=batchSize
        )
        ans = self.SA.getAns(data)
        return ans
    # build the probability matrix: slide a batch window over the data, collect the
    # signal verdicts for each window and label them with the next step's real move
    def generateMatrixProbability(self,
                                  data: pd.DataFrame,
                                  reqSig: dict,
                                  target: str,
                                  batchSize=30,
                                  #dataType='candel'
                                  ):
        data = data.reset_index(drop=True)
        self.SA.createSingnalInstances(
            data=data,
            dictAgrigSignal=reqSig,
            dataType='candel',
            batchSize=batchSize
        )
        self.TV.createMatrixAmounts(reqSig.keys())
        for i in range(data.shape[0]-batchSize-1):
            sigAns = self.SA.getAns(data[i:i+batchSize])
            rightAns = self.getRetroStepAns(data[target][i], data[target][i+1])
            self.TV.setDecisionBySignals(self.KostilEbaniy(sigAns), rightAns)
        self.TV.generateMatrixProbability()
    # maps numeric signal output (-1 / 0 / 1) to trend labels
    def KostilEbaniy(self, d):
ans={}
for i in d.keys():
if d[i] == 0:
ans[i] = 'none'
elif d[i] == 1:
ans[i] = 'up'
elif d[i] == -1:
ans[i] = 'down'
return ans
    # label the move from value1 to value2 as 'up' / 'none' / 'down'
    def getRetroStepAns(self, value1, value2):
if value1 == value2:
ans = 'none'
elif value1 < value2:
ans = 'up'
else:
ans = 'down'
return ans
    # get a signal decision online (on fresh data)
    def getSignal(self, data: pd.DataFrame, reqSig: dict, dataType='candel') -> dict:
        data = data.reset_index(drop=True)
        self.SA.mode = 'online'
        self.SA.createSingnalInstances(
            data=data,
            dictAgrigSignal=reqSig,
            dataType=dataType,
            batchSize=30
        )
        ans = self.SA.getAns(data)
        return ans
    # create the signal instances; call this before getOnlineAns
    def crateSignals(self, data: pd.DataFrame, reqSig: dict, dataType='candel'):
        data = data.reset_index(drop=True)
        self.SA.mode = 'online'
        self.SA.createSingnalInstances(
            data=data,
            dictAgrigSignal=reqSig,
            dataType=dataType,
            batchSize=30
        )
    def getOnlineAns(self, data: pd.DataFrame, price):
sigAns = self.SA.getAns(data)
prob = self.TV.getDecisionBySignals(sigAns)
ans = self.RM.getDecision(sigAns,prob,price)
return ans
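# In[ ]:


# Illustration with made-up prices: the retro labelling simply compares each target
# value with the next one, which is what getRetroStepAns feeds into trandeVoter
# while the probability matrix is being built.
_pairs = [(1.10, 1.11), (1.11, 1.11), (1.11, 1.09)]
['up' if a < b else 'none' if a == b else 'down' for a, b in _pairs]
# -> ['up', 'none', 'down']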
# In[ ]:
# In[7]:
t= decsionManager('TEST')
# In[8]:
t.__dict__
# In[9]:
reqSig={
'BB1':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':15
},
'BB2':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':20
}
}
# In[10]:
reqSig.keys()
# In[11]:
t.SA.__dict__
# In[12]:
t.generateMatrixProbability(df_candle[:10000],reqSig,'close',40)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[13]:
mop = t.TV.matrixProbability
mop
# In[ ]:
# In[ ]:
# In[14]:
t.getSignal(df_candle[:10000],reqSig)
# In[15]:
t.getSignalTest(df_candle[:10000],reqSig,40)
# In[ ]:
# In[16]:
t.SA.createSingnalInstances(
data = df_candle[:10000],
dictAgrigSignal = reqSig,
dataType='candel',
batchSize=30
)
# In[ ]:
# In[ ]:
# In[ ]:
# In[17]:
reqSig={
'BB1':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':15
},
'BB2':{
'className':signal_BB,
'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5},
'signalParams':{'source':'close','target':'close'},
'batchSize':20
}
}
# In[18]:
t=decsionManager(reqSig)
# In[ ]:
# In[ ]:
import pickle
# In[ ]:
dictionary_data = {"a": 1, "b": 2}
a_file = open("data.pkl", "wb")
pickle.dump(reqSig, a_file)
a_file.close()
a_file = open("data.pkl", "rb")
output = pickle.load(a_file)
print(output)
a_file.close()
# In[ ]:

View File

@@ -0,0 +1,382 @@
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import os
import pandas as pd
import datetime
import numpy as np
from tqdm import tqdm
from indicators_v2 import *
from signals_v2 import *
from dealManager import *
from trandeVoter import *
from riskManager import *
import pickle
# In[6]:
df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv")
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
df_candle
# In[7]:
df_candle['close']
# In[8]:
class decsionManager:
'''
sigAgrReq = {
'sig_BB':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5}
}
}
},
'sig_BB_2':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2}
}
}
}
}
sigAgrData = {
'sig_BB':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
},
'sig_BB_2':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
}
}
sigAgrRetroTemplate = {
'sig_BB':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
},
'sig_BB_2':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
}
}
'''
def __init__(self,name, sigDict: dict):
self.RM = riskManager()
self.DM = DealManager()
self.TV = trandeVoter(name)
self.SA = signalsAgrigator(sigDict)
self.sigDict = sigDict
def getOnlineAns(self, signalsAns: dict, price: float) -> dict:
probabilityDecsion = self.TV.getDecisionBySignals(self.getSignalsAns(signalsAns))
RMD = self.RM.getDecision(probabilityDecision=probabilityDecsion, price=price, deals = self.DM.deals)
return RMD
def getSignalsAns(self, signalsDataDict: dict) -> dict:
return self.SA.getAns(signalsDataDict)
def getRightAns(self,value_1, value_2):
ans=''
if value_1 > value_2:
ans = 'down'
elif value_1 < value_2:
ans = 'up'
else:
ans = 'none'
return ans
    def getRetroTrendAns(self, retroTemplateDict: dict, data: pd.DataFrame, window: int) -> dict:
reqSig={}
ans = {
'signalsAns':[],
'rightAns':[]
}
target = ''
for k in tqdm(range(data.shape[0]-window-1)):
for i in retroTemplateDict.keys():
reqSig[i] = {'signalData': data[k:k+window], 'indicatorData':{}}
target = self.SA.signals[i].params['target']
for j in retroTemplateDict[i]['indicatorData'].keys():
reqSig[i]['indicatorData'][j] = data[k:k+window]
sigAns = self.getSignalsAns(reqSig)
rightAns = self.getRightAns(data[target][k], data[target][k+1])
ans['signalsAns'].append(sigAns)
ans['rightAns'].append(rightAns)
return ans
def generateMatrixProbabilityFromDict(self, dictSignals: dict) -> dict:
self.TV.createMatrixAmounts(dictSignals['signalsAns'][0].keys())
for i in range(len(dictSignals['signalsAns'])):
self.TV.setDecisionBySignals(signalDecisions = dictSignals['signalsAns'][i],
trande = dictSignals['rightAns'][i])
self.TV.generateMatrixProbability()
def createDump(self,postfix='') -> str:
dataDict = {
'RM':self.RM,
'DM':self.DM,
'TV':self.TV,
'SA':self.SA,
'sigDict':self.sigDict
}
fileName='data_'+postfix+'.pickle'
with open(fileName, 'wb') as f:
pickle.dump(dataDict, f)
return os.path.abspath(fileName)
def loadDump(self,path: str) -> None:
with open(path, 'rb') as f:
dataDict = pickle.load(f)
self.RM = dataDict['RM']
self.DM = dataDict['DM']
self.TV = dataDict['TV']
self.SA = dataDict['SA']
self.sigDict = dataDict['sigDict']
# In[9]:
sigAgrReq = {
'sig_BB':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5}
}
}
},
'sig_BB_2':{
'className':sig_BB,
'params':{'source':'close','target':'close'},
'indicators':{
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2}
}
}
}
}
sigAgrData = {
'sig_BB':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
},
'sig_BB_2':{
'signalData': df_candle[990:1000],
'indicatorData' :{'ind_BB': df_candle[:1000]}
}
}
sigAgrRetroTemplate = {
'sig_BB':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
},
'sig_BB_2':{
'signalData': None,
'indicatorData' :{'ind_BB': None}
}
}
# In[10]:
test = decsionManager('Pipa', sigAgrReq)
# In[11]:
test.__dict__
# In[12]:
test.TV.__dict__
# In[13]:
test.SA.signals['sig_BB'].params['target']
# In[14]:
test.getSignalsAns(sigAgrData)
# In[15]:
#test.loadDump('C:\\Users\\Redsandy\\PyProj\\Trade\\MVP\\data_pupa.pickle')
# In[16]:
uuu = test.getRetroTrendAns(sigAgrRetroTemplate,df_candle[:5000],40)
uuu
# In[17]:
test.generateMatrixProbabilityFromDict(uuu)
# In[18]:
test.TV.__dict__
# In[19]:
test.getOnlineAns(sigAgrData, 0.0)
# In[20]:
(test.DM.deals).shape
# In[21]:
test.createDump('pupa')
# In[ ]:
# In[22]:
with open('C:\\Users\\Redsandy\\PyProj\\Trade\\MVP\\data_pupa.pickle', 'rb') as f:
data_new = pickle.load(f)
data_new
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:

View File

@@ -0,0 +1,194 @@
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import pandas as pd
import datetime
import numpy as np
import CoreTraidMath
# In[9]:
df_candle = pd.read_csv(r"../data/EURUSD_price_candlestick.csv")
df_candle.rename(columns={'timestamp': 'date'}, inplace=True)
df_candle
# In[10]:
class coreIndicator():
    def __init__(self, options: dict, dataType: str = None, predictType: str = None, name: str = None):
        self.options = options
        self.dataType = dataType        # e.g. 'ochl'
        self.predictType = predictType  # e.g. 'trend'
        self.name = name
    def getAns(self, data: pd.DataFrame):
        return "ERROR"
# In[11]:
class ind_BB(coreIndicator):
"""
options
MeanType -> SMA
window -> int
valueType -> str: low, high, open, close
kDev -> float
"""
def __init__(self,options: dict,name = None):
super().__init__(
options = options,
dataType = 'ochl',
predictType = 'trend',
name = name
)
    def getAns(self, data: pd.DataFrame):
data=data.reset_index(drop=True)
ans={}
opMA={'dataType':'ohcl',
'action':'findMean',
'actionOptions':{
'MeanType':self.options['MeanType'],
'valueType':self.options['valueType'],
'window':self.options['window']
}
}
ans['BB']=CoreTraidMath.CoreMath(data,opMA).ans
opSTD={'dataType':'ohcl',
'action':'findSTD',
'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']}
}
ans['STD']=CoreTraidMath.CoreMath(data,opSTD).ans
ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev']
ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev']
ans['x']=np.array(data['date'][self.options['window']-1:].to_list())
self.ans= ans
return ans
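# In[ ]:


# Worked example of the band arithmetic with invented numbers: given a 15-period
# SMA of 1.1000, a rolling standard deviation of 0.0020 and kDev = 2.5, the upper
# and lower bands come out as SMA +/- kDev * std.
_sma, _std, _k_dev = 1.1000, 0.0020, 2.5
_upper = _sma + _k_dev * _std   # pSTD -> 1.1050
_lower = _sma - _k_dev * _std   # mSTD -> 1.0950
_upper, _lower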
# In[12]:
class indicatorsAgrigator:
def __init__ (self,indDict={}):
self.indDict = indDict
self.indInst = {}
self.ans={}
self.createIndicatorsInstance()
def createIndicatorsInstance(self):
for i in self.indDict.keys():
self.indInst[i]=self.indDict[i]['className'](self.indDict[i]['params'])
def getAns(self,dataDict={}):
ans={}
for i in dataDict.keys():
ans[i] = self.indInst[i].getAns(dataDict[i])
return ans
# In[13]:
indicators = {
'ind_BB':{
'className':ind_BB,
'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}
}
}
dataDic={
'ind_BB':df_candle[:1000]
}
# In[ ]:
# In[14]:
ia= indicatorsAgrigator(indicators)
# In[15]:
ia.__dict__
# In[16]:
ia.indInst['ind_BB'].__dict__
# In[17]:
ia.getAns(dataDict=dataDic)
# In[ ]:
# In[ ]:
# In[18]:
op = {'MeanType':'SMA','window':5,'valueType':'low','kDev':2}
# In[19]:
t = ind_BB(op)
# In[20]:
t.getAns(df_candle[:100])
# In[21]:
t.__dict__
# In[ ]:
# In[ ]:

291
notebooks/coreDraw.ipynb Normal file

File diff suppressed because one or more lines are too long

467
notebooks/dealManager.ipynb Normal file
View File

@@ -0,0 +1,467 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "0eb4ab92",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import datetime\n",
"import numpy as np\n",
"import uuid "
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4ed42153",
"metadata": {},
"outputs": [],
"source": [
"class DealManager():\n",
" \n",
" def __init__(self):\n",
" self.commission=0.04\n",
" self.columns=['uuid','figi','amount','startPrice','profit']\n",
" self.deals = pd.DataFrame(columns=self.columns)\n",
" self.deals = self.deals.set_index('uuid')\n",
" \n",
" def findDealByPriceAndFig(self,price,figi):\n",
" ans=None\n",
" for i in range(self.deals.shape[0]):\n",
" if self.deals.iloc[i].startPrice == price and self.deals.iloc[i].figi == figi:\n",
" ans = self.deals.iloc[i].name\n",
" break\n",
" return ans\n",
"\n",
" def openDeal(self,figi,startPrice,amount=1):\n",
" desiredDeal=self.findDealByPriceAndFig(startPrice,figi)\n",
" if desiredDeal == None:\n",
" newDealDict={\n",
" 'uuid':[str(uuid.uuid4())],\n",
" 'figi':[figi],\n",
" 'startPrice':[startPrice],\n",
" 'amount':[amount]\n",
" }\n",
"\n",
" #newDealDict['profit']=[startPrice*pow(1+self.commission,2)]\n",
"\n",
"\n",
"\n",
" newDeal=pd.DataFrame.from_dict(newDealDict).set_index('uuid')\n",
" self.deals=pd.concat([self.deals, newDeal])\n",
" else:\n",
" self.deals.at[desiredDeal,'amount'] += amount\n",
"\n",
" def closeDeal(self,uuid,amount):\n",
" \n",
" desiredDeal=self.deals.loc[uuid]\n",
" if desiredDeal.amount - amount == 0:\n",
" self.deals = self.deals.drop(labels = [uuid],axis = 0)\n",
" else:\n",
" self.deals.at[uuid,'amount'] -= amount\n",
" #self.deals.loc[uuid].amount = desiredDeal.amount - amount\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "622b8115",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'commission': 0.04,\n",
" 'columns': ['uuid', 'figi', 'amount', 'startPrice', 'profit'],\n",
" 'deals': Empty DataFrame\n",
" Columns: [figi, amount, startPrice, profit]\n",
" Index: []}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t=DealManager()\n",
"t.__dict__"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e3509306",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4fae838f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t.deals.shape[0]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "68ec95c6",
"metadata": {},
"outputs": [],
"source": [
"t.openDeal('huigi',100,1)\n",
"t.openDeal('huigi',100,3)\n",
"t.openDeal('huigi1',100,3)\n",
"t.openDeal('huigi1',200,3)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "cc5f9cb2",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>figi</th>\n",
" <th>amount</th>\n",
" <th>startPrice</th>\n",
" <th>profit</th>\n",
" </tr>\n",
" <tr>\n",
" <th>uuid</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>a38e6fa8-c160-481c-b666-78ec9fade50b</th>\n",
" <td>huigi</td>\n",
" <td>4</td>\n",
" <td>100</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>482abf6b-7a4f-4e49-aea6-1b5940277c24</th>\n",
" <td>huigi1</td>\n",
" <td>3</td>\n",
" <td>100</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6e940b6a-30e1-435c-b214-fc952aba02dc</th>\n",
" <td>huigi1</td>\n",
" <td>3</td>\n",
" <td>200</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" figi amount startPrice profit\n",
"uuid \n",
"a38e6fa8-c160-481c-b666-78ec9fade50b huigi 4 100 NaN\n",
"482abf6b-7a4f-4e49-aea6-1b5940277c24 huigi1 3 100 NaN\n",
"6e940b6a-30e1-435c-b214-fc952aba02dc huigi1 3 200 NaN"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t.deals"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "eed2748a",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>figi</th>\n",
" <th>amount</th>\n",
" <th>startPrice</th>\n",
" <th>profit</th>\n",
" </tr>\n",
" <tr>\n",
" <th>uuid</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>482abf6b-7a4f-4e49-aea6-1b5940277c24</th>\n",
" <td>huigi1</td>\n",
" <td>3</td>\n",
" <td>100</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6e940b6a-30e1-435c-b214-fc952aba02dc</th>\n",
" <td>huigi1</td>\n",
" <td>3</td>\n",
" <td>200</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" figi amount startPrice profit\n",
"uuid \n",
"482abf6b-7a4f-4e49-aea6-1b5940277c24 huigi1 3 100 NaN\n",
"6e940b6a-30e1-435c-b214-fc952aba02dc huigi1 3 200 NaN"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t.deals[t.deals.figi == 'huigi1']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d7a4a820",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 8,
"id": "0cac1f5c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"figi huigi\n",
"amount 4\n",
"startPrice 100\n",
"profit NaN\n",
"Name: a38e6fa8-c160-481c-b666-78ec9fade50b, dtype: object\n",
"figi huigi1\n",
"amount 3\n",
"startPrice 100\n",
"profit NaN\n",
"Name: 482abf6b-7a4f-4e49-aea6-1b5940277c24, dtype: object\n",
"figi huigi1\n",
"amount 3\n",
"startPrice 200\n",
"profit NaN\n",
"Name: 6e940b6a-30e1-435c-b214-fc952aba02dc, dtype: object\n"
]
}
],
"source": [
"for i in range(t.deals.shape[0]):\n",
" print(t.deals.iloc[i])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8906d273",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<bound method DealManager.findDealByPriceAndFig of <__main__.DealManager object at 0x7f05b4351590>>"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t.findDealByPriceAndFig"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "896d4dac",
"metadata": {},
"outputs": [
{
"ename": "KeyError",
"evalue": "'78228979-3daf-470a-9c2a-8db180c8c3b0'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"File \u001b[0;32m~/projects/marketTrade/.venv/lib/python3.11/site-packages/pandas/core/indexes/base.py:3791\u001b[0m, in \u001b[0;36mIndex.get_loc\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 3790\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 3791\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_loc\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcasted_key\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3792\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m err:\n",
"File \u001b[0;32mindex.pyx:152\u001b[0m, in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32mindex.pyx:181\u001b[0m, in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32mpandas/_libs/hashtable_class_helper.pxi:7080\u001b[0m, in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32mpandas/_libs/hashtable_class_helper.pxi:7088\u001b[0m, in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mKeyError\u001b[0m: '78228979-3daf-470a-9c2a-8db180c8c3b0'",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[10], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mt\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcloseDeal\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m78228979-3daf-470a-9c2a-8db180c8c3b0\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2\u001b[0m t\u001b[38;5;241m.\u001b[39mdeals\n",
"Cell \u001b[0;32mIn[2], line 38\u001b[0m, in \u001b[0;36mDealManager.closeDeal\u001b[0;34m(self, uuid, amount)\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcloseDeal\u001b[39m(\u001b[38;5;28mself\u001b[39m,uuid,amount):\n\u001b[0;32m---> 38\u001b[0m desiredDeal\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdeals\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloc\u001b[49m\u001b[43m[\u001b[49m\u001b[43muuid\u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 39\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m desiredDeal\u001b[38;5;241m.\u001b[39mamount \u001b[38;5;241m-\u001b[39m amount \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 40\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdeals \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdeals\u001b[38;5;241m.\u001b[39mdrop(labels \u001b[38;5;241m=\u001b[39m [uuid],axis \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m)\n",
"File \u001b[0;32m~/projects/marketTrade/.venv/lib/python3.11/site-packages/pandas/core/indexing.py:1153\u001b[0m, in \u001b[0;36m_LocationIndexer.__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 1150\u001b[0m axis \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maxis \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 1152\u001b[0m maybe_callable \u001b[38;5;241m=\u001b[39m com\u001b[38;5;241m.\u001b[39mapply_if_callable(key, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mobj)\n\u001b[0;32m-> 1153\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_getitem_axis\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmaybe_callable\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maxis\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/projects/marketTrade/.venv/lib/python3.11/site-packages/pandas/core/indexing.py:1393\u001b[0m, in \u001b[0;36m_LocIndexer._getitem_axis\u001b[0;34m(self, key, axis)\u001b[0m\n\u001b[1;32m 1391\u001b[0m \u001b[38;5;66;03m# fall thru to straight lookup\u001b[39;00m\n\u001b[1;32m 1392\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_validate_key(key, axis)\n\u001b[0;32m-> 1393\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_label\u001b[49m\u001b[43m(\u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maxis\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/projects/marketTrade/.venv/lib/python3.11/site-packages/pandas/core/indexing.py:1343\u001b[0m, in \u001b[0;36m_LocIndexer._get_label\u001b[0;34m(self, label, axis)\u001b[0m\n\u001b[1;32m 1341\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_get_label\u001b[39m(\u001b[38;5;28mself\u001b[39m, label, axis: AxisInt):\n\u001b[1;32m 1342\u001b[0m \u001b[38;5;66;03m# GH#5567 this will fail if the label is not present in the axis.\u001b[39;00m\n\u001b[0;32m-> 1343\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mxs\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maxis\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/projects/marketTrade/.venv/lib/python3.11/site-packages/pandas/core/generic.py:4236\u001b[0m, in \u001b[0;36mNDFrame.xs\u001b[0;34m(self, key, axis, level, drop_level)\u001b[0m\n\u001b[1;32m 4234\u001b[0m new_index \u001b[38;5;241m=\u001b[39m index[loc]\n\u001b[1;32m 4235\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 4236\u001b[0m loc \u001b[38;5;241m=\u001b[39m \u001b[43mindex\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_loc\u001b[49m\u001b[43m(\u001b[49m\u001b[43mkey\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4238\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(loc, np\u001b[38;5;241m.\u001b[39mndarray):\n\u001b[1;32m 4239\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m loc\u001b[38;5;241m.\u001b[39mdtype \u001b[38;5;241m==\u001b[39m np\u001b[38;5;241m.\u001b[39mbool_:\n",
"File \u001b[0;32m~/projects/marketTrade/.venv/lib/python3.11/site-packages/pandas/core/indexes/base.py:3798\u001b[0m, in \u001b[0;36mIndex.get_loc\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 3793\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(casted_key, \u001b[38;5;28mslice\u001b[39m) \u001b[38;5;129;01mor\u001b[39;00m (\n\u001b[1;32m 3794\u001b[0m \u001b[38;5;28misinstance\u001b[39m(casted_key, abc\u001b[38;5;241m.\u001b[39mIterable)\n\u001b[1;32m 3795\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28many\u001b[39m(\u001b[38;5;28misinstance\u001b[39m(x, \u001b[38;5;28mslice\u001b[39m) \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m casted_key)\n\u001b[1;32m 3796\u001b[0m ):\n\u001b[1;32m 3797\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m InvalidIndexError(key)\n\u001b[0;32m-> 3798\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m(key) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01merr\u001b[39;00m\n\u001b[1;32m 3799\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[1;32m 3800\u001b[0m \u001b[38;5;66;03m# If we have a listlike key, _check_indexing_error will raise\u001b[39;00m\n\u001b[1;32m 3801\u001b[0m \u001b[38;5;66;03m# InvalidIndexError. Otherwise we fall through and re-raise\u001b[39;00m\n\u001b[1;32m 3802\u001b[0m \u001b[38;5;66;03m# the TypeError.\u001b[39;00m\n\u001b[1;32m 3803\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_check_indexing_error(key)\n",
"\u001b[0;31mKeyError\u001b[0m: '78228979-3daf-470a-9c2a-8db180c8c3b0'"
]
}
],
"source": [
"t.closeDeal('78228979-3daf-470a-9c2a-8db180c8c3b0',1)\n",
"t.deals"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "8fbd7460",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'a38e6fa8-c160-481c-b666-78ec9fade50b'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t.deals.iloc[0].name"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "83549b44",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a=2\n",
"a==None\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d794cff",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

26
poetry.lock generated
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "anyio"
@@ -1099,6 +1099,7 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
{file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
{file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"},
]
[[package]]
@@ -1598,6 +1599,16 @@ files = [
{file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
{file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
{file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
{file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
{file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
{file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
{file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
{file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
@@ -1754,13 +1765,13 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=
[[package]]
name = "nbconvert"
version = "7.12.0"
description = "Converting Jupyter Notebooks"
version = "7.16.2"
description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)."
optional = false
python-versions = ">=3.8"
files = [
{file = "nbconvert-7.12.0-py3-none-any.whl", hash = "sha256:5b6c848194d270cc55fb691169202620d7b52a12fec259508d142ecbe4219310"},
{file = "nbconvert-7.12.0.tar.gz", hash = "sha256:b1564bd89f69a74cd6398b0362da94db07aafb991b7857216a766204a71612c0"},
{file = "nbconvert-7.16.2-py3-none-any.whl", hash = "sha256:0c01c23981a8de0220255706822c40b751438e32467d6a686e26be08ba784382"},
{file = "nbconvert-7.16.2.tar.gz", hash = "sha256:8310edd41e1c43947e4ecf16614c61469ebc024898eb808cce0999860fc9fb16"},
]
[package.dependencies]
@@ -1787,7 +1798,7 @@ docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sp
qtpdf = ["nbconvert[qtpng]"]
qtpng = ["pyqtwebengine (>=5.15)"]
serve = ["tornado (>=6.1)"]
test = ["flaky", "ipykernel", "ipywidgets (>=7)", "pytest"]
test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest"]
webpdf = ["playwright"]
[[package]]
@@ -2483,6 +2494,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3386,4 +3398,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.13"
content-hash = "29bb47a642b8c954617e72b3b407196c5cc1e13c7eb483c712538b08c3d4b8e7"
content-hash = "8e185f19d0891fa375ca3e5d878742e677c332f20e5969c6e058af9ba5c41e2e"

View File

@@ -21,6 +21,7 @@ black = "^23.12.0"
tinkoff-grpc = {git = "git@github.com:strategy155/tinkoff_grpc.git", branch="master"}
python-dotenv = "^1.0.0"
jupyterlab = "^4.0.9"
nbconvert = "^7.16.2"
[tool.poetry.dev-dependencies]