Compare commits

...

17 Commits

Author SHA1 Message Date
Steve Nyemba ea30cd1c00 bug fix: attempt 2023-08-09 15:23:33 -05:00
Steve Nyemba d06441ace8 bug fix ... 2023-08-09 11:36:30 -05:00
Steve Nyemba 5932513666 bug fix: format, should be handled on post processing 2023-08-09 10:04:26 -05:00
Steve Nyemba ca09ea0202 Merge branch 'dev' 2023-08-08 11:09:10 -05:00
Steve Nyemba 2ad56dcff6 bug fixes 2023-08-08 11:08:18 -05:00
Steve Nyemba 523f7364f1 bug fix with logs 2023-08-08 09:48:22 -05:00
Steve Nyemba 8e997675f8 bug fix with data-frame 2023-08-08 09:30:28 -05:00
Steve L. Nyemba dd58a92eb5 Merge pull request 'merge developement changes into main branch' (#5) from dev into master (Reviewed-on: #5) 2023-08-08 14:26:10 +00:00
Steve Nyemba a0a2d8ae56 bug fix: version update 2023-07-28 12:40:51 -05:00
Steve Nyemba d8035a6705 bugfix: version import 2023-07-27 10:36:55 -05:00
Steve Nyemba c865e59ff4 Merge branch 'dev' 2023-07-25 14:42:50 -05:00
Steve Nyemba c1c17bc59d bug fix: random shuffle 2023-07-25 14:40:45 -05:00
Steve Nyemba b9596edd8e bug fix: random shuffle improvements 2023-07-25 14:30:20 -05:00
Steve Nyemba ef43f20e9c matrix processing: special case 2023-06-07 11:59:19 -05:00
Steve Nyemba 78a650b29b bug fixes ... 2023-04-25 15:07:35 -05:00
Steve Nyemba e5af702ddb bug fixes: stages, other training/generation 2023-04-24 16:37:08 -05:00
Steve Nyemba f1e2fe3699 bug fixes: stages, other training/generation 2023-04-24 16:36:25 -05:00
7 changed files with 219 additions and 121 deletions

View File

@@ -103,6 +103,7 @@ class GNet :
CHECKPOINT_SKIPS = int(args['checkpoint_skips']) if 'checkpoint_skips' in args else int(self.MAX_EPOCHS/10)
CHECKPOINT_SKIPS = 1 if CHECKPOINT_SKIPS < 1 else CHECKPOINT_SKIPS
# if self.MAX_EPOCHS < 2*CHECKPOINT_SKIPS :
# CHECKPOINT_SKIPS = 2
# self.CHECKPOINTS = [1,self.MAX_EPOCHS] + np.repeat( np.divide(self.MAX_EPOCHS,CHECKPOINT_SKIPS),CHECKPOINT_SKIPS ).cumsum().astype(int).tolist()
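The hunk above clamps CHECKPOINT_SKIPS to at least 1, and the commented-out line shows the schedule it feeds. A minimal standalone sketch of that schedule computation (MAX_EPOCHS here is an illustrative value, not taken from the diff):

import numpy as np

MAX_EPOCHS = 100                                 # illustrative value
CHECKPOINT_SKIPS = max(1, int(MAX_EPOCHS / 10))  # same default and clamp as the hunk

# Reconstruction of the commented-out schedule: evenly spaced cumulative
# epoch indices, bracketed by the first and last epoch.
CHECKPOINTS = [1, MAX_EPOCHS] + np.repeat(
    np.divide(MAX_EPOCHS, CHECKPOINT_SKIPS), CHECKPOINT_SKIPS
).cumsum().astype(int).tolist()
print(CHECKPOINTS)  # [1, 100, 10, 20, ..., 100]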
@@ -287,8 +288,17 @@ class Generator (GNet):
"""
def __init__(self,**args):
if 'trainer' not in args :
GNet.__init__(self,**args)
self.discriminator = Discriminator(**args)
else:
_args = {}
_trainer = args['trainer']
for key in vars(_trainer) :
value = getattr(_trainer,key)
setattr(self,key,value)
_args[key] = value
self.discriminator = Discriminator(**_args)
def loss(self,**args):
fake = args['fake']
label = args['label']
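The new else branch clones the trainer's state onto the generator by reflection rather than re-running GNet.__init__. A minimal sketch of that vars()/getattr/setattr pattern (the class names here are illustrative stand-ins, not the repository's classes):

class _Trainer:                    # stand-in for the object passed as args['trainer']
    def __init__(self):
        self.logs = '/tmp/logs'
        self.max_epochs = 10

class _Generator:
    def __init__(self, trainer):
        _args = {}
        for key in vars(trainer):          # instance attributes only
            value = getattr(trainer, key)
            setattr(self, key, value)      # mirror the trainer's state onto self
            _args[key] = value
        self._args = _args                 # the diff hands this dict to Discriminator(**_args)

g = _Generator(_Trainer())
assert g.max_epochs == 10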
@@ -657,7 +667,9 @@ class Predict(GNet):
fake = self.generator.network(inputs=z, label=label)
init = tf.compat.v1.global_variables_initializer()
saver = tf.compat.v1.train.Saver()
print ([self.CHECKPOINTS])
# saver = tf.compat.v1.train.Saver()
saver = tf.compat.v1.train.Saver(max_to_keep=len(self.CHECKPOINTS))
df = pd.DataFrame()
CANDIDATE_COUNT = args['candidates'] if 'candidates' in args else 1 #0 if self.ROW_COUNT < 1000 else 100
candidates = []
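Sizing the Saver to the checkpoint schedule matters because TF 1.x's Saver defaults to max_to_keep=5 and silently prunes older checkpoint files. A sketch, assuming TensorFlow's v1 compat mode and an illustrative schedule:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

CHECKPOINTS = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]  # illustrative schedule
w = tf.compat.v1.get_variable('w', shape=[2, 2])            # Saver requires at least one variable
# With the default max_to_keep=5, only the last five checkpoints would survive;
# sizing it to the schedule preserves every one for Predict to restore.
saver = tf.compat.v1.train.Saver(max_to_keep=len(CHECKPOINTS))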

View File

@@ -22,7 +22,7 @@ import nujson as json
from multiprocessing import Process, RLock
from datetime import datetime, timedelta
from multiprocessing import Queue
from data.maker.version import __version__
import time
@@ -33,6 +33,7 @@ class Learner(Process):
super(Learner, self).__init__()
self._arch = {'init':_args}
self.ndx = 0
self._queue = Queue()
self.lock = RLock()
@@ -44,6 +45,8 @@
self.gpu = None
self.info = _args['info']
if 'context' not in self.info :
self.info['context'] = self.info['from']
self.columns = self.info['columns'] if 'columns' in self.info else None
self.store = _args['store']
@@ -97,9 +100,12 @@ class Learner(Process):
# __info = (pd.DataFrame(self._states)[['name','path','args']]).to_dict(orient='records')
if self._states :
__info = {}
# print (self._states)
for key in self._states :
__info[key] = [{"name":_item['name'],"args":_item['args'],"path":_item['path']} for _item in self._states[key]]
_pipeline = self._states[key]
# __info[key] = ([{'name':_payload['name']} for _payload in _pipeline])
__info[key] = [{"name":_item['name'],"args":_item['args'],"path":_item['path']} for _item in self._states[key] if _item ]
self.log(object='state-space',action='load',input=__info)
@@ -173,19 +179,23 @@ class Learner(Process):
for name in columns :
#
# randomly sampling 5 elements to make sense of data-types
if self._df[name].size < 5 :
continue
_index = np.random.choice(np.arange(self._df[name].size),5,False)
no_value = [type(value) in [int,float,np.int64,np.int32,np.float32,np.float64] for value in self._df[name].values[_index]]
_index = np.random.choice(np.arange(self._df[name].shape[0]),5,False)
no_value = [type(value) in [int,float,np.int64,np.int32,np.float32,np.float64] for value in self._df[name].values[_index] if value is not None]
no_value = 0 if np.sum(no_value) > 0 else ''
try:
self._df[name] = self._df[name].fillna(no_value)
except Exception as e:
print (['.... skipping ',name,no_value])
finally:
pass
_log[name] = self._df[name].dtypes.name
_log = {'action':'structure','input':_log}
self.log(**_log)
# _log[name] = self._df[name].dtypes.name
# _log[name] = reader.meta()
# _log = {'action':'structure','input':_log}
# self.log(**_log)
#
# convert the data to binary here ...
_schema = self.get_schema()
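The change from .size to .shape[0] and the added `if value is not None` guard make the sampling robust to object columns. A standalone sketch of the fill-value inference in this hunk (the frame is illustrative):

import numpy as np
import pandas as pd

_df = pd.DataFrame({'age': [30, None, 42, 51, None, 28],
                    'city': ['x', None, 'y', 'z', None, 'x']})
for name in _df.columns:
    if _df[name].shape[0] < 5:        # too few rows to sample from
        continue
    _index = np.random.choice(np.arange(_df[name].shape[0]), 5, False)
    numeric = [type(v) in [int, float, np.int64, np.int32, np.float32, np.float64]
               for v in _df[name].values[_index] if v is not None]
    no_value = 0 if np.sum(numeric) > 0 else ''   # numeric columns get 0, others ''
    _df[name] = _df[name].fillna(no_value)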
@@ -273,15 +283,20 @@ class Trainer(Learner):
_args['network_args']['max_epochs'] = _epochs[0]['epochs']
self.log(action='autopilot',input={'epoch':_epochs[0]})
g = Generator(**_args)
# g.run()
end = datetime.now() #.strftime('%Y-%m-%d %H:%M:%S')
_min = float((end-beg).seconds/ 60)
_logs = {'action':'train','input':{'start':beg.strftime('%Y-%m-%d %H:%M:%S'),'minutes':_min,"unique_counts":self._encoder._io[0]}}
self.log(**_logs)
self._g = g
if self.autopilot :
# g = Generator(**_args)
g = Generator(**self._arch['init'])
self._g = g
self._g.run()
#
#@TODO Find a way to have the data in the object ....
@@ -300,10 +315,15 @@ class Generator (Learner):
#
# We need to load the mapping information for the space we are working with ...
#
self.network_args['candidates'] = int(_args['candidates']) if 'candidates' in _args else 1
filename = os.sep.join([self.network_args['logs'],'output',self.network_args['context'],'map.json'])
# filename = os.sep.join([self.network_args['logs'],'output',self.network_args['context'],'map.json'])
_suffix = self.network_args['context']
filename = os.sep.join([self.network_args['logs'],'output',self.network_args['context'],'meta-',_suffix,'.json'])
self.log(**{'action':'init-map','input':{'filename':filename,'exists':os.path.exists(filename)}})
if os.path.exists(filename):
file = open(filename)
self._map = json.loads(file.read())
file.close()
@@ -412,66 +432,51 @@ class Generator (Learner):
return _date.strftime(FORMAT)
pass
def format(self,_df,_schema):
r = {}
def _format(self,_df,_schema):
"""
:_df data-frame being processed
:_schema table schema with types
"""
_columns = [_item['name'] for _item in _schema]
_map = {'INT64':np.int64,'FLOAT64':np.float64,'DATE':np.datetime64,'TIMESTAMP':(lambda field: pd.to_datetime(field).dt.tz_localize(None))}
# pd.to_datetime(_df.measurement_datetime).dt.tz_localize(None)
for _item in _schema :
name = _item['name']
if _item['type'].upper() in ['DATE','DATETIME','TIMESTAMP'] :
FORMAT = '%Y-%m-%d'
_name = _item['name']
if _item['type'] not in _map :
continue
_pointer = _map[_item['type']]
try:
if type(_pointer).__name__ == 'type':
if _item['type'] in ['INT64','FLOAT64'] :
novalue = np.int64(0) if _item['type'] == 'INT64' else np.float64(0)
elif _item['type'] == 'STRING' :
novalue = ''
if _item['type'] in ['INT64','FLOAT64','STRING'] :
_df[_name] = _df[_name].fillna(novalue)
#
#-- Sometimes data isn't all it's meant to be
SIZE = -1
if 'format' in self.info and name in self.info['format'] :
FORMAT = self.info['format'][name]
SIZE = 10
elif _item['type'] in ['DATETIME','TIMESTAMP'] :
FORMAT = '%Y-%m-%-d %H:%M:%S'
SIZE = 19
if SIZE > 0 :
values = pd.to_datetime(_df[name], format=FORMAT).astype(np.datetime64)
# _df[name] = [_date[:SIZE].strip() for _date in values]
# This is not guaranteed to work but may help ...
_df[_name] = _df[_name].values.astype(_pointer)
# _df[name] = _df[name].astype(str)
r[name] = FORMAT
# _df[name] = pd.to_datetime(_df[name], format=FORMAT) #.astype('datetime64[ns]')
if _item['type'] in ['DATETIME','TIMESTAMP']:
pass #;_df[name] = _df[name].fillna('').astype('datetime64[ns]')
except Exception as e:
pass
finally:
pass
else:
#
# Because types are inferred on the basis of the sample being processed they can sometimes be wrong
# To help disambiguate we add the schema information
_type = None
if 'int' in _df[name].dtypes.name or 'int' in _item['type'].lower():
_type = np.int
elif 'float' in _df[name].dtypes.name or 'float' in _item['type'].lower():
_type = np.float
if _type :
_df[name] = _df[name].fillna(0).replace(' ',0).replace('',0).replace('NA',0).replace('nan',0).astype(_type)
# else:
# _df[name] = _df[name].astype(str)
# _df = _df.replace('NaT','').replace('NA','')
if r :
self.log(**{'action':'format','input':r})
return _df
_df[_name] = _pointer(_df[_name])
pass
except Exception as e:
pass
# bqw = transport.factory.instance(**_store['target'])
# bqw.write(_df,schema=_schema)
return _df[_columns]
def post(self,_candidates):
if 'target' in self.store :
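A condensed standalone sketch of the new _format logic above: a map from schema type names to either numpy dtypes or callables, null-filling with a type-appropriate default, then casting (the schema and frame here are illustrative):

import numpy as np
import pandas as pd

_schema = [{'name': 'id', 'type': 'INT64'}, {'name': 'ts', 'type': 'TIMESTAMP'}]
_df = pd.DataFrame({'id': [1, None, 3],
                    'ts': ['2023-08-08 11:09:10+00:00'] * 3})
_map = {'INT64': np.int64, 'FLOAT64': np.float64,
        'TIMESTAMP': (lambda field: pd.to_datetime(field).dt.tz_localize(None))}
for _item in _schema:
    _name, _type = _item['name'], _item['type']
    if _type not in _map:
        continue
    _pointer = _map[_type]
    if type(_pointer).__name__ == 'type':            # a dtype: fill nulls, then cast
        novalue = np.int64(0) if _type == 'INT64' else np.float64(0)
        _df[_name] = _df[_name].fillna(novalue).values.astype(_pointer)
    else:                                            # a callable: apply it
        _df[_name] = _pointer(_df[_name])
print(_df.dtypes)  # id: int64, ts: datetime64[ns]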
@@ -483,42 +488,58 @@ class Generator (Learner):
else:
_store = None
N = 0
_haslist = np.sum([type(_item)==list for _item in self.columns]) > 0
_schema = self.get_schema()
for _iodf in _candidates :
_df = self._df.copy()
if self.columns and _haslist is False:
_df[self.columns] = _iodf[self.columns]
else:
#
# This is the case where all attributes have undergone random permutations
#
_df = _iodf
N += _df.shape[0]
if self._states and 'post' in self._states:
_df = State.apply(_df,self._states['post'])
# #
# #@TODO:
# # Improve formatting with better post-processing pipeline
# if 'approximate' in self.info :
# _df = self.approximate(_df)
# if 'make_date' in self.info :
# for name in self.info['make_date'] :
# # iname = self.info['make_date']['init_field']
# iname = self.info['make_date'][name]
# years = _df[iname]
# _dates = [self.make_date(year=_year,field=name) for _year in years]
# if _dates :
# _df[name] = _dates
#
# Let us format the data frame so as to be able to minimize write errors
#
if _schema :
_df = self._format(_df,_schema)
_schema = self.get_schema()
_df = self.format(_df,_schema)
_log = [{"name":_schema[i]['name'],"dataframe":_df[_df.columns[i]].dtypes.name,"schema":_schema[i]['type']} for i in np.arange(len(_schema)) ]
self.log(**{"action":"consolidate","input":_log})
# _df = self.format(_df,_schema)
# _log = [{"name":_schema[i]['name'],"dataframe":_df[_df.columns[i]].dtypes.name,"schema":_schema[i]['type']} for i in np.arange(len(_schema)) ]
self.log(**{"action":"consolidate","input":{"rows":N,"candidate":_candidates.index(_iodf)}})
if _store :
_log = {'action':'write','input':{'table':self.info['from'],'schema':[],'rows':_df.shape[0]}}
writer = transport.factory.instance(**_store)
if _store['provider'] == 'bigquery':
writer.write(_df,schema=[],table=self.info['from'])
if _store['provider'] == 'bigquery' and _schema:
try:
_log['schema'] = _schema
writer.write(_df,schema=_schema)
except Exception as e:
print (e)
writer.write(_df)
else:
writer.write(_df,table=self.info['from'])
writer.write(_df)
self.log(**_log)
else:
self.cache.append(_df)
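The BigQuery branch now attempts a schema-qualified write and degrades to a plain write on failure. A sketch of that fallback shape (the writer here is a generic stand-in, not the data-transport API):

def write_with_fallback(writer, _df, _schema):
    # Try the schema-aware write first; if the backend rejects the schema,
    # log the error and retry without it, as the diff does.
    try:
        writer.write(_df, schema=_schema)
    except Exception as e:
        print(e)
        writer.write(_df)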
@@ -533,30 +554,63 @@ class Shuffle(Generator):
"""
def __init__(self,**_args):
super().__init__(**_args)
if 'data' not in _args :
reader = transport.factory.instance(**self.store['source'])
self._df = reader.read(sql=self.info['sql'])
def run(self):
np.random.seed(1)
self.initalize()
_index = np.arange(self._df.shape[0])
np.random.shuffle(_index)
np.random.shuffle(_index)
_iocolumns = self.info['columns']
_ocolumns = list(set(self._df.columns) - set(_iocolumns) )
# _iodf = pd.DataFrame(self._df[_ocolumns],self._df.loc[_index][_iocolumns],index=np.arange(_index.size))
_iodf = pd.DataFrame(self._df[_iocolumns].copy(),index = np.arange(_index.size))
# self._df = self._df.loc[_index][_ocolumns].join(_iodf)
self._df = self._df.loc[_index][_ocolumns]
self._df.index = np.arange(self._df.shape[0])
self._df = self._df.join(_iodf)
#
# The following is a full shuffle
self._df = self._df.loc[_index]
self._df.index = np.arange(self._df.shape[0])
# If we are given plain column names instead of a list of lists,
# collect them into their own column group
_invColumns = []
_colNames = []
_ucolNames= []
_rmColumns = []
for _item in self.info['columns'] :
if type(_item) == list :
_invColumns.append(_item)
_rmColumns += _item
elif _item in self._df.columns.tolist():
_colNames.append(_item)
#
# At this point we build the matrix of column groups of interest, folding in any unspecified columns
#
if _colNames :
_invColumns.append(_colNames)
_ucolNames = list(set(self._df.columns) - set(_colNames) - set(_rmColumns))
if _ucolNames :
_invColumns += [ [_name] for _name in _ucolNames]
_xdf = pd.DataFrame()
_xdf = pd.DataFrame()
_index = np.arange(self._df.shape[0])
for _columns in _invColumns :
_tmpdf = self._df[_columns].copy()[_columns]
np.random.seed(1)
np.random.shuffle(_index)
# _values = _tmpdf.values[_index]
#_tmpdf = _tmpdf.iloc[_index]
_tmpdf = pd.DataFrame(_tmpdf.values[_index],columns=_columns)
if _xdf.shape[0] == 0 :
_xdf = _tmpdf
else:
_xdf = _xdf.join(_tmpdf)
_xdf = _xdf[self._df.columns]
self._df = _xdf
_log = {'action':'io-data','input':{'candidates':1,'rows':int(self._df.shape[0])}}
self.log(**_log)
try:
self.post([self._df])
self.log(**{'action':'completed','input':{'candidates':1,'rows':int(self._df.shape[0])}})
except Exception as e :
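The rewritten run() above shuffles each column group under its own permutation instead of permuting whole rows once. A self-contained sketch of the group-wise shuffle (the frame and groups are illustrative):

import numpy as np
import pandas as pd

_df = pd.DataFrame({'a': range(5), 'b': range(5, 10), 'c': range(10, 15)})
_invColumns = [['a', 'b'], ['c']]   # each group is shuffled jointly
_index = np.arange(_df.shape[0])
_xdf = pd.DataFrame()
for _columns in _invColumns:
    np.random.seed(1)               # re-seeding re-applies one fixed permutation,
    np.random.shuffle(_index)       # so each group's order compounds on the last
    _tmpdf = pd.DataFrame(_df[_columns].values[_index], columns=_columns)
    _xdf = _tmpdf if _xdf.shape[0] == 0 else _xdf.join(_tmpdf)
_xdf = _xdf[_df.columns]            # restore the original column order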
@@ -580,6 +634,7 @@ class factory :
"""
#
if _args['apply'] in [apply.RANDOM] :
pthread = Shuffle(**_args)

View File

@@ -31,12 +31,22 @@ class State :
continue
pointer = _item['module']
_args = _item['args']
if type(pointer).__name__ != 'function':
_args = _item['args'] if 'args' in _item else {}
else:
pointer = _item['module']
_args = _item['args'] if 'args' in _item else {}
_data = pointer(_data,_args)
return _data
@staticmethod
def instance(_args):
"""
"""
pre = []
post=[]
@@ -45,8 +55,20 @@ class State :
#
# If the item has a path property it should be ignored
path = _args[key]['path'] if 'path' in _args[key] else ''
out[key] = [ State._build(dict(_item,**{'path':path})) if 'path' not in _item else State._build(_item) for _item in _args[key]['pipeline']]
# out[key] = [ State._build(dict(_item,**{'path':path})) if 'path' not in _item else State._build(_item) for _item in _args[key]['pipeline']]
out[key] = []
for _item in _args[key]['pipeline'] :
if type(_item).__name__ == 'function':
_stageInfo = {'module':_item,'name':_item.__name__,'args':{},'path':''}
pass
else:
if 'path' in _item :
_stageInfo = State._build(dict(_item,**{'path':path}))
else :
_stageInfo= State._build(_item)
out[key].append(_stageInfo)
# print ([out])
return out
# if 'pre' in _args:
# path = _args['pre']['path'] if 'path' in _args['pre'] else ''
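State.instance now tolerates bare callables in a pipeline alongside {module, args, path} dictionaries. A sketch of that normalization step in isolation (the function and names are illustrative):

def _normalize(pipeline, path=''):
    # Turn a mixed list of callables and stage dicts into uniform stage records.
    out = []
    for _item in pipeline:
        if callable(_item):                       # a bare function becomes a stage
            out.append({'module': _item, 'name': _item.__name__,
                        'args': {}, 'path': ''})
        else:                                     # a dict inherits the shared path
            out.append(dict(_item, **{'path': _item.get('path', path)}))
    return out

def scrub(df, args):
    return df

stages = _normalize([scrub, {'name': 'mask', 'args': {}}], path='/tmp')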
@@ -68,6 +90,13 @@ class State :
pass
@staticmethod
def _build(_args):
"""
This function builds the object {module,path} where module is extracted from a file (if needed)
:param _args dictionary of key-value attributes; it can also be a function
"""
#
# In the event an actual pointer is passed we should do the following
_info = State._extract(_args)
# _info = dict(_args,**_info)

data/maker/version.py Normal file
View File

@@ -0,0 +1 @@
__version__='1.7.6'

View File

@@ -1,10 +1,10 @@
from setuptools import setup, find_packages
import os
import sys
import version
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
args = {"name":"data-maker","version":"1.6.4",
args = {"name":"data-maker","version":version.__version__,
"author":"Vanderbilt University Medical Center","author_email":"steve.l.nyemba@vumc.org","license":"MIT",
"packages":find_packages(),"keywords":["healthcare","data","transport","protocol"]}
args["install_requires"] = ['data-transport@git+https://github.com/lnyemba/data-transport.git','tensorflow']

version.py Symbolic link
View File

@@ -0,0 +1 @@
data/maker/version.py
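Taken together, the last three diffs single-source the version string: it lives in data/maker/version.py, the package imports it directly, and setup.py reaches the same file through the new version.py symlink at the repository root. A sketch of how the pieces resolve:

# data/maker/version.py -- the single source of truth
__version__ = '1.7.6'

# data/maker/__init__.py imports it for runtime use:
#   from data.maker.version import __version__
# setup.py imports the same file through the root-level symlink:
#   import version
#   args = {"name": "data-maker", "version": version.__version__}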