Compare commits
No commits in common. "c865e59ff4bea2c6f71372b4300874916458e3f2" and "59d6cc50c06df334c9d90dd5472271aeaf1f0664" have entirely different histories.
c865e59ff4 ... 59d6cc50c0
data/gan.py (18 changed lines)
@@ -103,7 +103,6 @@ class GNet :
         CHECKPOINT_SKIPS = int(args['checkpoint_skips']) if 'checkpoint_skips' in args else int(self.MAX_EPOCHS/10)
         CHECKPOINT_SKIPS = 1 if CHECKPOINT_SKIPS < 1 else CHECKPOINT_SKIPS
         # if self.MAX_EPOCHS < 2*CHECKPOINT_SKIPS :
         #     CHECKPOINT_SKIPS = 2
         # self.CHECKPOINTS = [1,self.MAX_EPOCHS] + np.repeat( np.divide(self.MAX_EPOCHS,CHECKPOINT_SKIPS),CHECKPOINT_SKIPS ).cumsum().astype(int).tolist()
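Review note: the commented-out schedule would place checkpoints at evenly spaced epochs plus the first and last epoch. A minimal standalone sketch of what that expression computes (function name is illustrative, not from the repo):

    import numpy as np

    def checkpoint_epochs(max_epochs, skips):
        # guard against a zero interval, as the clamp above does
        skips = max(1, skips)
        step = np.divide(max_epochs, skips)                  # spacing between checkpoints
        marks = np.repeat(step, skips).cumsum().astype(int)  # e.g. 10, 20, ..., 100
        return [1, max_epochs] + marks.tolist()

    print(checkpoint_epochs(100, 10))   # [1, 100, 10, 20, ..., 100]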
@@ -288,17 +287,8 @@ class Generator (GNet):
     """
     def __init__(self,**args):
-        if 'trainer' not in args :
-            GNet.__init__(self,**args)
-            self.discriminator = Discriminator(**args)
-        else:
-            _args = {}
-            _trainer = args['trainer']
-            for key in vars(_trainer) :
-                value = getattr(_trainer,key)
-                setattr(self,key,value)
-                _args[key] = value
-            self.discriminator = Discriminator(**_args)
+        GNet.__init__(self,**args)
+        self.discriminator = Discriminator(**args)
     def loss(self,**args):
         fake = args['fake']
         label = args['label']
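Review note: the removed branch let a Generator be built from an existing trainer object by mirroring all of its attributes. A hedged sketch of that vars/getattr/setattr pattern (classes are illustrative stand-ins, not the repo's):

    class Trainer:
        def __init__(self):
            self.columns = ['age', 'income']
            self.store = {'source': '...'}

    class Generator:
        def __init__(self, **args):
            if 'trainer' in args:
                _trainer = args['trainer']
                # copy every attribute of the trainer onto this object
                for key in vars(_trainer):
                    setattr(self, key, getattr(_trainer, key))

    g = Generator(trainer=Trainer())
    print(g.columns)   # ['age', 'income']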
@@ -667,9 +657,7 @@ class Predict(GNet):
         fake = self.generator.network(inputs=z, label=label)
         init = tf.compat.v1.global_variables_initializer()
-        print ([self.CHECKPOINTS])
-        # saver = tf.compat.v1.train.Saver()
-        saver = tf.compat.v1.train.Saver(max_to_keep=len(self.CHECKPOINTS))
+        saver = tf.compat.v1.train.Saver()
         df = pd.DataFrame()
         CANDIDATE_COUNT = args['candidates'] if 'candidates' in args else 1 #0 if self.ROW_COUNT < 1000 else 100
         candidates = []
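Review note: tf.compat.v1.train.Saver keeps only the 5 most recent checkpoint files by default, so dropping max_to_keep=len(self.CHECKPOINTS) means older scheduled checkpoints get pruned from disk. A minimal sketch of the difference (schedule values are illustrative):

    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()

    _ = tf.compat.v1.get_variable('w', shape=[1])   # Saver needs at least one variable

    CHECKPOINTS = [1, 25, 50, 75, 100]              # illustrative epoch schedule

    # default retention: at most 5 checkpoint files survive
    # saver = tf.compat.v1.train.Saver()
    # one retained file per scheduled epoch:
    saver = tf.compat.v1.train.Saver(max_to_keep=len(CHECKPOINTS))
    # with tf.compat.v1.Session() as sess:
    #     saver.save(sess, 'logs/model', global_step=epoch)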
@@ -22,7 +22,7 @@ import nujson as json
 from multiprocessing import Process, RLock
 from datetime import datetime, timedelta
 from multiprocessing import Queue
 from version import __version__

 import time
@@ -33,7 +33,6 @@ class Learner(Process):

         super(Learner, self).__init__()
-        self._arch = {'init':_args}
         self.ndx = 0
         self._queue = Queue()
         self.lock = RLock()
@@ -45,8 +44,6 @@ class Learner(Process):
         self.gpu = None

         self.info = _args['info']
-        if 'context' not in self.info :
-            self.info['context'] = self.info['from']
         self.columns = self.info['columns'] if 'columns' in self.info else None
         self.store = _args['store']
@@ -100,12 +97,9 @@ class Learner(Process):
         # __info = (pd.DataFrame(self._states)[['name','path','args']]).to_dict(orient='records')
         if self._states :
             __info = {}
-            # print (self._states)
-            for key in self._states :
-                _pipeline = self._states[key]
-
-                # __info[key] = ([{'name':_payload['name']} for _payload in _pipeline])
-                __info[key] = [{"name":_item['name'],"args":_item['args'],"path":_item['path']} for _item in self._states[key] if _item ]
+            for key in self._states :
+                __info[key] = [{"name":_item['name'],"args":_item['args'],"path":_item['path']} for _item in self._states[key]]
             self.log(object='state-space',action='load',input=__info)
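Review note: both sides reduce each pipeline stage to a JSON-friendly summary before logging; the removed comprehension additionally skipped empty entries with the trailing "if _item". A standalone sketch of that summarization (stage data is made up):

    _states = {
        'pre':  [{'name': 'clean', 'args': {}, 'path': '', 'module': len}],
        'post': [{'name': 'approx', 'args': {'columns': ['age']}, 'path': 'etc/post.py', 'module': len}, None],
    }

    __info = {}
    for key in _states:
        # keep the loggable fields, drop the function pointer, skip empty entries
        __info[key] = [{'name': s['name'], 'args': s['args'], 'path': s['path']}
                       for s in _states[key] if s]

    print(__info)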
@@ -179,7 +173,6 @@ class Learner(Process):
         for name in columns :
             #
             # randomly sampling 5 elements to make sense of data-types

             if self._df[name].size < 5 :
                 continue
             _index = np.random.choice(np.arange(self._df[name].size),5,False)
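Review note: np.random.choice raises when asked for more samples than the column has rows, which is what the size guard avoids; sampling a handful of values without replacement is a cheap way to guess a column's type. A hedged sketch (helper name is illustrative):

    import numpy as np
    import pandas as pd

    def guess_type(series, k=5):
        if series.size < k:
            return 'unknown'    # mirror the guard: too few rows to sample
        idx = np.random.choice(np.arange(series.size), k, False)
        sample = pd.to_numeric(series.iloc[idx], errors='coerce')
        return 'numeric' if sample.notna().all() else 'text'

    df = pd.DataFrame({'age': ['21', '34', '56', '44', '19', '60']})
    print(guess_type(df['age']))   # numeric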
@@ -280,20 +273,15 @@ class Trainer(Learner):

         _args['network_args']['max_epochs'] = _epochs[0]['epochs']
         self.log(action='autopilot',input={'epoch':_epochs[0]})

         g = Generator(**_args)
         # g.run()

         end = datetime.now() #.strftime('%Y-%m-%d %H:%M:%S')
         _min = float((end-beg).seconds/ 60)
         _logs = {'action':'train','input':{'start':beg.strftime('%Y-%m-%d %H:%M:%S'),'minutes':_min,"unique_counts":self._encoder._io[0]}}
         self.log(**_logs)

         self._g = g
         if self.autopilot :
-
-            # g = Generator(**_args)
-
-            g = Generator(**self._arch['init'])
-            self._g = g
             self._g.run()
         #
         #@TODO Find a way to have the data in the object ....
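Review note on the timing line kept above: timedelta.seconds only carries the sub-day seconds component, so (end-beg).seconds under-reports any run that crosses a day boundary; total_seconds() measures the full span. Illustration:

    from datetime import datetime, timedelta

    beg = datetime(2024, 1, 1, 23, 0)
    end = beg + timedelta(hours=25, minutes=30)

    print((end - beg).seconds / 60)          # 90.0   (the day component is dropped)
    print((end - beg).total_seconds() / 60)  # 1530.0 (full elapsed minutes)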
@@ -312,15 +300,10 @@ class Generator (Learner):
         #
         # We need to load the mapping information for the space we are working with ...
         #
         self.network_args['candidates'] = int(_args['candidates']) if 'candidates' in _args else 1
-        # filename = os.sep.join([self.network_args['logs'],'output',self.network_args['context'],'map.json'])
-        _suffix = self.network_args['context']
-        filename = os.sep.join([self.network_args['logs'],'output',self.network_args['context'],'meta-',_suffix,'.json'])
+        filename = os.sep.join([self.network_args['logs'],'output',self.network_args['context'],'map.json'])
         self.log(**{'action':'init-map','input':{'filename':filename,'exists':os.path.exists(filename)}})
         if os.path.exists(filename):
             file = open(filename)
             self._map = json.loads(file.read())
             file.close()
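Review note: the reverted filename was probably misassembled, since os.sep.join turns every list element into a path segment; building a single name like 'meta-<context>.json' needs string concatenation instead. Illustration (POSIX paths):

    import os

    logs, context = 'logs', 'demo'
    print(os.sep.join([logs, 'output', context, 'meta-', context, '.json']))
    # logs/output/demo/meta-/demo/.json   <- 'meta-' becomes a directory
    print(os.sep.join([logs, 'output', context, 'meta-' + context + '.json']))
    # logs/output/demo/meta-demo.json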
@@ -502,10 +485,7 @@ class Generator (Learner):
         N = 0
         for _iodf in _candidates :
             _df = self._df.copy()
-            if self.columns :
-                _df[self.columns] = _iodf[self.columns]
-
+            _df[self.columns] = _iodf[self.columns]
             N += _df.shape[0]
             if self._states and 'post' in self._states:
                 _df = State.apply(_df,self._states['post'])
@@ -553,55 +533,27 @@ class Shuffle(Generator):
     """
     def __init__(self,**_args):
         super().__init__(**_args)
         if 'data' not in _args :
             reader = transport.factory.instance(**self.store['source'])
             self._df = reader.read(sql=self.info['sql'])

     def run(self):

         np.random.seed(1)
         self.initalize()
-        #
-        # If we are given lists of columns instead of a list-of-list
-        # unpack the list
-        _invColumns = []
-        _colNames = []
-        _ucolNames= []
-        for _item in self.info['columns'] :
-            if type(_item) == list :
-                _invColumns.append(_item)
-            elif _item in self._df.columns.tolist():
-                _colNames.append(_item)
-        #
-        # At this point we build the matrix of elements we are interested in considering the any unspecified column
-        #
-        if _colNames :
-            _invColumns.append(_colNames)
-            _ucolNames = list(set(self._df.columns) - set(_colNames))
-            if _ucolNames :
-                _invColumns += [ [_name] for _name in _ucolNames]
-
-        _xdf = pd.DataFrame()
         _index = np.arange(self._df.shape[0])
         np.random.shuffle(_index)
-        for _columns in _invColumns :
-            _tmpdf = self._df[_columns].copy()[_columns]
-            np.random.seed(1)
-            np.random.shuffle(_index)
-            print (_columns,_index)
-            # _values = _tmpdf.values[_index]
-            #_tmpdf = _tmpdf.iloc[_index]
-            _tmpdf = pd.DataFrame(_tmpdf.values[_index],columns=_columns)
-            if _xdf.shape[0] == 0 :
-                _xdf = _tmpdf
-            else:
-                _xdf = _xdf.join(_tmpdf)
-
-        _xdf = _xdf[self._df.columns]
-        self._df = _xdf
+        _iocolumns = self.info['columns']
+        _ocolumns = list(set(self._df.columns) - set(_iocolumns) )
+        # _iodf = pd.DataFrame(self._df[_ocolumns],self._df.loc[_index][_iocolumns],index=np.arange(_index.size))
+        _iodf = pd.DataFrame(self._df[_iocolumns].copy(),index = np.arange(_index.size))
+        # self._df = self._df.loc[_index][_ocolumns].join(_iodf)
+        self._df = self._df.loc[_index][_ocolumns]
+        self._df.index = np.arange(self._df.shape[0])
+        self._df = self._df.join(_iodf)
+        #
+        # The following is a full shuffle
+        self._df = self._df.loc[_index]
+        self._df.index = np.arange(self._df.shape[0])
         _log = {'action':'io-data','input':{'candidates':1,'rows':int(self._df.shape[0])}}
         self.log(**_log)
         try:
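Review note: the removed logic shuffles groups of columns independently, so values stay aligned within a group but are decoupled across groups, while the restored code applies a single full row shuffle. A standalone hedged sketch of the group-wise idea (data and helper are illustrative):

    import numpy as np
    import pandas as pd

    def shuffle_groups(df, groups, seed=1):
        # each column group gets its own random row order
        rng = np.random.default_rng(seed)
        parts = []
        for cols in groups:
            idx = rng.permutation(df.shape[0])
            parts.append(pd.DataFrame(df[cols].values[idx], columns=cols))
        return pd.concat(parts, axis=1)[df.columns]

    df = pd.DataFrame({'age': [21, 34, 56], 'zip': ['a', 'b', 'c'], 'income': [1, 2, 3]})
    print(shuffle_groups(df, [['age', 'zip'], ['income']]))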
@@ -628,7 +580,6 @@ class factory :

     """

         #

         if _args['apply'] in [apply.RANDOM] :
             pthread = Shuffle(**_args)
@@ -69,7 +69,7 @@ class Date(Post):
         """

         """
         pass
 class Approximate(Post):
     def apply(**_args):
         pass
@@ -31,22 +31,12 @@ class State :
                 continue

             pointer = _item['module']
-            if type(pointer).__name__ != 'function':
-                _args = _item['args'] if 'args' in _item else {}
-            else:
-                pointer = _item['module']
-                _args = _item['args'] if 'args' in _item else {}
+            _args = _item['args']

             _data = pointer(_data,_args)
         return _data
     @staticmethod
     def instance(_args):
         """

         """
         pre = []
         post=[]
@@ -55,20 +45,8 @@ class State :
             #
             # If the item has a path property is should be ignored
             path = _args[key]['path'] if 'path' in _args[key] else ''
-            # out[key] = [ State._build(dict(_item,**{'path':path})) if 'path' not in _item else State._build(_item) for _item in _args[key]['pipeline']]
-            out[key] = []
-            for _item in _args[key]['pipeline'] :
-
-                if type(_item).__name__ == 'function':
-                    _stageInfo = {'module':_item,'name':_item.__name__,'args':{},'path':''}
-                    pass
-                else:
-                    if 'path' in _item :
-                        _stageInfo = State._build(dict(_item,**{'path':path}))
-                    else :
-                        _stageInfo= State._build(_item)
-                out[key].append(_stageInfo)
-            # print ([out])
+            out[key] = [ State._build(dict(_item,**{'path':path})) if 'path' not in _item else State._build(_item) for _item in _args[key]['pipeline']]
         return out
         # if 'pre' in _args:
         #    path = _args['pre']['path'] if 'path' in _args['pre'] else ''
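Review note: the removed loop generalized the comprehension so a pipeline entry may be a plain callable as well as a {name,args,path} dict. A hedged sketch of that dispatch, with State._build stubbed out:

    def _build(item):
        # stand-in for State._build: resolve a dict spec into a stage record
        return {'module': None, 'name': item['name'],
                'args': item.get('args', {}), 'path': item.get('path', '')}

    def build_stage(item, default_path=''):
        if callable(item):
            # bare function: wrap it directly, nothing to load from disk
            return {'module': item, 'name': item.__name__, 'args': {}, 'path': ''}
        if 'path' not in item:
            item = dict(item, path=default_path)
        return _build(item)

    print(build_stage(print))
    print(build_stage({'name': 'clean'}, 'etc/pre.py'))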
@@ -90,13 +68,6 @@ class State :
         pass
     @staticmethod
     def _build(_args):
         """
         This function builds the object {module,path} where module is extracted from a file (if needed)
         :param _args dictionary containing attributes that can be value pair
         It can also be a function
         """
         #
         # In the advent an actual pointer is passed we should do the following

         _info = State._extract(_args)
         # _info = dict(_args,**_info)
@@ -1 +0,0 @@
-__version__='1.7.0'
setup.py (4 changed lines)
@@ -1,10 +1,10 @@
 from setuptools import setup, find_packages
 import os
 import sys
-import version

 def read(fname):
     return open(os.path.join(os.path.dirname(__file__), fname)).read()
-args = {"name":"data-maker","version":version.__version__,
+args = {"name":"data-maker","version":"1.6.4",
 "author":"Vanderbilt University Medical Center","author_email":"steve.l.nyemba@vumc.org","license":"MIT",
 "packages":find_packages(),"keywords":["healthcare","data","transport","protocol"]}
 args["install_requires"] = ['data-transport@git+https://github.com/lnyemba/data-transport.git','tensorflow']
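Review note: the left side single-sources the package version from a version module (the compare also deletes a one-line file containing __version__='1.7.0' above) instead of hard-coding it in setup.py. The usual shape of that pattern, as a sketch:

    # version.py -- single source of truth
    __version__ = '1.7.0'

    # setup.py
    from setuptools import setup, find_packages
    import version

    setup(name='data-maker',
          version=version.__version__,   # read from version.py, not hard-coded
          packages=find_packages())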
@@ -1 +0,0 @@
-data/maker/version.py