Merging the release into stable. Handling of GPU and epochs
commit 76e84c3859
@@ -245,15 +245,12 @@ class Discriminator(GNet):
         :label
         """
         x = args['inputs']
-        print ()
-        print (x[:3,:])
-        print()
         label = args['label']
         with tf.compat.v1.variable_scope('D', reuse=tf.compat.v1.AUTO_REUSE , regularizer=l2_regularizer(0.00001)):
             for i, dim in enumerate(self.D_STRUCTURE[1:]):
                 kernel = self.get.variables(name='W_' + str(i), shape=[self.D_STRUCTURE[i], dim])
                 bias = self.get.variables(name='b_' + str(i), shape=[dim])
-                print (["\t",bias,kernel])
+                # print (["\t",bias,kernel])
                 x = tf.nn.relu(tf.add(tf.matmul(x, kernel), bias))
                 x = self.normalize(inputs=x, name='cln' + str(i), shift=1,labels=label, n_labels=self.NUM_LABELS)
             i = len(self.D_STRUCTURE)
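For readers skimming the diff: the loop this hunk cleans up builds each hidden discriminator layer as an affine transform, a ReLU, and a label-conditioned normalization. Below is a minimal sketch of that per-layer step, assuming the tensorflow==1.15 pin from setup.py further down and substituting tf.compat.v1.get_variable for the class's own self.get.variables helper; the function name is illustrative, not part of the project's API.

import tensorflow as tf  # assumes the tensorflow==1.15 pin from setup.py below

def discriminator_layer(x, in_dim, out_dim, index):
    # One hidden layer: x @ W + b followed by ReLU, with variables created (or
    # reused) under the shared 'D' scope, mirroring the loop in the hunk above.
    with tf.compat.v1.variable_scope('D', reuse=tf.compat.v1.AUTO_REUSE):
        kernel = tf.compat.v1.get_variable('W_' + str(index), shape=[in_dim, out_dim])
        bias = tf.compat.v1.get_variable('b_' + str(index), shape=[out_dim])
        return tf.nn.relu(tf.add(tf.matmul(x, kernel), bias))

The removed print statements were debug output only; commenting out the per-layer print leaves graph construction unchanged.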
@@ -538,6 +535,7 @@ if __name__ == '__main__' :
     # Now we get things done ...
     column = SYS_ARGS['column']
     column_id = SYS_ARGS['id'] if 'id' in SYS_ARGS else 'person_id'
+    column_id = column_id.split(',') if ',' in column_id else column_id
     df = pd.read_csv(SYS_ARGS['raw-data'])
     LABEL = pd.get_dummies(df[column_id]).astype(np.float32).values

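The only functional addition in this hunk is the split: the id argument may now name one column or a comma-separated list, and pd.get_dummies accepts both a Series and a DataFrame, so the one-hot label matrix simply widens when several id columns are given. A small self-contained illustration with invented column names:

import numpy as np
import pandas as pd

df = pd.DataFrame({'person_id': ['p1', 'p2', 'p1'], 'race': ['a', 'b', 'a']})

for raw_id in ('person_id', 'person_id,race'):
    column_id = raw_id.split(',') if ',' in raw_id else raw_id
    LABEL = pd.get_dummies(df[column_id]).astype(np.float32).values
    print(raw_id, LABEL.shape)   # (3, 2) for a single id column, (3, 4) for two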
@@ -38,7 +38,7 @@ def train (**args) :
     else:
         logger = None

-    trainer = gan.Train(context=context,max_epochs=max_epochs,real=real,label=labels,column=column,column_id=column_id,logger = logger,logs=logs)
+    trainer = gan.Train(context=context,max_epochs=max_epochs,num_gpu=num_gpu,real=real,label=labels,column=column,column_id=column_id,logger = logger,logs=logs)
     return trainer.apply()

 def generate(**args):
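For completeness, a hedged sketch of a call site for the updated train wrapper. Only the keyword names visible in this hunk come from the source; the import path data.maker, the placeholder CSV, and the encoding used for real are assumptions made for illustration.

import numpy as np
import pandas as pd
from data import maker   # hypothetical import path; the package is published as data-maker

df = pd.read_csv('raw-data.csv')                                     # placeholder file
labels = pd.get_dummies(df['person_id']).astype(np.float32).values   # as in the __main__ hunk above

maker.train(
    context='demo',
    max_epochs=10,
    num_gpu=1,               # new keyword threaded through to gan.Train by this commit
    real=pd.get_dummies(df['race']).astype(np.float32).values,   # assumed encoding of the target column
    label=labels,
    column='race',
    column_id='person_id',
    logger=None,
    logs='logs',
)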
@@ -57,6 +57,9 @@ def generate(**args):
     column_id = args['id']
     logs = args['logs']
     context = args['context']
+    num_gpu = 1 if 'num_gpu' not in args else args['num_gpu']
+    max_epochs = 10 if 'max_epochs' not in args else args['max_epochs']
+
     #
     #@TODO:
     # If the identifier is not present, we should fine a way to determine or make one
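The two added defaults test for key membership in args; dict.get expresses the same fallback and is equivalent here, since a missing key rather than a falsy value is what triggers the default:

args = {}                                 # e.g. the caller supplied neither knob
num_gpu = args.get('num_gpu', 1)          # same as: 1 if 'num_gpu' not in args else args['num_gpu']
max_epochs = args.get('max_epochs', 10)   # same as: 10 if 'max_epochs' not in args else args['max_epochs']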
@@ -67,9 +70,9 @@ def generate(**args):
     values.sort()

     labels = pd.get_dummies(df[column_id]).astype(np.float32).values
-    handler = gan.Predict (context=context,label=labels,values=values,column=column)
+    handler = gan.Predict (context=context,label=labels,max_epochs=max_epochs,num_gpu=num_gpu,values=values,column=column,logs=logs)
     handler.load_meta(column)
     r = handler.apply()
     _df = df.copy()
     _df[column] = r[column]
     return _df
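A matching hedged sketch for the generation side. Of the keywords below, only id, logs, context, num_gpu, and max_epochs are visible in the hunks; data, column, and the import path are assumptions, and the returned frame is the caller's data with the target column replaced by synthesized values, as the closing lines above show.

import pandas as pd
from data import maker   # hypothetical import path, as in the train sketch

df = pd.read_csv('raw-data.csv')      # placeholder file
synthetic = maker.generate(
    context='demo',
    data=df,                          # assumed key for the frame that generate copies
    column='race',
    id='person_id',
    num_gpu=1,                        # falls back to 1 when omitted (see the defaults above)
    max_epochs=10,                    # falls back to 10 when omitted
    logs='logs',
)
print(synthetic.columns.tolist())     # same columns as df, with 'race' holding synthetic values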
setup.py | 4 ++--
@@ -4,9 +4,9 @@ import sys

 def read(fname):
     return open(os.path.join(os.path.dirname(__file__), fname)).read()
-args = {"name":"data-maker","version":"1.0.2","author":"Vanderbilt University Medical Center","author_email":"steve.l.nyemba@vanderbilt.edu","license":"MIT",
+args = {"name":"data-maker","version":"1.0.5","author":"Vanderbilt University Medical Center","author_email":"steve.l.nyemba@vanderbilt.edu","license":"MIT",
 "packages":find_packages(),"keywords":["healthcare","data","transport","protocol"]}
-args["install_requires"] = ['data-transport@git+https://dev.the-phi.com/git/steve/data-transport.git','tensorflow==1.14.0','numpy==1.16.3','pandas','pandas-gbq','pymongo']
+args["install_requires"] = ['data-transport@git+https://dev.the-phi.com/git/steve/data-transport.git','tensorflow==1.15','pandas','pandas-gbq','pymongo']
 args['url'] = 'https://hiplab.mc.vanderbilt.edu/aou/data-maker.git'

 if sys.version_info[0] == 2 :
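With the explicit numpy==1.16.3 pin removed and tensorflow moved to 1.15, numpy now arrives as a transitive dependency of the tensorflow wheel. A short, illustrative check that an environment resolved the intended versions:

import numpy as np
import tensorflow as tf

print(tf.__version__)   # expected to start with '1.15' per the updated install_requires
print(np.__version__)   # whichever release the tensorflow 1.15 requirement pulled in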