"""
(c) 2019 Data Maker, hiplab.mc.vanderbilt.edu
version 1.0.0
This package serves as a proxy to the overall usage of the framework.
This package is designed to generate synthetic data from a dataset from an original dataset using deep learning techniques
@TODO:
- Make configurable GPU, EPOCHS
"""
import pandas as pd
import numpy as np
import data.gan as gan
from transport import factory
from data.bridge import Binary
import threading as thread
class ContinuousToDiscrete:
    ROUND_UP = 2

    @staticmethod
    def binary(X, n=4):
        """
        This function will convert a continuous stream of values into a one-hot encoded bit stream over n bins
        """
        BOUNDS = ContinuousToDiscrete.bounds(np.round(X, ContinuousToDiscrete.ROUND_UP), n)
        _matrix = []
        for value in X:
            x_ = np.zeros(n)
            _matrix.append(x_)
            for row in BOUNDS:
                if value >= row.left and value <= row.right:
                    index = BOUNDS.index(row)
                    x_[index] = 1
                    break
        # stack into a 2-D array so callers can cast the result with .astype(...)
        return np.array(_matrix)

    @staticmethod
    def bounds(x, n):
        return list(pd.cut(np.array(np.round(x, ContinuousToDiscrete.ROUND_UP)), n).categories)
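
    # A minimal sketch of what `bounds` and `binary` produce (the input values
    # here are hypothetical; exact bin edges come from pandas.cut, which pads
    # the lowest edge slightly):
    #
    #   ContinuousToDiscrete.bounds([1.0, 2.0, 9.0], 2)
    #   # -> two pandas Interval objects, e.g. (0.992, 5.0] and (5.0, 9.0]
    #
    #   ContinuousToDiscrete.binary(np.array([1.0, 2.0, 9.0]), n=2)
    #   # -> array([[1., 0.],
    #   #           [1., 0.],
    #   #           [0., 1.]])   one one-hot row per input value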

    @staticmethod
    def continuous(X, BIN_SIZE=4):
        """
        This function will approximate continuous values from their one-hot bin encoding
        :X          continuous stream of values (used to derive the bin boundaries)
        :BIN_SIZE   number of bins used for the encoding
        """
        BOUNDS = ContinuousToDiscrete.bounds(X, BIN_SIZE)
        values = []
        _BINARY = ContinuousToDiscrete.binary(X, BIN_SIZE)
        for row in _BINARY:
            # locate the active bin, then draw a random value within its boundaries
            index = np.where(row == 1)[0][0]
            ubound = BOUNDS[index].right
            lbound = BOUNDS[index].left
            x_ = np.round(np.random.uniform(lbound, ubound), ContinuousToDiscrete.ROUND_UP).astype(float)
            values.append(x_)
        return values
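
# A minimal round-trip sketch (not part of the original API; the input values
# are hypothetical). It encodes a continuous series into one-hot bins, then
# approximates it back. The reconstruction is lossy because `continuous`
# samples uniformly within each bin rather than recovering the exact value.
def _demo_continuous_to_discrete():
    x = np.array([1.0, 2.5, 7.0, 9.0])
    encoded = ContinuousToDiscrete.binary(x, n=4)            # (4, 4) one-hot matrix
    approx = ContinuousToDiscrete.continuous(x, BIN_SIZE=4)  # one random draw per value
    return encoded, approx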
def train(**args):
    """
    This function is intended to train the GAN in order to learn the distribution of the features
    :column     column(s) that need to be synthesized (discrete)
    :logs       location on disk where the training output is stored
    :id         identifier of the dataset
    :data       data-frame to be synthesized
    :context    label of what we are synthesizing
    """
    column = args['column'] if isinstance(args['column'], list) else [args['column']]
    CONTINUOUS = args['continuous'] if 'continuous' in args else []
    # column_id = args['id']
    df = args['data'] if not isinstance(args['data'], str) else pd.read_csv(args['data'])
    df.columns = [name.lower() for name in df.columns]
    #
    # @TODO:
    # Consider sequential training of sub-populations for extremely large datasets
    #
    #
    # If we have several columns we will proceed one at a time (it could be done in separate threads)
    # @TODO: Consider performing this task on several threads/GPUs simultaneously
    #
    for col in column:
        if 'float' in df[col].dtypes.name and col in CONTINUOUS:
            BIN_SIZE = 4 if 'bin_size' not in args else int(args['bin_size'])
            args['real'] = ContinuousToDiscrete.binary(df[col], BIN_SIZE).astype(np.float32)
        else:
            args['real'] = pd.get_dummies(df[col].dropna()).astype(np.float32).values

        args['column'] = col
        args['context'] = col
        context = args['context']
        if 'store' in args:
            args['store']['args']['doc'] = context
            logger = factory.instance(**args['store'])
            args['logger'] = logger
        else:
            logger = None
        trainer = gan.Train(**args)
        trainer.apply()
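
# A minimal usage sketch (the file and column names are hypothetical; `store`
# and `bin_size` are optional keys handled above):
#
#   train(
#       data='patients.csv',        # or a pandas.DataFrame
#       column=['gender', 'age'],
#       continuous=['age'],         # 'age' is binned via ContinuousToDiscrete
#       id='patient_id',
#       logs='logs',
#       context='demo'
#   )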
def post(**args):
    """
    This function uploads the tensorflow checkpoint to a data-store (mongodb, bigquery, s3)
    """
    pass

def get(**args):
    """
    This function will restore a checkpoint from persistent storage on to disk
    """
    pass
def generate(**args):
    """
    This function will generate a synthetic dataset on the basis of a model that has been learnt for the dataset
    @return pandas.DataFrame
    :data       data-frame to be synthesized
    :column     column(s) that need to be synthesized (discrete)
    :id         column identifying an entity
    :logs       location on disk where the learnt knowledge of the dataset is stored
    """
    df = args['data'] if not isinstance(args['data'], str) else pd.read_csv(args['data'])
    CONTINUOUS = args['continuous'] if 'continuous' in args else []
    column = args['column'] if isinstance(args['column'], list) else [args['column']]
    # column_id = args['id']
    #
    # @TODO:
    # If the identifier is not present, we should find a way to determine or make one
    #
    _df = df.copy()
    for col in column:
        args['context'] = col
        args['column'] = col
        values = df[col].unique().tolist()
        args['values'] = values
        args['row_count'] = df.shape[0]
        #
        # we can determine the cardinalities here so we know what to allow or disallow
        handler = gan.Predict(**args)
        handler.load_meta(col)
        r = handler.apply()
        BIN_SIZE = 4 if 'bin_size' not in args else int(args['bin_size'])
        #
        # continuous attributes are approximated back from their one-hot bin encoding
        _df[col] = ContinuousToDiscrete.continuous(r[col], BIN_SIZE) if 'float' in df[col].dtypes.name or col in CONTINUOUS else r[col]
        #
        # @TODO: log basic stats about the synthetic attribute
        #
    return _df
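
# A minimal usage sketch mirroring `train` (hypothetical names; the model
# learnt by `train` is located through the same `logs`/`context` keys):
#
#   synthetic = generate(
#       data='patients.csv',
#       column=['gender', 'age'],
#       continuous=['age'],
#       id='patient_id',
#       logs='logs'
#   )
#   # `synthetic` is a pandas.DataFrame in which the listed columns have been
#   # replaced with generated values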