Compare commits

..

No commits in common. "ca09ea0202c8acd76405255559892b783ba5afb1" and "dd58a92eb5cbe64f86481bf8ad58de44793402f5" have entirely different histories.

1 changed file with 15 additions and 38 deletions

View File

@ -192,10 +192,9 @@ class Learner(Process):
finally:
pass
# _log[name] = self._df[name].dtypes.name
# _log[name] = reader.meta()
# _log = {'action':'structure','input':_log}
# self.log(**_log)
_log[name] = self._df[name].dtypes.name
_log = {'action':'structure','input':_log}
self.log(**_log)
#
# convert the data to binary here ...
_schema = self.get_schema()
@ -452,10 +451,10 @@ class Generator (Learner):
FORMAT = '%Y-%m-%-d %H:%M:%S'
SIZE = 19
# if SIZE > 0 :
if SIZE > 0 :
# values = pd.to_datetime(_df[name], format=FORMAT).astype(np.datetime64)
# # _df[name] = [_date[:SIZE].strip() for _date in values]
values = pd.to_datetime(_df[name], format=FORMAT).astype(np.datetime64)
# _df[name] = [_date[:SIZE].strip() for _date in values]
# _df[name] = _df[name].astype(str)
@ -465,7 +464,6 @@ class Generator (Learner):
pass #;_df[name] = _df[name].fillna('').astype('datetime64[ns]')
except Exception as e:
print (e)
pass
finally:
pass
@ -504,20 +502,12 @@ class Generator (Learner):
else:
_store = None
N = 0
_haslist = np.sum([type(_item)==list for _item in self.columns]) > 0
_schema = self.get_schema()
for _iodf in _candidates :
_df = self._df.copy()
if self.columns and _haslist is False:
if self.columns :
_df[self.columns] = _iodf[self.columns]
else:
_df = _iodf
N += _df.shape[0]
if self._states and 'post' in self._states:
_df = State.apply(_df,self._states['post'])
@ -538,27 +528,19 @@ class Generator (Learner):
_schema = self.get_schema()
_df = self.format(_df,_schema)
# _log = [{"name":_schema[i]['name'],"dataframe":_df[_df.columns[i]].dtypes.name,"schema":_schema[i]['type']} for i in np.arange(len(_schema)) ]
self.log(**{"action":"consolidate","input":{"rows":N,"candidate":_candidates.index(_iodf)}})
_log = [{"name":_schema[i]['name'],"dataframe":_df[_df.columns[i]].dtypes.name,"schema":_schema[i]['type']} for i in np.arange(len(_schema)) ]
self.log(**{"action":"consolidate","input":_log})
if _store :
_log = {'action':'write','input':{'table':self.info['from'],'schema':[],'rows':_df.shape[0]}}
writer = transport.factory.instance(**_store)
writer = transport.factory.instance(**_store)
if _store['provider'] == 'bigquery':
try:
_log['schema'] = _schema
writer.write(_df,schema=_schema,table=self.info['from'])
except Exception as e:
_log['schema'] = []
writer.write(_df,table=self.info['from'])
writer.write(_df,schema=[],table=self.info['from'])
else:
writer.write(_df,table=self.info['from'])
self.log(**_log)
else:
self.cache.append(_df)
@ -588,21 +570,17 @@ class Shuffle(Generator):
_invColumns = []
_colNames = []
_ucolNames= []
_rmColumns = []
for _item in self.info['columns'] :
if type(_item) == list :
_invColumns.append(_item)
_rmColumns += _item
elif _item in self._df.columns.tolist():
_colNames.append(_item)
#
# At this point we build the matrix of elements we are interested in considering the any unspecified column
#
if _colNames :
_invColumns.append(_colNames)
_ucolNames = list(set(self._df.columns) - set(_colNames) - set(_rmColumns))
_ucolNames = list(set(self._df.columns) - set(_colNames))
if _ucolNames :
_invColumns += [ [_name] for _name in _ucolNames]
@ -629,7 +607,6 @@ class Shuffle(Generator):
_log = {'action':'io-data','input':{'candidates':1,'rows':int(self._df.shape[0])}}
self.log(**_log)
try:
self.post([self._df])
self.log(**{'action':'completed','input':{'candidates':1,'rows':int(self._df.shape[0])}})
except Exception as e :