merger issues fix schema ...

commit cddb6a98c8
parent 9b00473ba4
@@ -414,8 +414,10 @@ class Parser (Process):
     def get_default_value(self,content,_code):

         util = Formatters()
         TOP_ROW = content[1].split('*')

         SUBMITTED_DATE = util.parse.date(TOP_ROW[4])

         CATEGORY= content[2].split('*')[1].strip()
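For context: X12 rows are `*`-delimited fields, so `get_default_value` above reads fields positionally out of the leading rows. A minimal standalone sketch of that indexing, assuming the submission date arrives as CCYYMMDD (a common X12 convention, not confirmed by this diff) and using a hypothetical `parse_date` in place of `Formatters().parse.date`:

# Sketch only: positional field access as in get_default_value above.
# parse_date is a hypothetical stand-in for Formatters().parse.date and
# assumes CCYYMMDD input, which may not match every feed.
from datetime import datetime

def parse_date(value):
    return datetime.strptime(value, '%Y%m%d').date()

def default_value(rows):
    top_row = rows[1].split('*')                    # fields of the second row
    submitted_date = parse_date(top_row[4])         # fifth field of that row
    category = rows[2].split('*')[1].strip()        # second field of the third row
    return {'submitted': submitted_date, 'category': category}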
@@ -451,18 +453,29 @@ class Parser (Process):
         file = file.read().split('CLP')
         _code = '835'
         section = 'CLP'

         if len(file) == 1 :

             file = file[0].split('CLM')
             _code = '837'
             section = 'HL'

         INITIAL_ROWS = file[0].split(section)[0].split('\n')

         if len(INITIAL_ROWS) == 1 :
-            INITIIAL_ROWS = INITIAL_ROWS[0].split('~')
+            INITIAL_ROWS = INITIAL_ROWS[0].split('~')

+        # for item in file[1:] :
+        #     item = item.replace('~','\n')
+        # print (INITIAL_ROWS)

         DEFAULT_VALUE = self.get.default_value(INITIAL_ROWS,_code)
         DEFAULT_VALUE['name'] = filename.strip()
+        print (json.dumps(DEFAULT_VALUE))
         file = section.join(file).split('\n')
         if len(file) == 1:

             file = file[0].split('~')
         #
         # In the initial rows, there's redundant information (so much for x12 standard)
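The hunk above decides between the two transaction types by probing for their claim segment: an 835 remittance carries CLP segments, an 837 claim carries CLM, and files that arrive without newlines are re-split on the `~` segment terminator. Restated as a self-contained sketch of just that detection logic:

# Restatement of the detection logic in the hunk above, nothing more.
def detect_transaction(raw):
    blocks = raw.split('CLP')
    if len(blocks) > 1:
        return '835', 'CLP', blocks        # remittance: CLP claim segments
    return '837', 'HL', raw.split('CLM')   # claim: CLM segments, HL loops

def as_rows(text):
    rows = text.split('\n')
    if len(rows) == 1:                     # no newlines: segments end with '~'
        rows = rows[0].split('~')
    return rows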
@@ -504,10 +517,13 @@ class Parser (Process):
                     schema = {"properties":dict.fromkeys(schema,{"mergeStrategy":"append"})}

                 else:
-                    schema = {}
+                    schema = None
                 merger = jsonmerge.Merger(schema)
-                _claim = merger.merge(DEFAULT_VALUE.copy(),_claim)
+                if not schema :
+                    merger = jsonmerge
+                _claim = merger.merge(DEFAULT_VALUE.copy(),_claim)
+                print (['billing_pr_name' in _claim, 'billing_pr_name' in DEFAULT_VALUE])
                 claims.append( _claim)
                 segment = [row]

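This is the schema fix the commit message refers to: keys that repeat within a claim get a jsonmerge schema marking them with the `append` merge strategy, and when no such keys exist the schema is now `None`, so the code falls back to the module-level `jsonmerge.merge` (which shares the `merge(base, head)` signature with a `Merger` instance) rather than merging through an empty schema. A self-contained illustration with made-up claim data:

# Illustration of the merge strategy above; the claim data is made up.
import jsonmerge

base = {'claim_id': 'C1', 'procedures': [{'cpt': '99213'}]}
head = {'procedures': [{'cpt': '87070'}]}

repeated = ['procedures']                  # keys that occur more than once
if repeated:
    schema = {"properties": dict.fromkeys(repeated, {"mergeStrategy": "append"})}
    merger = jsonmerge.Merger(schema)      # append arrays for the listed keys
else:
    merger = jsonmerge                     # module-level merge, default strategies

print(merger.merge(base, head))
# {'claim_id': 'C1', 'procedures': [{'cpt': '99213'}, {'cpt': '87070'}]}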
@@ -529,7 +545,9 @@ class Parser (Process):
                     schema = {"properties":dict.fromkeys(schema,{"mergeStrategy":"append"})}

                 else:
-                    schema = {}
+                    schema = None
+                if not schema :
+                    merger = jsonmerge
                 merger = jsonmerge.Merger(schema)
                 # top_row_claim = self.apply(_toprows,_code)

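Worth noting: in this second occurrence the unconditional `merger = jsonmerge.Merger(schema)` sits after the `if not schema` fallback, so the fallback assignment is immediately overwritten, whereas in the first occurrence the fallback wins. If both call sites are meant to behave the same, a mutually exclusive guard keeps the module-level fallback intact; a sketch, assuming that intent:

# Sketch, assuming both call sites intend the same fallback behavior.
if not schema:
    merger = jsonmerge                 # module-level merge, no schema
else:
    merger = jsonmerge.Merger(schema)  # schema-driven merge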
setup.py (2 changed lines)
@@ -8,7 +8,7 @@ import sys
 def read(fname):
     return open(os.path.join(os.path.dirname(__file__), fname)).read()
 args = {
-    "name":"healthcareio","version":"1.6.2.16",
+    "name":"healthcareio","version":"1.6.2.18",
     "author":"Vanderbilt University Medical Center",
     "author_email":"steve.l.nyemba@vumc.org",
     "include_package_data":True,