removing notebooks

Steve Nyemba 2019-03-05 12:07:09 -06:00
parent b724353155
commit 111d672d3c
8 changed files with 0 additions and 2967 deletions


@@ -1,273 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"from google.cloud import bigquery as bq\n",
"\n",
"client = bq.Client.from_service_account_json('/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
"xo = ['person_id','date_of_birth','race']\n",
"xi = ['person_id','value_as_number','value_source_value']"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [],
"source": [
"def get_tables(client,did,fields=[]):\n",
" \"\"\"\n",
" getting table lists from google\n",
" \"\"\"\n",
" r = []\n",
" ref = client.dataset(id)\n",
" tables = list(client.list_tables(ref))\n",
" for table in tables :\n",
" ref = table.reference\n",
" schema = client.get_table(ref).schema\n",
" names = [f.field_name for f in schema]\n",
" x = list(set(names) & set(fields))\n",
" if x :\n",
" r.append({\"name\":table.table_id,\"fields\":names})\n",
" return r\n",
" \n",
"def get_fields(**args):\n",
" \"\"\"\n",
" This function will generate a random set of fields from two tables. Tables are structured as follows \n",
" {name,fields:[],\"y\":}, with \n",
" name table name (needed to generate sql query)\n",
" fields list of field names, used in the projection\n",
" y name of the field to be joined.\n",
" @param xo candidate table in the join\n",
" @param xi candidate table in the join\n",
" @param join field by which the tables can be joined.\n",
" \"\"\"\n",
" # The set operation will remove redundancies in the field names (not sure it's a good idea)\n",
" xo = args['xo']['fields']\n",
" xi = args['xi']['fields']\n",
" zi = args['xi']['name']\n",
" return list(set(xo) | set(['.'.join([args['xi']['name'],name]) for name in xi if name != args['join']]) )\n",
"def generate_sql(**args):\n",
" \"\"\"\n",
" This function will generate the SQL query for the resulting join\n",
" \"\"\"\n",
" xo = args['xo']\n",
" xi = args['xi']\n",
" sql = \"SELECT :fields FROM :xo.name INNER JOIN :xi.name ON :xi.name.:xi.y = :xo.y \"\n",
" fields = \",\".join(get_fields(xo=xi,xi=xi,join=xi['y']))\n",
" \n",
" \n",
" sql = sql.replace(\":fields\",fields).replace(\":xo.name\",xo['name']).replace(\":xi.name\",xi['name'])\n",
" sql = sql.replace(\":xi.y\",xi['y']).replace(\":xo.y\",xo['y'])\n",
" return sql\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['person_id',\n",
" 'measurements.value_as_number',\n",
" 'date_of_birth',\n",
" 'race',\n",
" 'measurements.value_source_value']"
]
},
"execution_count": 54,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race']}\n",
"xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value']}\n",
"get_fields(xo=xo,xi=xi,join=\"person_id\")"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'SELECT person_id,value_as_number,measurements.value_source_value,measurements.value_as_number,value_source_value FROM person INNER JOIN measurements ON measurements.person_id = person_id '"
]
},
"execution_count": 55,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race'],\"y\":\"person_id\"}\n",
"xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value'],\"y\":\"person_id\"}\n",
"generate_sql(xo=xo,xi=xi)"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('a', 'b'), ('a', 'c'), ('b', 'c')]"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\"\"\"\n",
" We are designing a process that will take two tables that will generate \n",
"\"\"\"\n",
"import itertools\n",
"list(itertools.combinations(['a','b','c'],2))"
]
},
{
"cell_type": "code",
"execution_count": 87,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"TableReference(DatasetReference(u'aou-res-deid-vumc-test', u'raw'), 'care_site')"
]
},
"execution_count": 87,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ref = client.dataset('raw')\n",
"tables = list(client.list_tables(ref))\n",
"names = [table.table_id for table in tables]\n",
"(tables[0].reference)"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(u'care_site',\n",
" u'concept',\n",
" u'concept_ancestor',\n",
" u'concept_class',\n",
" u'concept_relationship',\n",
" u'concept_synonym',\n",
" u'condition_occurrence',\n",
" u'criteria',\n",
" u'death',\n",
" u'device_exposure',\n",
" u'domain',\n",
" u'drug_exposure',\n",
" u'drug_strength',\n",
" u'location',\n",
" u'measurement',\n",
" u'note',\n",
" u'observation',\n",
" u'people_seed',\n",
" u'person',\n",
" u'procedure_occurrence',\n",
" u'relationship',\n",
" u'visit_occurrence',\n",
" u'vocabulary')"
]
},
"execution_count": 85,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#\n",
"# find every table with person id at the very least or a subset of fields\n",
"#\n",
"def get_tables\n",
"q = ['person_id']\n",
"pairs = list(itertools.combinations(names,len(names)))\n",
"pairs[0]"
]
},
{
"cell_type": "code",
"execution_count": 90,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['a']"
]
},
"execution_count": 90,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(set(['a','b']) & set(['a']))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
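
The notebook above generated ad-hoc inner-join queries from two table descriptors. For reference, a minimal corrected sketch of that idea in plain Python; the table descriptors and field names are the ones used in the cells above, while the query template and the qualification of the outer join key are assumptions, not the project's final SQL:

def get_fields(**args):
    # Union of the outer table's fields with the inner table's fields,
    # prefixing inner fields with their table name and dropping the join key.
    xo = args['xo']['fields']
    xi = args['xi']['fields']
    zi = args['xi']['name']
    return list(set(xo) | set(['.'.join([zi, name]) for name in xi if name != args['join']]))

def generate_sql(**args):
    # Build an INNER JOIN that projects the combined field list;
    # the outer join key is qualified with its table name to avoid ambiguity.
    xo, xi = args['xo'], args['xi']
    fields = ",".join(get_fields(xo=xo, xi=xi, join=xi['y']))
    sql = "SELECT :fields FROM :xo.name INNER JOIN :xi.name ON :xi.name.:xi.y = :xo.name.:xo.y"
    sql = sql.replace(":fields", fields).replace(":xo.name", xo['name']).replace(":xi.name", xi['name'])
    return sql.replace(":xi.y", xi['y']).replace(":xo.y", xo['y'])

xo = {"name": "person", "fields": ['person_id', 'date_of_birth', 'race'], "y": "person_id"}
xi = {"name": "measurements", "fields": ['person_id', 'value_as_number', 'value_source_value'], "y": "person_id"}
print(generate_sql(xo=xo, xi=xi))
# SELECT ... FROM person INNER JOIN measurements ON measurements.person_id = person.person_id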


@@ -1,238 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import itertools \n",
"import pandas as pd\n",
"import numpy as np\n",
"# from pandas_risk import *\n",
"from time import time\n",
"import os\n",
"\n",
"attr = ['gender','race','zip','year_of_birth']\n",
"comb_attr = [\n",
" ['zip' ,'gender', 'birth_datetime', 'race'], \n",
" ['zip', 'gender', 'year_of_birth', 'race'], \n",
" ['gender','race','zip'],\n",
" ['race','year_of_birth','zip']\n",
"]\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"SQL_CONTROLLED=\"SELECT * FROM deid_risk.basic_risk60k\"\n",
"dfc = pd.read_gbq(SQL_CONTROLLED,private_key='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json')\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def risk(**args):\n",
" Yi = args['data']\n",
" Yi = Yi.fillna(' ')\n",
" sizes = args['prop'] if 'prop' in args else np.arange(5,100,5)\n",
" FLAG = args['flag'] if 'flag' in args else 'UNFLAGGED'\n",
" N = args['num_runs']\n",
" if 'cols' in args :\n",
" columns = args['cols']\n",
" else:\n",
" columns = list(set(Yi.columns.tolist()) - set(['person_id']))\n",
" p = pd.DataFrame()\n",
" y_i= pd.DataFrame({\"group_size\":Yi.groupby(columns,as_index=False).size()}).reset_index()\n",
" for index in sizes :\n",
" for n in np.repeat(index,N):\n",
" \n",
" # we will randomly sample n% rows from the dataset\n",
" i = np.random.choice(Yi.shape[0],((Yi.shape[0] * n)/100),replace=False)\n",
" x_i= pd.DataFrame(Yi).loc[i] \n",
" risk = x_i.deid.risk(id='person_id',quasi_id = columns)\n",
" x_i = pd.DataFrame({\"group_size\":x_i.groupby(columns,as_index=False).size()}).reset_index()\n",
"\n",
"\n",
" r = pd.merge(x_i,y_i,on=columns,how='inner')\n",
" if r.shape[0] == 0 :\n",
" continue\n",
" r['marketer'] = r.apply(lambda row: (row.group_size_x / np.float64(row.group_size_y)) /np.sum(x_i.group_size) ,axis=1)\n",
" r['sample %'] = np.repeat(n,r.shape[0])\n",
" r['tier'] = np.repeat(FLAG,r.shape[0])\n",
" r['sample marketer'] = np.repeat(risk['marketer'].values[0],r.shape[0])\n",
" # r['patient_count'] = np.repeat(r.shape[0],r.shape[0])\n",
" r = r.groupby(['sample %','tier','sample marketer'],as_index=False).sum()[['sample %','marketer','sample marketer','tier']]\n",
" p = p.append(r)\n",
" p.index = np.arange(p.shape[0]).astype(np.int64)\n",
" return p\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from pandas_risk import *\n",
"o = pd.DataFrame()\n",
"PATH=\"out/experiment-phase-2.xlsx\"\n",
"writer = pd.ExcelWriter(PATH,engine='xlsxwriter')\n",
"comb_attr = [\n",
" ['zip' ,'gender', 'birth_datetime', 'race'], \n",
" ['zip', 'gender', 'year_of_birth', 'race'], \n",
" ['gender','race','zip'],\n",
" ['race','year_of_birth','zip']\n",
"]\n",
"\n",
"for cols in comb_attr :\n",
" o = risk(data=dfc,cols=cols,flag='CONTROLLED',num_runs=5)\n",
" #\n",
" # adding the policy\n",
" x = [1* dfc.columns.isin(cols) for i in range(o.shape[0])]\n",
" o = o.join(pd.DataFrame(x,columns = dfc.columns))\n",
" #\n",
" # Write this to excel notebook\n",
" o.to_excel(writer,\"-\".join(cols))\n",
"# break\n",
" \n",
"\n",
"# p = p.rename(columns={'marketer_x':'sample marketer'})\n",
"# p.index = np.arange(p.shape[0]).astype(np.int64)\n",
"\n",
"writer.save()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>person_id</th>\n",
" <th>year_of_birth</th>\n",
" <th>month_of_birth</th>\n",
" <th>day_of_birth</th>\n",
" <th>birth_datetime</th>\n",
" <th>race_concept_id</th>\n",
" <th>ethnicity_concept_id</th>\n",
" <th>location_id</th>\n",
" <th>care_site_id</th>\n",
" <th>person_source_value</th>\n",
" <th>...</th>\n",
" <th>gender_source_concept_id</th>\n",
" <th>race_source_value</th>\n",
" <th>ethnicity_source_value</th>\n",
" <th>sex_at_birth</th>\n",
" <th>birth_date</th>\n",
" <th>race</th>\n",
" <th>zip</th>\n",
" <th>city</th>\n",
" <th>state</th>\n",
" <th>gender</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" </tbody>\n",
"</table>\n",
"<p>0 rows × 21 columns</p>\n",
"</div>"
],
"text/plain": [
"Empty DataFrame\n",
"Columns: [person_id, year_of_birth, month_of_birth, day_of_birth, birth_datetime, race_concept_id, ethnicity_concept_id, location_id, care_site_id, person_source_value, gender_source_value, gender_source_concept_id, race_source_value, ethnicity_source_value, sex_at_birth, birth_date, race, zip, city, state, gender]\n",
"Index: []\n",
"\n",
"[0 rows x 21 columns]"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x = [1* dfc.columns.isin(cols) for i in range(o.shape[0])]\n",
"o.join(pd.DataFrame(x,columns = dfc.columns))\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'columns' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-6-8e7b9895361f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcolumns\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'columns' is not defined"
]
}
],
"source": [
"columns\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
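
The risk() routine in the notebook above estimates how the marketer risk behaves when only a random sample of the data is released. A simplified sketch of that sampling loop, using the marketer definition from the pandas_risk accessor reproduced in the last notebook below (number of equivalence classes divided by number of records); the column list and sample percentages are illustrative:

import numpy as np
import pandas as pd

def marketer_risk(df, quasi_ids):
    # marketer risk = number of equivalence classes / number of records
    groups = df.fillna(' ').groupby(quasi_ids).size()
    return groups.size / np.float64(groups.sum())

def sample_marketer(df, quasi_ids, percents=range(5, 105, 5), num_runs=5):
    # Re-estimate the marketer risk on random subsamples of the data,
    # drawing num_runs samples for each sample size.
    rows = []
    for pct in percents:
        n = int(df.shape[0] * pct / 100)
        for _ in range(num_runs):
            idx = np.random.choice(df.shape[0], n, replace=False)
            rows.append({'sample %': pct,
                         'marketer': marketer_risk(df.iloc[idx], quasi_ids)})
    return pd.DataFrame(rows)

# e.g. sample_marketer(dfc, ['gender', 'race', 'zip', 'year_of_birth'])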

File diff suppressed because one or more lines are too long


@@ -1,95 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n"
]
},
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\"\"\"\n",
" This notebook is designed to generate SQL syntax all the quasi-identifiers for the patients in the database\n",
" The resulting SQL will be run against bigquery to produce a table with every record mapping to a patient\n",
" \n",
"\"\"\"\n",
"\n",
"from risk import *\n",
"ihandle = UtilHandler(path='/home/steve/dev/google-cloud-sdk/accounts/curation-prod.json',dataset='combined20180822',key_field='person_id',key_table='person',filter=['person','observation'])\n",
"r = ihandle.migrate_tables()\n",
"len(r)\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"u' SELECT person.person_id , person.year_of_birth , person.month_of_birth , person.day_of_birth , person.birth_datetime , person.race_concept_id , person.ethnicity_concept_id , person.location_id , person.care_site_id , person.person_source_value , person.gender_source_value , person.gender_source_concept_id , person.race_source_value , person.ethnicity_source_value , basic_observation.sex_at_birth AS sex_at_birth1 , basic_observation.birth_date AS birth_date1 , basic_observation.race AS race1 , basic_observation.zip AS zip1 , basic_observation.city AS city1 , basic_observation.state AS state1 , basic_observation.gender AS gender1 FROM (select * from deid_image.person ) as person INNER JOIN (select * from deid_image.basic_observation ) as basic_observation ON basic_observation.person_id = person.person_id '"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ihandle = UtilHandler(path='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json',dataset='deid_image',key_field='person_id',key_table='person',filter=['person','basic_observation'])\n",
"ihandle.create_table().replace('\\n',' ')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long


@@ -1,385 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"\"\"\"\n",
"The experiments here describe medical/family history as they associate with risk measures\n",
"Additionally we will have fractional risk assessments\n",
"\"\"\"\n",
"import pandas as pd\n",
"import numpy as np\n",
"from pandas_risk import *\n",
"dfm = pd.read_gbq(\"SELECT * FROM deid_risk.registered_medical_history_dec_001\",private_key='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json')\n",
"dff = pd.read_gbq(\"SELECT * FROM deid_risk.registered_family_history_dec_001\",private_key='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json')\n",
"df = pd.read_gbq(\"SELECT person_id, birth_date,city,state,home_owner,race,ethnicity,gender,birth_place,marital_status,orientation,education,employment_status,income,travel_abroad_6_months,active_duty_status FROM deid_risk.registered_dec_01\",private_key='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json')"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"med_cols = np.random.choice(list(set(dfm.columns.tolist()) - set(['person_id'])),3).tolist()\n",
"fam_cols = np.random.choice(list(set(dff.columns.tolist()) - set(['person_id'])),3).tolist()\n",
"medical = pd.merge(df,dfm[med_cols+['person_id']],on='person_id')\n",
"family = pd.merge(df,dff[fam_cols + ['person_id']],on='person_id')\n",
"_tmp = pd.merge(dfm[med_cols +['person_id']],dff[fam_cols+['person_id']])\n",
"data = pd.merge(df,_tmp,on='person_id')"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>field_count</th>\n",
" <th>flag</th>\n",
" <th>group_count</th>\n",
" <th>marketer</th>\n",
" <th>prosecutor</th>\n",
" <th>unique_row_ratio</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>21</td>\n",
" <td>full history</td>\n",
" <td>115308</td>\n",
" <td>0.992691</td>\n",
" <td>1.0</td>\n",
" <td>0.987663</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>18</td>\n",
" <td>medical</td>\n",
" <td>115306</td>\n",
" <td>0.992674</td>\n",
" <td>1.0</td>\n",
" <td>0.987629</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>18</td>\n",
" <td>family</td>\n",
" <td>115304</td>\n",
" <td>0.992656</td>\n",
" <td>1.0</td>\n",
" <td>0.987594</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>15</td>\n",
" <td>no-history</td>\n",
" <td>115300</td>\n",
" <td>0.992622</td>\n",
" <td>1.0</td>\n",
" <td>0.987526</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>3</td>\n",
" <td>medical-only</td>\n",
" <td>27</td>\n",
" <td>0.000232</td>\n",
" <td>0.5</td>\n",
" <td>0.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>3</td>\n",
" <td>family-only</td>\n",
" <td>146</td>\n",
" <td>0.001257</td>\n",
" <td>1.0</td>\n",
" <td>0.000551</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" field_count flag group_count marketer prosecutor \\\n",
"0 21 full history 115308 0.992691 1.0 \n",
"1 18 medical 115306 0.992674 1.0 \n",
"2 18 family 115304 0.992656 1.0 \n",
"3 15 no-history 115300 0.992622 1.0 \n",
"4 3 medical-only 27 0.000232 0.5 \n",
"5 3 family-only 146 0.001257 1.0 \n",
"\n",
" unique_row_ratio \n",
"0 0.987663 \n",
"1 0.987629 \n",
"2 0.987594 \n",
"3 0.987526 \n",
"4 0.000000 \n",
"5 0.000551 "
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.concat([data.deid.evaluate(flag='full history',cols= list(set(data.columns.tolist()) - set(['person_id'])) )\n",
" ,medical.deid.evaluate(flag='medical',cols=list( set(medical.columns.tolist() ) - set(['person_id']) ) )\n",
" ,family.deid.evaluate(flag='family',cols=list( set(family.columns.tolist() ) - set(['person_id']) ) )\n",
" ,df.deid.evaluate(flag='no-history',cols=list( set(df.columns.tolist() ) - set(['person_id']) ) )\n",
" , dfm.deid.evaluate(flag='medical-only',cols=med_cols )\n",
" , dff.deid.evaluate(flag='family-only',cols=fam_cols )\n",
" ],ignore_index=True)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import division\n",
"def evaluate(df) :\n",
" cols = list(set(df.columns.tolist()) - set(['person_id']))\n",
" \n",
" portions = np.round(np.random.random_sample(4),3).tolist() + np.arange(5,105,5).tolist()\n",
" \n",
" N = df.shape[0] - 1\n",
" portions = np.divide(np.multiply(portions,N),100).astype(np.int64)\n",
" portions = np.unique([n for n in portions if n > 1])\n",
" \n",
" r = pd.DataFrame()\n",
" for num_rows in portions :\n",
" \n",
" indices = np.random.choice(N,num_rows,replace=False)\n",
"# print (indices.size / N)\n",
" flag = \" \".join([str( np.round(100*indices.size/ N,2)),'%'])\n",
" r = r.append(df.loc[indices].deid.evaluate(cols=cols,flag=flag,min_group_size=2))\n",
" return r"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>field_count</th>\n",
" <th>flag</th>\n",
" <th>group_count</th>\n",
" <th>marketer</th>\n",
" <th>prosecutor</th>\n",
" <th>unique_row_ratio</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>11</td>\n",
" <td>UNFLAGGED</td>\n",
" <td>114886</td>\n",
" <td>0.989058</td>\n",
" <td>1.0</td>\n",
" <td>0.980535</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" field_count flag group_count marketer prosecutor unique_row_ratio\n",
"0 11 UNFLAGGED 114886 0.989058 1.0 0.980535"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cols = list(set (df.columns.tolist()) - set(['person_id']))\n",
"df[['race','state','gender_identity','ethnicity','marital_status','education','orientation','sex_at_birth','birth_date','travel_abroad_6_months','active_duty_status']].deid.evaluate()"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['person_id',\n",
" 'HearingVision_FarSightedness',\n",
" 'HearingVision_Glaucoma',\n",
" 'Digestive_Pancreatitis']"
]
},
"execution_count": 68,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#\n",
"# This is the merge with medical history\n",
"\n",
"cols = ['person_id'] + np.random.choice(dfm.columns[1:],3,replace=False).tolist()\n",
"p = pd.merge(df,dfm[cols],on='person_id')\n",
"cols\n",
"# # cols = list(set(p.columns.tolist()) - set(['person_id']))\n",
"# evaluate(p) #p.deid.explore(cols=cols,num_runs=100)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"cols = list( set(dfm.columns.tolist()) - set(['person_id']))\n",
"cols = np.random.choice(cols,3,replace=False).tolist()\n",
"p = pd.merge(dfm[['person_id']+cols],df)\n",
"fcols = list(set(p.columns.tolist()) - set(['person_id']))\n",
"# dfm[cols].deid.evaluate(cols=list( set(cols) - set(['person_id'])))"
]
},
{
"cell_type": "markdown",
"metadata": {
"variables": {
" \" ; \".join(cols)": "InfectiousDiseases_HepatitisC ; Cancer_StomachCancer ; Circulatory_Hypertension",
" p.shape[0] ": "116157",
" p[fcols].deid.evaluate() ": "<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>field_count</th>\n <th>flag</th>\n <th>group_count</th>\n <th>marketer</th>\n <th>prosecutor</th>\n <th>unique_row_ratio</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>37</td>\n <td>UNFLAGGED</td>\n <td>115397</td>\n <td>0.993457</td>\n <td>1.0</td>\n <td>0.98886</td>\n </tr>\n </tbody>\n</table>\n</div>"
}
},
"source": [
"### Medical History\n",
"\n",
" We randomly select three a tributes {{ \" ; \".join(cols)}} . \n",
" The dataset associated risk evaluation contains {{ p.shape[0] }} records\n",
"{{ p[fcols].deid.evaluate() }}\n",
"\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['person_id',\n",
" 'InfectiousDiseases_Tuberculosis',\n",
" 'SkeletalMuscular_Fibromyalgia',\n",
" 'Cancer_ProstateCancer']"
]
},
"execution_count": 52,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cols"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3"
]
},
"execution_count": 67,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# dfm[cols[1:]].head()\n",
"np.sum(dfm.fillna(' ').groupby(cols[1:],as_index=False).size().values <= 1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
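
The deid.evaluate() calls above come from the project's pandas_risk extension. As a rough sketch of the metrics it reports: marketer and prosecutor follow the accessor code reproduced verbatim in the last notebook below, while unique_row_ratio is assumed here to be the share of records that are unique on the chosen quasi-identifiers:

import numpy as np
import pandas as pd

def evaluate_sketch(df, cols, flag='UNFLAGGED'):
    # Group the records by the quasi-identifiers and derive re-identification metrics.
    sizes = df.fillna(' ').groupby(cols).size()
    total = np.float64(sizes.sum())
    return pd.DataFrame([{
        'field_count': len(cols),
        'flag': flag,
        'group_count': sizes.size,
        'marketer': sizes.size / total,               # distinct groups / records
        'prosecutor': 1 / np.float64(sizes.min()),    # 1 / smallest group size
        'unique_row_ratio': (sizes == 1).sum() / total  # assumed: share of unique records
    }])

# e.g. evaluate_sketch(df, ['race', 'gender', 'zip'], flag='no-history')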

File diff suppressed because one or more lines are too long


@@ -1,293 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"\"\"\"\n",
" This notebook is intended to show how to use the risk framework:\n",
" There are two basic usages:\n",
" 1. Experiment\n",
" \n",
" Here the framework will select a number of random fields other than the patient id and compute risk for the selection.\n",
" This will repeat over a designated number of runs.\n",
" \n",
" The parameters to pass to enable this mode are id=<patient id>,nun_runs=<number of runs>\n",
" 2. Assessment\n",
" \n",
" Here the framework assumes you are only interested in a list of quasi identifiers and will run the evaluation once for a given list of quasi identifiers.\n",
" The parameters to enable this mode are id=<patient id>,quasi_id=<list of quasi ids>\n",
"\"\"\"\n",
"import os\n",
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"\n",
"#\n",
"#-- Loading a template file\n",
"# The example taken a de-identification white-paper\n",
"# http://www.ehealthinformation.ca/wp-content/uploads/2014/08/2009-De-identification-PA-whitepaper1.pdf\n",
"#\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"from io import StringIO\n",
"csv = \"\"\"\n",
"id,sex,age,profession,drug_test\n",
"1,M,37,doctor,-\n",
"2,F,28,doctor,+\n",
"3,M,37,doctor,-\n",
"4,M,28,doctor,+\n",
"5,M,28,doctor,-\n",
"6,M,37,doctor,-\n",
"\"\"\"\n",
"f = StringIO()\n",
"f.write(unicode(csv))\n",
"f.seek(0)\n",
"MY_DATAFRAME = pd.read_csv(f) "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"\"\"\"\n",
" Here's the pandas_risk code verbatim. \n",
" NOTE: \n",
"\"\"\"\n",
"@pd.api.extensions.register_dataframe_accessor(\"deid\")\n",
"class deid :\n",
" \"\"\"\n",
" This class is a deidentification class that will compute risk (marketer, prosecutor) given a pandas dataframe\n",
" \"\"\"\n",
" def __init__(self,df):\n",
" self._df = df\n",
" \n",
" def risk(self,**args):\n",
" \"\"\"\n",
" @param id name of patient field \n",
" @params num_runs number of runs (default will be 100)\n",
" @params quasi_id \tlist of quasi identifiers to be used (this will only perform a single run)\n",
" \"\"\"\n",
" \n",
" id = args['id']\n",
" if 'quasi_id' in args :\n",
" num_runs = 1\n",
" columns = list(set(args['quasi_id'])- set(id) )\n",
" else :\n",
" num_runs = args['num_runs'] if 'num_runs' in args else 100\n",
" columns = list(set(self._df.columns) - set([id]))\n",
" r = pd.DataFrame() \n",
" k = len(columns)\n",
" for i in range(0,num_runs) :\n",
" #\n",
" # let's chose a random number of columns and compute marketer and prosecutor risk\n",
" # Once the fields are selected we run a groupby clause\n",
" #\n",
" if 'quasi_id' not in args :\n",
" n = np.random.randint(2,k) #-- number of random fields we are picking\n",
" ii = np.random.choice(k,n,replace=False)\n",
" cols = np.array(columns)[ii].tolist()\n",
" else:\n",
" cols \t= columns\n",
" n \t= len(cols)\n",
" x_ = self._df.groupby(cols).count()[id].values\n",
" r = r.append(\n",
" pd.DataFrame(\n",
" [\n",
" {\n",
" \"selected\":n,\n",
" \"marketer\": x_.size / np.float64(np.sum(x_)),\n",
" \"prosecutor\":1 / np.float64(np.min(x_))\n",
"\n",
" }\n",
" ]\n",
" )\n",
" )\n",
" g_size = x_.size\n",
" n_ids = np.float64(np.sum(x_))\n",
"\n",
" return r"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>marketer</th>\n",
" <th>prosecutor</th>\n",
" <th>selected</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.500000</td>\n",
" <td>1.0</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.500000</td>\n",
" <td>1.0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.500000</td>\n",
" <td>1.0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.333333</td>\n",
" <td>1.0</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.333333</td>\n",
" <td>0.5</td>\n",
" <td>2</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" marketer prosecutor selected\n",
"0 0.500000 1.0 2\n",
"0 0.500000 1.0 3\n",
"0 0.500000 1.0 3\n",
"0 0.333333 1.0 2\n",
"0 0.333333 0.5 2"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#\n",
"# Lets us compute risk here for a random any random selection of quasi identifiers\n",
"# We will run this experiment 5 times\n",
"#\n",
"MY_DATAFRAME.deid.risk(id='id',num_runs=5)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>marketer</th>\n",
" <th>prosecutor</th>\n",
" <th>selected</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.5</td>\n",
" <td>1.0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" marketer prosecutor selected\n",
"0 0.5 1.0 3"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#\n",
"# In this scenario we are just interested in sex,profession,age\n",
"#\n",
"MY_DATAFRAME.deid.risk(id='id',quasi_id=['age','sex','profession'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
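
The assessment result above (marketer 0.5, prosecutor 1.0 for age, sex and profession) can be checked by hand on the whitepaper example, reusing MY_DATAFRAME from the first cell:

# sex, age and profession split the 6 records into 3 equivalence classes:
# (M,37,doctor) x3, (F,28,doctor) x1, (M,28,doctor) x2
groups = MY_DATAFRAME.groupby(['sex', 'age', 'profession']).size()
print(groups.size / float(MY_DATAFRAME.shape[0]))  # marketer   = 3 / 6 = 0.5
print(1.0 / groups.min())                          # prosecutor = 1 / 1 = 1.0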