notebooks

Steve Nyemba 2018-12-11 17:43:16 -06:00
parent 43cbd12a1f
commit 0b16ce94cc
4 changed files with 944 additions and 1 deletion

238
notebooks/Untitled.ipynb Normal file

@@ -0,0 +1,238 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import itertools \n",
"import pandas as pd\n",
"import numpy as np\n",
"# from pandas_risk import *\n",
"from time import time\n",
"import os\n",
"\n",
"attr = ['gender','race','zip','year_of_birth']\n",
"comb_attr = [\n",
" ['zip' ,'gender', 'birth_datetime', 'race'], \n",
" ['zip', 'gender', 'year_of_birth', 'race'], \n",
" ['gender','race','zip'],\n",
" ['race','year_of_birth','zip']\n",
"]\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"SQL_CONTROLLED=\"SELECT * FROM deid_risk.basic_risk60k\"\n",
"dfc = pd.read_gbq(SQL_CONTROLLED,private_key='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json')\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def risk(**args):\n",
" Yi = args['data']\n",
" Yi = Yi.fillna(' ')\n",
" sizes = args['prop'] if 'prop' in args else np.arange(5,100,5)\n",
" FLAG = args['flag'] if 'flag' in args else 'UNFLAGGED'\n",
" N = args['num_runs']\n",
" if 'cols' in args :\n",
" columns = args['cols']\n",
" else:\n",
" columns = list(set(Yi.columns.tolist()) - set(['person_id']))\n",
" p = pd.DataFrame()\n",
" y_i= pd.DataFrame({\"group_size\":Yi.groupby(columns,as_index=False).size()}).reset_index()\n",
" for index in sizes :\n",
" for n in np.repeat(index,N):\n",
" \n",
" # we will randomly sample n% rows from the dataset\n",
" i = np.random.choice(Yi.shape[0],((Yi.shape[0] * n)/100),replace=False)\n",
" x_i= pd.DataFrame(Yi).loc[i] \n",
" risk = x_i.deid.risk(id='person_id',quasi_id = columns)\n",
" x_i = pd.DataFrame({\"group_size\":x_i.groupby(columns,as_index=False).size()}).reset_index()\n",
"\n",
"\n",
" r = pd.merge(x_i,y_i,on=columns,how='inner')\n",
" if r.shape[0] == 0 :\n",
" continue\n",
" r['marketer'] = r.apply(lambda row: (row.group_size_x / np.float64(row.group_size_y)) /np.sum(x_i.group_size) ,axis=1)\n",
" r['sample %'] = np.repeat(n,r.shape[0])\n",
" r['tier'] = np.repeat(FLAG,r.shape[0])\n",
" r['sample marketer'] = np.repeat(risk['marketer'].values[0],r.shape[0])\n",
" # r['patient_count'] = np.repeat(r.shape[0],r.shape[0])\n",
" r = r.groupby(['sample %','tier','sample marketer'],as_index=False).sum()[['sample %','marketer','sample marketer','tier']]\n",
" p = p.append(r)\n",
" p.index = np.arange(p.shape[0]).astype(np.int64)\n",
" return p\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from pandas_risk import *\n",
"o = pd.DataFrame()\n",
"PATH=\"out/experiment-phase-2.xlsx\"\n",
"writer = pd.ExcelWriter(PATH,engine='xlsxwriter')\n",
"comb_attr = [\n",
" ['zip' ,'gender', 'birth_datetime', 'race'], \n",
" ['zip', 'gender', 'year_of_birth', 'race'], \n",
" ['gender','race','zip'],\n",
" ['race','year_of_birth','zip']\n",
"]\n",
"\n",
"for cols in comb_attr :\n",
" o = risk(data=dfc,cols=cols,flag='CONTROLLED',num_runs=5)\n",
" #\n",
" # adding the policy\n",
" x = [1* dfc.columns.isin(cols) for i in range(o.shape[0])]\n",
" o = o.join(pd.DataFrame(x,columns = dfc.columns))\n",
" #\n",
" # Write this to excel notebook\n",
" o.to_excel(writer,\"-\".join(cols))\n",
"# break\n",
" \n",
"\n",
"# p = p.rename(columns={'marketer_x':'sample marketer'})\n",
"# p.index = np.arange(p.shape[0]).astype(np.int64)\n",
"\n",
"writer.save()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>person_id</th>\n",
" <th>year_of_birth</th>\n",
" <th>month_of_birth</th>\n",
" <th>day_of_birth</th>\n",
" <th>birth_datetime</th>\n",
" <th>race_concept_id</th>\n",
" <th>ethnicity_concept_id</th>\n",
" <th>location_id</th>\n",
" <th>care_site_id</th>\n",
" <th>person_source_value</th>\n",
" <th>...</th>\n",
" <th>gender_source_concept_id</th>\n",
" <th>race_source_value</th>\n",
" <th>ethnicity_source_value</th>\n",
" <th>sex_at_birth</th>\n",
" <th>birth_date</th>\n",
" <th>race</th>\n",
" <th>zip</th>\n",
" <th>city</th>\n",
" <th>state</th>\n",
" <th>gender</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" </tbody>\n",
"</table>\n",
"<p>0 rows × 21 columns</p>\n",
"</div>"
],
"text/plain": [
"Empty DataFrame\n",
"Columns: [person_id, year_of_birth, month_of_birth, day_of_birth, birth_datetime, race_concept_id, ethnicity_concept_id, location_id, care_site_id, person_source_value, gender_source_value, gender_source_concept_id, race_source_value, ethnicity_source_value, sex_at_birth, birth_date, race, zip, city, state, gender]\n",
"Index: []\n",
"\n",
"[0 rows x 21 columns]"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x = [1* dfc.columns.isin(cols) for i in range(o.shape[0])]\n",
"o.join(pd.DataFrame(x,columns = dfc.columns))\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'columns' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-6-8e7b9895361f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcolumns\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'columns' is not defined"
]
}
],
"source": [
"columns\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
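
The risk() helper defined in notebooks/Untitled.ipynb above estimates how the marketer re-identification risk behaves when only a random n% of the rows is released: it compares group sizes over the quasi-identifiers in the sample against those in the full table. The short sketch below redoes that arithmetic with plain pandas on an invented toy frame (no pandas_risk, made-up column values), purely as an illustration of the computation, not the notebook's actual pipeline.

# Toy illustration of the marketer-risk-of-a-sample computation used in risk() above.
import numpy as np
import pandas as pd

full = pd.DataFrame({
    'person_id': range(8),
    'gender': ['M', 'F', 'F', 'M', 'F', 'M', 'F', 'M'],
    'zip': ['37203', '37203', '37212', '37212', '37203', '37212', '37203', '37203'],
})
quasi = ['gender', 'zip']

# group sizes over the quasi-identifiers in the full table (y_i in the cell)
pop = full.groupby(quasi).size().reset_index(name='pop_size')

# draw a 50% sample and compute its group sizes (x_i in the cell)
rows = np.random.choice(full.shape[0], full.shape[0] // 2, replace=False)
sample = full.loc[rows]
smp = sample.groupby(quasi).size().reset_index(name='smp_size')

# marketer risk of the released sample relative to the full table:
# sum over groups of (sample group size / population group size), divided by the sample size
merged = smp.merge(pop, on=quasi, how='inner')
marketer = (merged.smp_size / merged.pop_size.astype(float)).sum() / float(smp.smp_size.sum())
print(marketer)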


@@ -177,7 +177,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
"version": "2.7.15rc1"
},
"varInspector": {
"cols": {


@@ -0,0 +1,95 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n",
" skiping ...\n"
]
},
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\"\"\"\n",
" This notebook is designed to generate SQL syntax all the quasi-identifiers for the patients in the database\n",
" The resulting SQL will be run against bigquery to produce a table with every record mapping to a patient\n",
" \n",
"\"\"\"\n",
"\n",
"from risk import *\n",
"ihandle = UtilHandler(path='/home/steve/dev/google-cloud-sdk/accounts/curation-prod.json',dataset='combined20180822',key_field='person_id',key_table='person',filter=['person','observation'])\n",
"r = ihandle.migrate_tables()\n",
"len(r)\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"u' SELECT person.person_id , person.year_of_birth , person.month_of_birth , person.day_of_birth , person.birth_datetime , person.race_concept_id , person.ethnicity_concept_id , person.location_id , person.care_site_id , person.person_source_value , person.gender_source_value , person.gender_source_concept_id , person.race_source_value , person.ethnicity_source_value , basic_observation.sex_at_birth AS sex_at_birth1 , basic_observation.birth_date AS birth_date1 , basic_observation.race AS race1 , basic_observation.zip AS zip1 , basic_observation.city AS city1 , basic_observation.state AS state1 , basic_observation.gender AS gender1 FROM (select * from deid_image.person ) as person INNER JOIN (select * from deid_image.basic_observation ) as basic_observation ON basic_observation.person_id = person.person_id '"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ihandle = UtilHandler(path='/home/steve/dev/google-cloud-sdk/accounts/curation-test.json',dataset='deid_image',key_field='person_id',key_table='person',filter=['person','basic_observation'])\n",
"ihandle.create_table().replace('\\n',' ')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15rc1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
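
For reference, here is the UtilHandler calling pattern exercised in the cells above, restated as a standalone snippet. It only repeats the calls that appear in the notebook (the constructor, migrate_tables() and create_table()), assumes UtilHandler is importable from the risk module as implied by the from risk import * above, and uses a placeholder credential path; it is not a description of the module's wider API.

# Sketch of the UtilHandler workflow used above; path and dataset names are placeholders.
from risk import UtilHandler

handler = UtilHandler(
    path='/path/to/service-account.json',     # Google Cloud service-account key file
    dataset='deid_image',                      # BigQuery dataset containing the tables
    key_field='person_id',                     # join key shared by the filtered tables
    key_table='person',                        # table that anchors the join
    filter=['person', 'basic_observation'])    # tables to stitch together

tables = handler.migrate_tables()              # per the notebook, returns the handled tables
sql = handler.create_table().replace('\n', ' ')  # flattened SQL joining the tables on person_id
print(len(tables))
print(sql[:120])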

610
notebooks/experiments.ipynb Normal file

File diff suppressed because one or more lines are too long