Bug fix; add usage examples

Steve L. Nyemba -- The Architect 2018-10-02 12:38:24 -05:00
parent cb58675cd3
commit 47f94974c9
2 changed files with 209 additions and 310 deletions

View File

@@ -1,34 +1,16 @@
 # deid-risk
-1. Pull meta data of the database and create a dataset via joins
-2. Generate the dataset with random selection of features
-3. Compute risk via SQL using group by
-## Python environment
-The following are the dependencies needed to run the code:
+This project computes an estimated value of de-identification risk for a given database. The code below extends a data-frame with the ability to compute de-identification risk (marketer, prosecutor).
+Because data-frames can connect to any database/file, it is the responsibility of the user to load the dataset into a data-frame.
+Basic examples that illustrate usage of the framework are in the notebook folder. The example is derived from
+[http://ehealthinformation.ca](http://www.ehealthinformation.ca/wp-content/uploads/2014/08/2009-De-identification-PA-whitepaper1.pdf)
+Dependencies:
+numpy
+pandas
-pandas
-numpy
-pandas-gbq
-google-cloud-bigquery
-## Usage
-**Generate The merged dataset**
-python risk.py create --i_dataset <in dataset|schema> --o_dataset <out dataset|schema> --table <name> --path <bigquery-key-file> --key <patient-id-field-name> [--file ]
-**Compute risk (marketer, prosecutor)**
-python risk.py compute --i_dataset <dataset> --table <name> --path <bigquery-key-file> --key <patient-id-field-name>
-## Limitations
-- It works against bigquery for now
+Limitations:
 @TODO:
-- Need to write a transport layer (database interface)
-- Support for referential integrity, so one table can be selected and a dataset derived given referential integrity
 - Add support for journalist risk
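
For orientation, here is a minimal sketch of the usage the updated README describes, assuming the `deid` data-frame accessor defined in the notebook (the second file in this commit) has been registered first; the toy data and column names mirror the notebook's white-paper example, and the variable name `df` is ours.

```python
import pandas as pd
from io import StringIO

# Toy dataset from the de-identification white-paper example used in the notebook
csv = """id,sex,age,profession,drug_test
1,M,37,doctor,-
2,F,28,doctor,+
3,M,37,doctor,-
4,M,28,doctor,+
5,M,28,doctor,-
6,M,37,doctor,-
"""
df = pd.read_csv(StringIO(csv))

# Experiment mode: random selections of quasi-identifiers, repeated over 5 runs
# (assumes the deid accessor from the notebook has been registered)
print(df.deid.risk(id='id', num_runs=5))

# Assessment mode: one fixed list of quasi-identifiers, evaluated in a single run
print(df.deid.risk(id='id', quasi_id=['age', 'sex', 'profession']))
```

In the accessor's `risk()` method, marketer risk is the number of group-by equivalence classes divided by the number of records, and prosecutor risk is 1 over the size of the smallest equivalence class.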

View File

@@ -2,294 +2,121 @@
"cells": [ "cells": [
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 4,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"dev-deid-600@aou-res-deid-vumc-test.iam.gserviceaccount.com df0ac049-d5b6-416f-ab3c-6321eda919d6 2018-09-25 08:18:34.829000+00:00 DONE\n"
-]
-}
-],
"source": [ "source": [
"\"\"\"\n",
" This notebook is intended to show how to use the risk framework:\n",
" There are two basic usages:\n",
" 1. Experiment\n",
" \n",
" Here the framework will select a number of random fields other than the patient id and compute risk for the selection.\n",
" This will repeat over a designated number of runs.\n",
" \n",
" The parameters to pass to enable this mode are id=<patient id>,nun_runs=<number of runs>\n",
" 2. Assessment\n",
" \n",
" Here the framework assumes you are only interested in a list of quasi identifiers and will run the evaluation once for a given list of quasi identifiers.\n",
" The parameters to enable this mode are id=<patient id>,quasi_id=<list of quasi ids>\n",
"\"\"\"\n",
"import os\n",
"import pandas as pd\n", "import pandas as pd\n",
"import numpy as np\n", "import numpy as np\n",
"from google.cloud import bigquery as bq\n",
"\n", "\n",
"client = bq.Client.from_service_account_json('/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')\n", "\n",
"# pd.read_gbq(query=\"select * from raw.observation limit 10\",private_key='/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')\n", "#\n",
"jobs = client.list_jobs()\n", "#-- Loading a template file\n",
"for job in jobs :\n", "# The example taken a de-identification white-paper\n",
"# print dir(job)\n", "# http://www.ehealthinformation.ca/wp-content/uploads/2014/08/2009-De-identification-PA-whitepaper1.pdf\n",
" print job.user_email,job.job_id,job.started, job.state\n", "#\n",
" break" "\n",
"import pandas as pd\n",
"import numpy as np\n",
"from io import StringIO\n",
"csv = \"\"\"\n",
"id,sex,age,profession,drug_test\n",
"1,M,37,doctor,-\n",
"2,F,28,doctor,+\n",
"3,M,37,doctor,-\n",
"4,M,28,doctor,+\n",
"5,M,28,doctor,-\n",
"6,M,37,doctor,-\n",
"\"\"\"\n",
"f = StringIO()\n",
"f.write(unicode(csv))\n",
"f.seek(0)\n",
"MY_DATAFRAME = pd.read_csv(f) "
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 33,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
"source": [
"xo = ['person_id','date_of_birth','race']\n",
"xi = ['person_id','value_as_number','value_source_value']"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def get_tables(client,id,fields=[]):\n",
" \"\"\"\n",
" getting table lists from google\n",
" \"\"\"\n",
" r = []\n",
" ref = client.dataset(id)\n",
" tables = list(client.list_tables(ref))\n",
" for table in tables :\n",
" ref = table.reference\n",
" schema = client.get_table(ref).schema\n",
" names = [f.name for f in schema]\n",
" x = list(set(names) & set(fields))\n",
" if x :\n",
" r.append({\"name\":table.table_id,\"fields\":names})\n",
" return r\n",
" \n",
"def get_fields(**args):\n",
" \"\"\"\n",
" This function will generate a random set of fields from two tables. Tables are structured as follows \n",
" {name,fields:[],\"y\":}, with \n",
" name table name (needed to generate sql query)\n",
" fields list of field names, used in the projection\n",
" y name of the field to be joined.\n",
" @param xo candidate table in the join\n",
" @param xi candidate table in the join\n",
" @param join field by which the tables can be joined.\n",
" \"\"\"\n",
" # The set operation will remove redundancies in the field names (not sure it's a good idea)\n",
"# xo = args['xo']['fields']\n",
"# xi = args['xi']['fields']\n",
"# zi = args['xi']['name']\n",
"# return list(set([ \".\".join([args['xo']['name'],name]) for name in xo]) | set(['.'.join([args['xi']['name'],name]) for name in xi if name != args['join']]) )\n",
" xo = args['xo']\n",
" fields = [\".\".join([args['xo']['name'],name]) for name in args['xo']['fields']]\n",
" if not isinstance(args['xi'],list) :\n",
" x_ = [args['xi']]\n",
" else:\n",
" x_ = args['xi']\n",
" for xi in x_ :\n",
" fields += (['.'.join([xi['name'], name]) for name in xi['fields'] if name != args['join']])\n",
" return fields\n",
"def generate_sql(**args):\n",
" \"\"\"\n",
" This function will generate the SQL query for the resulting join\n",
" \"\"\"\n",
" \n",
" xo = args['xo']\n",
" x_ = args['xi']\n",
" xo_name = \".\".join([args['prefix'],xo['name'] ]) if 'prefix' in args else xo['name']\n",
" SQL = \"SELECT :fields FROM :xo.name \".replace(\":xo.name\",xo_name)\n",
" if not isinstance(x_,list):\n",
" x_ = [x_]\n",
" f = []#[\".\".join([args['xo']['name'],args['join']] )] \n",
" INNER_JOINS = []\n",
" for xi in x_ :\n",
" xi_name = \".\".join([args['prefix'],xi['name'] ]) if 'prefix' in args else xi['name']\n",
" JOIN_SQL = \"INNER JOIN :xi.name ON \".replace(':xi.name',xi_name)\n",
" value = \".\".join([xi['name'],args['join']])\n",
" f.append(value) \n",
" \n",
" ON_SQL = \"\"\n",
" tmp = []\n",
" for term in f :\n",
" ON_SQL = \":xi.name.:ofield = :xo.name.:ofield\".replace(\":xo.name\",xo['name'])\n",
" ON_SQL = ON_SQL.replace(\":xi.name.:ofield\",term).replace(\":ofield\",args['join'])\n",
" tmp.append(ON_SQL)\n",
" INNER_JOINS += [JOIN_SQL + \" AND \".join(tmp)]\n",
" return SQL + \" \".join(INNER_JOINS)\n",
"def get_final_sql(**args):\n",
" xo = args['xo']\n",
" xi = args['xi']\n",
" join=args['join']\n",
" prefix = args['prefix'] if 'prefix' in args else ''\n",
" fields = get_fields (xo=xo,xi=xi,join=join)\n",
" k = len(fields)\n",
" n = np.random.randint(2,k) #-- number of fields to select\n",
" i = np.random.randint(0,k,size=n)\n",
" fields = [name for name in fields if fields.index(name) in i]\n",
" base_sql = generate_sql(xo=xo,xi=xi,prefix)\n",
" SQL = \"\"\"\n",
" SELECT AVERAGE(count),size,n as selected_features,k as total_features\n",
" FROM(\n",
" SELECT COUNT(*) as count,count(:join) as pop,sum(:n) as N,sum(:k) as k,:fields\n",
" FROM (:sql)\n",
" GROUP BY :fields\n",
" ) \n",
" order by 1\n",
" \n",
" \"\"\".replace(\":sql\",base_sql)\n",
"# sql = \"SELECT :fields FROM :xo.name INNER JOIN :xi.name ON :xi.name.:xi.y = :xo.y \"\n",
"# fields = \",\".join(get_fields(xo=xi,xi=xi,join=xi['y']))\n",
" \n",
" \n",
"# sql = sql.replace(\":fields\",fields).replace(\":xo.name\",xo['name']).replace(\":xi.name\",xi['name'])\n",
"# sql = sql.replace(\":xi.y\",xi['y']).replace(\":xo.y\",xo['y'])\n",
"# return sql\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
"xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race','value_as_number']}\n",
"xi = [{\"name\":\"measurement\",\"fields\":['person_id','value_as_number','value_source_value']}] #,{\"name\":\"observation\",\"fields\":[\"person_id\",\"value_as_string\",\"observation_source_value\"]}]\n",
"# generate_sql(xo=xo,xi=xi,join=\"person_id\",prefix='raw')\n",
"fields = get_fields(xo=xo,xi=xi,join='person_id')\n",
"ofields = list(fields)\n",
"k = len(fields)\n",
"n = np.random.randint(2,k) #-- number of fields to select\n",
"i = np.random.randint(0,k,size=n)\n",
"fields = [name for name in fields if fields.index(name) in i]"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['person.race', 'person.value_as_number', 'measurement.value_source_value']"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"fields\n"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'SELECT person_id,value_as_number,measurements.value_source_value,measurements.value_as_number,value_source_value FROM person INNER JOIN measurements ON measurements.person_id = person_id '"
]
},
"execution_count": 55,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race'],\"y\":\"person_id\"}\n",
"xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value'],\"y\":\"person_id\"}\n",
"generate_sql(xo=xo,xi=xi)"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('a', 'b'), ('a', 'c'), ('b', 'c')]"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"\"\"\"\n", "\"\"\"\n",
" We are designing a process that will take two tables that will generate \n", " Here's the pandas_risk code verbatim. \n",
" NOTE: \n",
"\"\"\"\n", "\"\"\"\n",
"import itertools\n", "@pd.api.extensions.register_dataframe_accessor(\"deid\")\n",
"list(itertools.combinations(['a','b','c'],2))" "class deid :\n",
" \"\"\"\n",
" This class is a deidentification class that will compute risk (marketer, prosecutor) given a pandas dataframe\n",
" \"\"\"\n",
" def __init__(self,df):\n",
" self._df = df\n",
" \n",
" def risk(self,**args):\n",
" \"\"\"\n",
" @param id name of patient field \n",
" @params num_runs number of runs (default will be 100)\n",
" @params quasi_id \tlist of quasi identifiers to be used (this will only perform a single run)\n",
" \"\"\"\n",
" \n",
" id = args['id']\n",
" if 'quasi_id' in args :\n",
" num_runs = 1\n",
" columns = list(set(args['quasi_id'])- set(id) )\n",
" else :\n",
" num_runs = args['num_runs'] if 'num_runs' in args else 100\n",
" columns = list(set(self._df.columns) - set([id]))\n",
" r = pd.DataFrame() \n",
" k = len(columns)\n",
" for i in range(0,num_runs) :\n",
" #\n",
" # let's chose a random number of columns and compute marketer and prosecutor risk\n",
" # Once the fields are selected we run a groupby clause\n",
" #\n",
" if 'quasi_id' not in args :\n",
" n = np.random.randint(2,k) #-- number of random fields we are picking\n",
" ii = np.random.choice(k,n,replace=False)\n",
" cols = np.array(columns)[ii].tolist()\n",
" else:\n",
" cols \t= columns\n",
" n \t= len(cols)\n",
" x_ = self._df.groupby(cols).count()[id].values\n",
" r = r.append(\n",
" pd.DataFrame(\n",
" [\n",
" {\n",
" \"selected\":n,\n",
" \"marketer\": x_.size / np.float64(np.sum(x_)),\n",
" \"prosecutor\":1 / np.float64(np.min(x_))\n",
"\n",
" }\n",
" ]\n",
" )\n",
" )\n",
" g_size = x_.size\n",
" n_ids = np.float64(np.sum(x_))\n",
"\n",
" return r"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([1, 3, 0, 0])"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#\n",
"# find every table with person id at the very least or a subset of fields\n",
"#\n",
"np.random.randint(0,4,size=4)"
]
},
{
"cell_type": "code",
"execution_count": 90,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['a']"
]
},
"execution_count": 90,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(set(['a','b']) & set(['a']))"
]
},
{
"cell_type": "code",
"execution_count": 120,
"metadata": {},
"outputs": [],
"source": [
"x_ = 1"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"x_ = pd.DataFrame({\"group\":[1,1,1,1,1], \"size\":[2,1,1,1,1]})"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
@@ -313,35 +140,125 @@
" <thead>\n", " <thead>\n",
" <tr style=\"text-align: right;\">\n", " <tr style=\"text-align: right;\">\n",
" <th></th>\n", " <th></th>\n",
" <th>size</th>\n", " <th>marketer</th>\n",
" </tr>\n", " <th>prosecutor</th>\n",
" <tr>\n", " <th>selected</th>\n",
" <th>group</th>\n",
" <th></th>\n",
" </tr>\n", " </tr>\n",
" </thead>\n", " </thead>\n",
" <tbody>\n", " <tbody>\n",
" <tr>\n", " <tr>\n",
" <th>1</th>\n", " <th>0</th>\n",
" <td>1.2</td>\n", " <td>0.500000</td>\n",
" <td>1.0</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.500000</td>\n",
" <td>1.0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.500000</td>\n",
" <td>1.0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.333333</td>\n",
" <td>1.0</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.333333</td>\n",
" <td>0.5</td>\n",
" <td>2</td>\n",
" </tr>\n", " </tr>\n",
" </tbody>\n", " </tbody>\n",
"</table>\n", "</table>\n",
"</div>" "</div>"
], ],
"text/plain": [ "text/plain": [
" size\n", " marketer prosecutor selected\n",
"group \n", "0 0.500000 1.0 2\n",
"1 1.2" "0 0.500000 1.0 3\n",
"0 0.500000 1.0 3\n",
"0 0.333333 1.0 2\n",
"0 0.333333 0.5 2"
 ]
 },
-"execution_count": 12,
+"execution_count": 7,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
-"x_.groupby(['group']).mean()\n"
+"#\n",
"# Lets us compute risk here for a random any random selection of quasi identifiers\n",
"# We will run this experiment 5 times\n",
"#\n",
"MY_DATAFRAME.deid.risk(id='id',num_runs=5)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>marketer</th>\n",
" <th>prosecutor</th>\n",
" <th>selected</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.5</td>\n",
" <td>1.0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" marketer prosecutor selected\n",
"0 0.5 1.0 3"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#\n",
"# In this scenario we are just interested in sex,profession,age\n",
"#\n",
"MY_DATAFRAME.deid.risk(id='id',quasi_id=['age','sex','profession'])"
] ]
 },
 {