Dask t-test between 2 rows for all columns in dataframe


I wrote the following function to calculate statistical tests between two categories across all columns of a pandas DataFrame in parallel.

I was able to extract the categories using dask, but I then have to call compute() and fall back to pandas to run the t-test (or other statistical test). Does anyone have an idea how to use dask to run not only the categorization but also the test for each column in parallel? Below is my code:

import numpy as np
import pandas as pd
import dask
import dask.dataframe  # needed so dask.dataframe.from_pandas / Aggregation below resolve
from scipy.stats import ttest_ind 
from scipy.stats import ttest_rel 
from scipy.stats import kstest 

df = pd.DataFrame({
'var1'      : np.random.randint(0, 1000000, 1000000),
'var2'      : np.random.randint(0, 1000000, 1000000),
'var3'      : np.random.randint(0, 1000000, 1000000),
'Category'   : np.random.randint(0, 2, 1000000) 
})


# Collect every value of a group into one Python list: chunk builds per-partition lists,
# agg concatenates them (summing Python lists concatenates), finalize is a no-op.
custom_list = dask.dataframe.Aggregation(
    'custom_test',
    chunk=lambda s: s.apply(list),
    agg=lambda s0: s0.obj.groupby(level=list(range(s0.obj.index.nlevels))).sum(),
    finalize=lambda s1: s1,
)

def testCustom(x, test=kstest, **args):
    # x is a Series holding one list per category; run the test between the two groups,
    # forwarding any extra keyword arguments to the test function.
    x = list(x)
    return test(x[0], x[1], **args)

def diffDiffrentialCategory(df, catcol='Category', test=ttest_ind, pVal=0.05, chunksize=10000, **args):
    # Partition the frame with dask, gather each column's values per category into lists,
    # then run the chosen statistical test between the two groups for every column.
    ddf = dask.dataframe.from_pandas(df, chunksize=chunksize)

    df1 = ddf.groupby(catcol).aggregate(custom_list).compute()
    # I'd like to work directly on df1 = ddf.groupby(catcol).aggregate(custom_list) w/o compute()
    results = df1.apply(testCustom, test=test, **args)  # one (statistic, p-value) per column
    df1 = (pd.DataFrame.from_records(results)
             .set_index(df1.columns)
             .rename(columns={0: 'statistic', 1: 'p-value'}))
    return df1[df1['p-value'] <= pVal]
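
For reference, I call the function roughly like this (ttest_ind is already the default, so the test argument is optional):

result = diffDiffrentialCategory(df, catcol='Category', pVal=0.05)
print(result)

The only workaround I can think of (just an untested sketch reusing the imports above, with a made-up helper name, assuming exactly two categories and that each column fits in memory) is to drop the custom aggregation and wrap every per-column test call in dask.delayed, so at least the tests themselves run in parallel:

def delayed_tests(df, catcol='Category', test=ttest_ind, pVal=0.05):
    cats = df[catcol].unique()          # assumes exactly two categories
    a = df[df[catcol] == cats[0]]
    b = df[df[catcol] == cats[1]]
    cols = [c for c in df.columns if c != catcol]
    # one lazy task per column; dask.compute evaluates them in parallel
    tasks = {c: dask.delayed(test)(a[c].values, b[c].values) for c in cols}
    (results,) = dask.compute(tasks)
    out = pd.DataFrame({c: {'statistic': r[0], 'p-value': r[1]}
                        for c, r in results.items()}).T
    return out[out['p-value'] <= pVal]

But that still does the category split eagerly in pandas, so it doesn't really keep the whole pipeline inside dask, which is what I'm after.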

Appreciate the help/advice

Jano

