Efficiently locate group-wise constant columns in a data.frame

901 views

How can I efficiently extract group-wise constant columns from a data frame? I've included a plyr implementation below to make precise what I'm trying to do, but it's slow. How can I do it as efficiently as possible? (Ideally without splitting the data frame at all).

# Test data: 1000 groups repeated to 1e6 rows. Columns a and b are
# constant within each group; c and d vary within groups.
base <- data.frame(group = 1:1000, a = sample(1000), b = sample(1000))
df <- data.frame(
  # `length.out` spelled out: the original relied on partial matching
  # of `length = 1e6`, which lintr flags and which breaks if rep() ever
  # gains another argument starting with "length".
  base[rep(seq_len(nrow(base)), length.out = 1e6), ],
  c = runif(1e6),
  d = runif(1e6)
)


# TRUE iff the vector has exactly one distinct value.
is.constant <- function(x) {
  length(unique(x)) == 1
}

# Keep only the group-wise constant columns of x and return their
# first row (one representative value per constant column).
constant_cols <- function(x) {
  keep <- vapply(x, is.constant, logical(1))
  head(x[keep], 1)
}
# Benchmark the plyr approach: split df by group and, for each piece,
# keep the first row of the columns that are constant within it.
# (Timings below are those reported by the original author.)
system.time(constant <- ddply(df, "group", constant_cols))
#   user  system elapsed 
# 20.531   1.670  22.378 
# Sanity checks: only group, a and b survive; one row per group.
stopifnot(identical(names(constant), c("group", "a", "b")))
stopifnot(nrow(constant) == 1000)

In my real use case (deep inside ggplot2) there may be an arbitrary number of constant and non-constant columns. The size of the data in the example is about the right order of magnitude.

6

There are 6 answers

2
hadley On BEST ANSWER

Inspired by @Joran's answer, here's a similar strategy that's a little faster (1 s vs 1.5 s on my machine)

# Flag positions where x differs from the previous element; position 1 is
# always TRUE. `n` defaults to length(x): the original read `n` from the
# global environment (a bug acknowledged later on this page), so it
# errored or gave wrong answers unless a matching global `n` happened to
# exist. The default keeps existing two-argument callers working.
changed <- function(x, n = length(x)) c(TRUE, x[-1] != x[-n])

# Which non-group columns of df are constant within each level of grp?
# Returns a named logical vector over the non-group columns.
#
# Fixes two defects in the original: (1) it assigned `n` locally but
# called the *global* `changed`, whose free variable `n` cannot see that
# local — `changed` is now defined inside the function; (2) it hard-coded
# the group column as column 1 via `changes[-1]` / `changes[[1]]`, even
# though the group is passed as `grp`.
constant_cols2 <- function(df, grp) {
  # Sort so that equal group values sit on adjacent rows.
  df <- df[order(df[[grp]]), ]
  n <- nrow(df)
  # TRUE wherever a column's value differs from the row above.
  changed <- function(x) c(TRUE, x[-1] != x[-n])
  changes <- lapply(df, changed)
  # Accept grp as a column name or a column index.
  grp_idx <- if (is.character(grp)) which(names(df) == grp) else grp
  # A column is group-wise constant iff its change pattern is identical
  # to the group column's change pattern.
  vapply(changes[-grp_idx], identical, changes[[grp_idx]],
    FUN.VALUE = logical(1))
}
# Identify which non-group columns are group-wise constant.
system.time(cols <- constant_cols2(df, "group")) # about 1 s

# Keep one row per group: the rows where the group value first changes.
# NOTE(review): `cols` has one entry per NON-group column, but it indexes
# ALL columns of `df` here, so the logical index is recycled — confirm
# the selected columns really are c("group", "a", "b") and not an
# accident of recycling.
system.time(constant <- df[changed(df$group), cols])
#   user  system elapsed 
#  1.057   0.230   1.314 

stopifnot(identical(names(constant), c("group", "a", "b")))
stopifnot(nrow(constant) == 1000)

It has the same flaws though, in that it won't detect columns that have the same values for adjacent groups (e.g. df$f <- 1)

With a bit more thinking plus @David's ideas:

# Which non-group columns of df are constant within each level of grp?
# Unlike constant_cols2, this correctly handles columns that keep the
# same value across adjacent groups: the column's change pattern only
# needs to be a *subset* of the group's, not identical to it.
#
# Fixes a defect in the original: it hard-coded the group column as
# column 1 via `changes[-1]` / `changes[[1]]`, even though the group is
# passed as `grp`.
constant_cols3 <- function(df, grp) {
  # If col == TRUE and group == FALSE, not constant: the column changed
  # value at a row where the group did not change.
  matching_breaks <- function(group, col) {
    !any(col & !group)
  }

  n <- nrow(df)
  # TRUE wherever a column's value differs from the row above.
  changed <- function(x) c(TRUE, x[-1] != x[-n])

  # Sort so that equal group values sit on adjacent rows.
  df <- df[order(df[[grp]]), ]
  changes <- lapply(df, changed)
  # Accept grp as a column name or a column index.
  grp_idx <- if (is.character(grp)) which(names(df) == grp) else grp
  vapply(changes[-grp_idx], matching_breaks, group = changes[[grp_idx]],
    FUN.VALUE = logical(1))
}

# Benchmark: same data as above, now also handling columns that are
# constant across adjacent groups. (Timings as reported by the author.)
system.time(x <- constant_cols3(df, "group"))
#   user  system elapsed 
#  1.086   0.221   1.413 

And that gives the correct result.

2
jebyrnes On

(edit: better answer)

What about something like

# Constant iff every element equals the first one. NAs never compare
# equal (which() drops them in the original; na.rm drops them here), so
# any NA makes the vector non-constant. A zero-length vector is
# vacuously constant in both formulations.
is.constant <- function(x) sum(x == x[1], na.rm = TRUE) == length(x)

This seems to be a nice improvement. Compare the following.

> a<-rnorm(5000000)

> system.time(is.constant(a))
   user  system elapsed 
  0.039   0.010   0.048 
> 
> system.time(is.constantOld(a))
   user  system elapsed 
  1.049   0.084   1.125 
2
joran On

(Edited to possibly address the issue of consecutive groups with the same value)

I'm tentatively submitting this answer, but I haven't completely convinced myself that it will correctly identify within group constant columns in all cases. But it's definitely faster (and can probably be improved):

# Detect group-wise constant columns via run-length encoding: shift the
# values of alternating groups by more than the data's maximum spread so
# runs cannot merge across a group boundary, then compare each column's
# run lengths with the group column's run lengths.
# NOTE(review): `diff(range(df))` takes the range over ALL columns
# (including the group column) — confirm the offset is always large
# enough and numerically safe for the data at hand.
# NOTE(review): the `[,-1]` indexing assumes the group column is column
# 1, even though `grp` is a parameter — verify before reusing with a
# different column order.
constant_cols1 <- function(df,grp){
    df <- df[order(df[,grp]),]

    #Adjust values based on max diff in data
    rle_group <- rle(df[,grp])
    vec <- rep(rep(c(0,ceiling(diff(range(df)))),
               length.out = length(rle_group$lengths)),
               times = rle_group$lengths)
    m <- matrix(vec,nrow = length(vec),ncol = ncol(df)-1)
    df_new <- df
    df_new[,-1] <- df[,-1] + m

    # Run-length encode every shifted column; a column is group-wise
    # constant iff its runs break exactly where the group's runs break.
    rles <- lapply(df_new,FUN = rle)
    nms <- names(rles)
    tmp <- sapply(rles[nms != grp],
                  FUN = function(x){identical(x$lengths,rles[[grp]]$lengths)})
    return(tmp)
}

My basic idea was to use rle, obviously.

4
Jared On

I'm not sure if this is exactly what you are looking for, but it identifies columns a and b.

# data.table approach: compute per-group constancy of every column, then
# keep the columns that are constant in ALL groups.
require(data.table)
# NOTE(review): identical(var(x), 0) is an exact floating-point equality
# test, and var() of a length-1 group is NA (and errors on non-numeric
# columns) — confirm all groups have >= 2 rows of numeric data.
is.constant <- function(x) identical(var(x), 0)
dtOne <- data.table(df)
# Per-group constancy flags, then AND across groups per column.
# NOTE(review): the column set list(a, b, c, d) is hard-coded rather
# than derived from names(df) — verify before reusing on other data.
system.time({dtTwo <- dtOne[, lapply(.SD, is.constant), by=group]
result <- apply(X=dtTwo[, list(a, b, c, d)], 2, all)
result <- result[result == TRUE] })
stopifnot(identical(names(result), c("a", "b"))) 
result
0
dholstius On

How fast does is.unsorted(x) fail for non-constant x? Sadly I don't have access to R at the moment. Also seems that's not your bottleneck though.

2
David F On

A bit slower than what hadley suggested above, but I think it should handle the case of equal adjacent groups

# Positions (in a sorted vector) after which the value changes: the
# cumulative ends of each run.
findBreaks <- function(x) cumsum(rle(x)$lengths)

# Which non-group columns of d are constant within each group?
# A column is group-wise constant iff, once rows are sorted by the group
# column, every break in the column coincides with a break in the group.
constantGroups <- function(d, groupColIndex=1) {
  # Sort rows so runs line up with groups.
  d <- d[order(d[, groupColIndex]), ]
  breaks <- lapply(d, findBreaks)
  groupBreaks <- breaks[[groupColIndex]]
  numBreaks <- length(groupBreaks)
  # Break points are strictly increasing, so subset-of test suffices.
  isSubset <- function(x) {
    length(x) <= numBreaks && all(x %in% groupBreaks)
  }
  vapply(breaks[-groupColIndex], isSubset, logical(1))
}

The intuition is that if a column is constant groupwise then the breaks in the column values (sorted by the group value) will be a subset of the breaks in the group value.

Now, compare it with hadley's (with small modification to ensure n is defined)

# df defined as in the question

n <- nrow(df)
# `n` defaults to length(x) so changed() works per column even when the
# global n is absent or does not match the column length (the original
# silently depended on this global).
changed <- function(x, n = length(x)) c(TRUE, x[-1] != x[-n])

# Which non-group columns of df are constant within each level of grp?
# Returns a named logical vector over the non-group columns.
#
# Fixes two defects in the quoted original: (1) it called a *global*
# `changed` whose free variable `n` had to match nrow(df) — `changed` is
# now defined locally; (2) it hard-coded the group column as column 1
# via `changes[-1]` / `changes[[1]]`, even though the transcript below
# calls it with grp = 1 as an index.
constant_cols2 <- function(df, grp) {
  # Sort so that equal group values sit on adjacent rows.
  df <- df[order(df[[grp]]), ]
  n <- nrow(df)
  # TRUE wherever a column's value differs from the row above.
  changed <- function(x) c(TRUE, x[-1] != x[-n])
  changes <- lapply(df, changed)
  # Accept grp as a column name or a column index.
  grp_idx <- if (is.character(grp)) which(names(df) == grp) else grp
  vapply(changes[-grp_idx], identical, changes[[grp_idx]],
    FUN.VALUE = logical(1))
}

> system.time(constant_cols2(df, 1))
   user  system elapsed 
  1.779   0.075   1.869 
> system.time(constantGroups(df))
   user  system elapsed 
  2.503   0.126   2.614 
> df$f <- 1
> constant_cols2(df, 1)
    a     b     c     d     f 
 TRUE  TRUE FALSE FALSE FALSE 
> constantGroups(df)
    a     b     c     d     f 
 TRUE  TRUE FALSE FALSE  TRUE