library(stats)
library(methods)
library(gsDesign, quietly=TRUE)
#
# An R script to simulate the ITT group of the PIII INSPIRE confirmatory trial,
# an adaptive trial with sample size re-estimation at interim - v.2.4
#
# (c) 2018 Germain Garand <germain.garand@laposte.net>
#
# Core specifications of INSPIRE, taken from the public documents
# ONTXCorporatePresentation_Noblecon13.pdf and ONTX Corporate Presentation Oct 2018.pdf (version 1)
# 2-sided Type-I error for the ITT population
alpha <- 0.0397
# Type-II error (80% power)
beta <- 0.2
# Optimistic hazard ratio target (60% improvement in OS, HR=0.625)
hr_op <- 1/1.60
# Pessimistic hazard ratio target (37.5% improvement in OS, HR=0.73)
hr_p <- 1/1.375
# Hazard ratio under the null hypothesis
hr0 <- 1
# Proportion of patients in arm 1
pA <- 2/3
# Probability of an event occurring during the course of the study
# (here 100%, since what we want is the number of events, not the number of patients)
pE <- 1
# Cox PH (Schoenfeld) estimate of the number of events needed in a fixed design
n_fix <- ((qnorm(1-alpha/2)+qnorm(1-beta))/(log(hr_op)-log(hr0)))^2/(pA*(1-pA)*pE)
message("Number of events required to power the fixed optimistic design (CoxPH model): ", ceiling(n_fix))
# Cox PH estimate of the number of events needed in the worst-case scenario
n_fix_max <- ((qnorm(1-alpha/2)+qnorm(1-beta))/(log(hr_p)-log(hr0)))^2/(pA*(1-pA)*pE)
message("Number of events required to power the fixed pessimistic design (CoxPH model): ", ceiling(n_fix_max))
# Now let's see how this fixed design evolves if we turn it
# into an adaptive design.
# Natural parameter values under the null and alternate hypotheses
delta0 <- 0
delta1 <- 1
# Timing of the interim analysis for the underlying group sequential design
timing <- .5
# Upper spending function
sfu <- sfHSD
# Upper spending function parameter
sfupar <- -14
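# Sanity check: with gamma = -14, the Hwang-Shih-DeCani function spends
# almost none of the Type-I error at the interim, i.e. the interim
# efficacy bound is extremely conservative.
message("Cumulative alpha spent at interim: ",
        signif(sfHSD(alpha/2, timing, sfupar)$spend, 3))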
# Maximum sample size inflation
maxinflation <- 2
# Assumed enrollment overrun at IA
overrun <- 0
# Interim z-values for plotting
rg <- c(0.5, 2)
z <- seq(rg[1], rg[2], 0.025)
# Fixed design sample size (input the result from our Cox estimate)
n.fix <- n_fix
# Conditional power interval where the sample
# size is to be adjusted (the "promising zone")
# FIXME
cpadj <- c(.36, .8)
# Targeted Type-II error when adapting the sample size
#
# -- Onconova corporate presentation documents sometimes mentioned that
# -- the power was "raised to 90%" as a result of the sample size re-estimation,
# -- but that information has since been removed (in August 2018 and October 2018),
# -- so it may have been an error, or is otherwise untrustworthy.
# -- Might want to try both the .1 and .2 values.
# --
# -- The most conservative case is betastar = beta (power target unchanged).
betastar <- .2
# Combination test (built-in options are: z2Z, z2NC, z2Fisher)
z2 <- z2NC
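# For reference, z2NC is the inverse normal combination test: with weights
# w1 = sqrt(t) and w2 = sqrt(1-t) fixed at the design stage, the combined
# statistic is Z = (w1*Z1 + w2*Z2)/sqrt(w1^2 + w2^2), which preserves the
# Type-I error even after the stage-2 sample size is changed.
# A minimal sketch of that combination:
inv_normal_comb <- function(z1, z2, t=timing) {
  w1 <- sqrt(t); w2 <- sqrt(1-t)
  (w1*z1 + w2*z2) / sqrt(w1^2 + w2^2)
}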
# Use the above parameters to generate
# a 2-stage group sequential design
gs <- gsDesign(k=2, n.fix=n.fix, timing=timing, sfu=sfu, sfupar=sfupar,
               alpha=alpha/2, beta=beta, delta0=delta0, delta1=delta1)
gs_size <- gs$n.I[2]
message("Corresponding Adaptive Design initial sample size: ", ceiling(gs_size))
# Extend this to a conditional power design with sample size re-estimation
xx <- ssrCP(x=gs, z1=z, overrun=overrun, beta=betastar, cpadj=cpadj,
            maxinc=maxinflation, z2=z2)
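# The promising zone can also be read off the z1 grid: assuming xx$dat$n2
# holds the total (cumulative) events target after adaptation, as it is used
# below, it exceeds the planned maximum only where re-estimation kicked in.
pz <- range(xx$dat$z1[xx$dat$n2 > xx$x$n.I[2]])
message("Approximate promising zone on the z1 grid: [",
        round(pz[1], 2), ", ", round(pz[2], 2), "]")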
# Effective sample size increase at interim
incr <- 288
# Corresponding z1 at interim..
z1_i <- xx$dat$z1[ tail(which(xx$dat$n2 > incr-1), 1) ]
message("Z1 at interim: ", z1_i)
# ..expressed as a proportion of the effect size
theta_i <- z1_i / sqrt(xx$x$n.I[1])
delta_ratio <- round(theta_i/xx$x$delta, 2)
message("Proportion of effect size reached at interim: ", delta_ratio)
# ..and as a conditional power (theta defaults to the interim estimate z1/sqrt(n1))
cp_i <- condPower(z1=z1_i, x=xx$x, n2=xx$x$n.I[2]-xx$x$n.I[1])
message("Conditional Power at interim: ", round(cp_i, 2))
# Let's estimate the probability of success of the adaptive design
# after sample size re-estimation, by creating an ad-hoc sequential design
# close to the re-estimated trial parameters and evaluating it at the
# theta_i effect size.
#
# FIXME
incr_adj <- incr*(1-0.0278)
timing_adj <- (gs_size/2)/incr
gs2 <- gsDesign(k=2, n.fix=incr_adj, timing=timing_adj, sfu=sfu, sfupar=sfupar,
                alpha=alpha/2, beta=betastar, delta0=delta0, delta1=delta1)
message("------------- ITT Group --------------")
y <- gsProbability(theta=theta_i, d=gs2)
print(y)
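# The overall probability of success under theta_i is the total probability
# of crossing the upper (efficacy) boundary across both analyses:
message("Overall P(success | theta_i), ITT: ", round(sum(y$upper$prob), 3))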
# Now let's get a reading on the VHR group.
# We know it's 70% of ITT, but we don't know its performance,
# so let's just assume theta is the same as in the ITT group, to get a
# nice worst case.
# Beware: only part of the Type-I error was allocated to this group.
alpha_vhr <- 0.01
gs3 <- gsDesign(k=2, n.fix=incr_adj*.7, timing=timing_adj, sfu=sfu, sfupar=sfupar,
                alpha=alpha_vhr/2, beta=betastar, delta0=delta0, delta1=delta1)
message("------ VHR Group (assuming worst case: theta ITT = theta VHR) -------")
y2 <- gsProbability(theta=theta_i, d=gs3)
print(y2)
############ Plot the design ###############
# Plot the stage 2 sample size
plot(xx, z1ticks=seq(rg[1], rg[2], by=.1), xlaboffset=-.1, ylab="Number of required death events",
     main=paste0("Resampling rule for ", (1-betastar)*100, "% power target"))
############# Add some graph overlays ##############
lines(x=c(0, z1_i, z1_i), y=c(incr, incr, 0), col=2, lty=2)
off1 <- incr+5
off2 <- off1-100
text(x=c(1, z1_i-.5), y=c(off1, off2), col=2,
     labels=c(paste("Effective events target increase: ", ceiling((incr/gs_size-1)*100), "% (", incr, ")..", sep=""),
              paste("..points to Effect Size at ", delta_ratio*100, "% of target ->", sep="")))
# Overall line for the maximum sample size
nalt <- maxinflation*gs_size
lines(x=par("usr")[1:2], y=c(nalt, nalt), lty=2, col=3)
text(x=c(1.3), y=c((gs_size*2)-7), col=3, labels=paste("Maximum events target increase: 100% (", ceiling(gs_size*2), " events)", sep=""))
############# Absolute Worst-Case Probability Computation #########
# We would now like to study the case where the maximum inflation is
# in fact equal to the effective target increase.
# That would mean the worst possible conditional power is the lower bound
# of the promising zone.
#
# FIXME: in that case the lower boundary CP should be a tad higher, e.g. .39; see Pocock/Mehta 2000 p.9.
# Should be able to compute the precise value at some point.
worst_z1 <- xx$dat$z1[ match(nalt, xx$dat$n2) ]
worst_theta <- worst_z1 / sqrt(xx$x$n.I[1])
worst_delta_r <- round(worst_theta/xx$x$delta, 2)
message("Worst possible Z1: ", worst_z1)
message("Worst possible Theta value: ", worst_theta)
message("Worst possible effect ratio: ", worst_delta_r)
message("--------------- Worst possible probability of ITT success ---------")
worst_y <- gsProbability(theta=worst_theta, d=gs2)
print(worst_y)