---
title: "Solutions"
output:
  html_document:
    toc: yes
---

## Required packages

```{r, message=FALSE}
library(GENESIS)
library(SeqArray)
library(SeqVarTools)
library(dplyr)
library(ggplot2)
library(Biobase)
```

```{r}
# make qqPlot function
qqPlot <- function(pval) {
    pval <- pval[!is.na(pval)]
    n <- length(pval)
    x <- 1:n
    dat <- data.frame(obs=sort(pval),
                      exp=x/n,
                      upper=qbeta(0.025, x, rev(x)),
                      lower=qbeta(0.975, x, rev(x)))

    ggplot(dat, aes(-log10(exp), -log10(obs))) +
        geom_line(aes(-log10(exp), -log10(upper)), color="gray") +
        geom_line(aes(-log10(exp), -log10(lower)), color="gray") +
        geom_point() +
        geom_abline(intercept=0, slope=1, color="red") +
        xlab(expression(paste(-log[10], "(expected P)"))) +
        ylab(expression(paste(-log[10], "(observed P)"))) +
        theme_bw()
}
```
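
As a quick sanity check, the function can be applied to a vector of uniform random p-values, which should track the red identity line (a minimal usage sketch, not part of the original exercises):

```{r, eval=FALSE}
# p-values simulated under the null should fall along the identity line
set.seed(42)
qqPlot(runif(1000))
```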

## GDS format

1. Set a filter selecting only multi-allelic variants. Inspect their genotypes using the different methods you learned above. Use the `alleleDosage` method to find dosage for the second (and third, etc.) alternate allele.

```{r exercise_gds}
# open a connection to the GDS file again
gdsfile <- "AnalysisFiles/1KG_phase3_subset_chr1.gds"
gdsfmt::showfile.gds(closeall=TRUE) # make sure file is not already open
gds <- seqOpen(gdsfile)
```

```{r}
# set your filter
n <- seqNumAllele(gds)
multi.allelic <- which(n > 2)
seqSetFilter(gds, variant.sel=multi.allelic)
```

```{r}
geno <- seqGetData(gds, "genotype")
dim(geno)
geno[,1:5,1:5]
```

```{r}
geno <- getGenotype(gds)
dim(geno)
head(geno)
```

```{r}
geno <- getGenotypeAlleles(gds)
head(geno)
```

```{r}
# count of reference alleles
dos <- refDosage(gds)
head(dos)
```

```{r}
# count of *any* alternate allele
dos <- altDosage(gds)
head(dos)
```

```{r}
# count of the first alternate allele
dos <- alleleDosage(gds, n=1)
head(dos)
```

```{r}
# count of the third alternate allele
dos <- alleleDosage(gds, n=3)
head(dos)
```

```{r}
# count of *each* of the alternate alleles
# returns multiple columns per variant
dos <- expandedAltDosage(gds)
head(dos)
```

2. Use the `hwe` function in SeqVarTools to run a Hardy-Weinberg Equilibrium test on each variant. Identify a variant with a low p-value and inspect its genotypes. (Note that the HWE test is only valid for biallelic variants and returns `NA` for multiallelic variants.)

```{r exercise_hwe}
# reset the filter to all variants
seqResetFilter(gds)
```

```{r}
# run HWE test
hwe.res <- hwe(gds)

# identify variants with small p-values
lowp <- !is.na(hwe.res$p) & hwe.res$p < 1e-4
head(hwe.res[lowp,])
```

```{r}
# look at the results for the most significant variant
minp <- which.min(hwe.res$p)
hwe.res[minp,]
```

```{r}
# look at the genotypes of the most significant variant
# (filter on the variant.id from the results table, not the row index)
seqSetFilter(gds, variant.id=hwe.res$variant.id[minp])
table(getGenotype(gds))
table(altDosage(gds))
```

```{r}
seqClose(gds)
```

## Association tests - Part I

1. As discussed in the lecture, we recommend a fully adjusted two-stage inverse Normalization procedure for fitting the null model when phenotypes have non-Normal distributions. Using the `two.stage` option in `fitNullModel`, fit a two-stage null model. Compare these residuals with the residuals from the original null model.

To run the fully adjusted two-stage null model, we simply set the `two.stage` option to `TRUE`. The `norm.option` parameter determines whether the inverse Normalization is done with all samples together (`"all"`) or within each `group.var` group separately (`"by.group"`).

```{r}
# load the sample annotation data
annot <- get(load("AnalysisFiles/sample_phenotype_annotation.RData"))
annot
```

```{r null_model_two_stage}
nullmod.twostage <- fitNullModel(annot,
                                 outcome="height",
                                 covars=c("sex", "age", "study"),
                                 group.var="study",
                                 two.stage=TRUE,
                                 norm.option="all",
                                 verbose=FALSE)
save(nullmod.twostage, file="OutputFiles/null_model_two_stage.RData")
```
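
For comparison, this is what the same call would look like with the rank normalization done separately within each `group.var` group (`norm.option = "by.group"`). This is just a sketch; it is not used in the rest of the solution:

```{r, eval=FALSE}
# same two-stage model, but inverse-Normalize residuals within each study separately
nullmod.twostage.bygroup <- fitNullModel(annot,
                                         outcome="height",
                                         covars=c("sex", "age", "study"),
                                         group.var="study",
                                         two.stage=TRUE,
                                         norm.option="by.group",
                                         verbose=FALSE)
```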

```{r}
# description of the model we fit
nullmod.twostage$model
```

Compare the marginal residuals:

```{r}
# load the original null model
nullmod <- get(load("AnalysisFiles/null_model.RData"))
```

```{r}
# merge the data for plotting
pdat <- merge(nullmod$fit, nullmod.twostage$fit,
              by = 'sample.id', suffixes = c('.orig', '.twostage'))
pdat <- merge(pdat, pData(annot), by = 'sample.id')
```

```{r}
# distribution of residuals - original null model
ggplot(pdat, aes(x = resid.marginal.orig)) +
    geom_density(aes(color = study)) +
    geom_density(size = 2)
```

```{r}
# distribution of residuals - two-stage null model
ggplot(pdat, aes(x = resid.marginal.twostage)) +
    geom_density(aes(color = study)) +
    geom_density(size = 2)
```

```{r}
# compare residuals
ggplot(pdat, aes(x = resid.marginal.orig, y = resid.marginal.twostage, color = study)) +
    geom_point() +
    geom_abline(intercept = 0, slope = 1)
```

There is not much difference in the residuals here because the distribution of height is not far from Normal to begin with. See [Sofer et al.](https://onlinelibrary.wiley.com/doi/10.1002/gepi.22188) for more information on the fully adjusted two-stage model.


2. GENESIS also supports testing binary (e.g. case/control) outcomes. We can fit a null model using logistic regression by specifying the argument `family=binomial` in the `fitNullModel` function. Use the `status` column in the sample annotation to fit a null model for simulated case/control status, with `sex` and `Population` as covariates. Run single-variant association tests using this model and make a QQ plot of all variants with MAC >= 5.

When testing binary outcomes, we should fit our null model using logistic regression. To do so, we simply set the argument `family=binomial` in `fitNullModel`. Note that the parameter `group.var` is no longer relevant here, as the logistic model specifies the mean-variance relationship.

```{r}
# load the sample annotation data
annot <- get(load("AnalysisFiles/sample_phenotype_annotation.RData"))

# load and prepare the genotype data
gdsfile <- "AnalysisFiles/1KG_phase3_subset_chr1.gds"
gdsfmt::showfile.gds(closeall=TRUE) # make sure file is not already open
gds <- seqOpen(gdsfile)
seqData <- SeqVarData(gds, sampleData=annot)
iterator <- SeqVarBlockIterator(seqData, verbose=FALSE)
```

```{r exercise_logistic}
# fit the null model with logistic regression
nullmod.status <- fitNullModel(annot,
                               outcome="status",
                               covars=c("sex", "Population"),
                               family=binomial,
                               verbose=FALSE)
```

```{r}
# run the single-variant association test
assoc.status <- assocTestSingle(iterator, nullmod.status, test="Score")
```

```{r}
head(assoc.status)
```

```{r}
# make a QQ plot
qqPlot(assoc.status$Score.pval[assoc.status$MAC >= 5])
```

Extra: in samples with highly imbalanced case:control ratios, the Score test can perform poorly for low-frequency variants. The saddlepoint approximation (SPA) can be used to improve p-value calculations, and is available in GENESIS by setting the argument `test="Score.SPA"` in `assocTestSingle`. See [Dey et al.](https://www.cell.com/ajhg/fulltext/S0002-9297(17)30201-X) and [Zhou et al.](https://www.nature.com/articles/s41588-018-0184-y) for details on using SPA in GWAS.
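
A minimal sketch of what that would look like here, reusing the logistic null model from this exercise (not run; shown only to illustrate the `test` argument):

```{r, eval=FALSE}
# re-create the block iterator so it starts from the first block again
seqResetFilter(seqData, verbose=FALSE)
iterator <- SeqVarBlockIterator(seqData, verbose=FALSE)

# Score test with saddlepoint approximation of the p-values
assoc.spa <- assocTestSingle(iterator, nullmod.status, test="Score.SPA")
head(assoc.spa)
```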

```{r}
seqClose(seqData)
```


## Association tests - Part II

1. Perform a sliding window SKAT test for the outcome status. Adjust your model for the covariates sex and study. When performing your SKAT test, use all variants with alternate allele frequency < 20%, and use the Wu weights to give larger weights to rarer variants. Use the same `windowSize` and `windowShift` as in the examples. How many windows have >1 variant? Make a QQ plot of the SKAT p-values.

The first step is to fit our null model -- since our outcome, status, is a binary variable, we must fit a logistic regression null model using the `family = binomial` argument. The second step is to create our `SeqVarWindowIterator` object. The third step is to perform the SKAT test using `assocTestAggregate` -- we can set the maximum alternate allele frequency with the `AF.max` argument, and we can set the variant weights with the `weight.beta` argument.

```{r}
# load the sample annotation data
annot <- get(load("AnalysisFiles/sample_phenotype_annotation.RData"))

# load and prepare the genotype data
gdsfile <- "AnalysisFiles/1KG_phase3_subset_chr1.gds"
gdsfmt::showfile.gds(closeall=TRUE) # make sure file is not already open
gds <- seqOpen(gdsfile)
seqData <- SeqVarData(gds, sampleData=annot)
```

```{r exercise_sliding}
nullmod.status <- fitNullModel(annot,
                               outcome="status",
                               covars=c("sex", "study"),
                               family=binomial,
                               verbose=FALSE)
```

```{r}
iterator <- SeqVarWindowIterator(seqData, windowSize=10000, windowShift=5000, verbose=FALSE)

# run the SKAT test with the logistic null model fit above
assoc <- assocTestAggregate(iterator,
                            nullmod.status,
                            test="SKAT",
                            AF.max=0.2,
                            weight.beta=c(1,25),
                            verbose=FALSE)
```

```{r}
# results for each window
head(assoc$results)
```

```{r}
# how many variants in each window?
table(assoc$results$n.site)
```

```{r}
# variant details for windows with > 1 variant
idx <- which(assoc$results$n.site > 1)
head(assoc$variantInfo[idx])
```

```{r}
# make a QQ plot of the SKAT test p-values
qqPlot(assoc$results$pval)
```

```{r}
seqClose(seqData)
```

## Ancestry and Relatedness Inference

1. Perform a second PC-AiR analysis, this time using the PC-Relate kinship matrix as the kinship estimates (you should still use the KING-robust matrix for the ancestry divergence estimates). How does the unrelated set compare to the first PC-AiR analysis?

We run the second PC-AiR analysis the same way as the first, except using the PC-Relate kinship matrix we created above (`pcrelMat`) as the input to the `kinobj` parameter -- this means that we are using the PC-Relate estimates instead of the KING estimates to identify related pairs of samples. In the solution shown here, we also demonstrate that a `SeqVarData` object can be used as input, but we need to specify the variants to use in the analysis with the `snp.include` parameter.

```{r}
# load and prepare genotype data
gdsfile <- "AnalysisFiles/1KG_phase3_subset.gds"
gds <- seqOpen(gdsfile)
seqData <- SeqVarData(gds)

# load list of LD-pruned variants
pruned <- get(load("AnalysisFiles/ld_pruned_variants.RData"))

# load the KING-robust kinship estimates
kingMat <- get(load("AnalysisFiles/king_matrix.RData"))

# load the PC-Relate kinship estimates from the first iteration
pcrelMat <- get(load("AnalysisFiles/pcrelate_matrix_round1.RData"))
```

```{r pcair2}
# run PC-AiR
pca2 <- pcair(seqData,
              kinobj=pcrelMat,
              kin.thresh=2^(-9/2),
              divobj=kingMat,
              div.thresh=-2^(-9/2),
              snp.include=pruned)
```

```{r}
names(pca2)
```

```{r}
# the unrelated set of samples
length(pca2$unrels)

# the related set of samples
length(pca2$rels)
```

```{r}
# extract the top 10 PCs and make a data.frame
pcs2 <- data.frame(pca2$vectors[,1:10])
colnames(pcs2) <- paste0('PC', 1:10)
pcs2$sample.id <- pca2$sample.id
head(pcs2)

save(pcs2, file="OutputFiles/pcs.RData")
```

We see that there are now 1,070 samples in the unrelated set, as opposed to 1,040 in the first PC-AiR analysis. This indicates that KING-robust likely overestimated relatedness for some pairs due to bias from ancestry admixture.
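
To make the comparison concrete, we can compare the two unrelated sets directly. This sketch assumes the PC-AiR object from the first analysis is still in the workspace under the name `pca` (a hypothetical name here; substitute whatever you called it):

```{r, eval=FALSE}
# samples newly added to the unrelated set in the second analysis
length(setdiff(pca2$unrels, pca$unrels))

# samples no longer in the unrelated set
length(setdiff(pca$unrels, pca2$unrels))
```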

2. Make a parallel coordinates plot of the top 10 PC-AiR PCs. How does this compare to the plot from the first iteration? How many PCs seem to reflect ancestry?

We can reuse the code from above to make the parallel coordinates plot.

```{r pcair2_parcoord, message = FALSE}
library(Biobase)
sampfile <- "AnalysisFiles/sample_annotation.RData"
annot <- get(load(sampfile))

# rename the PC columns so the x-axis is labeled 1-10
pc.df <- as.data.frame(pcs2)
names(pc.df)[1:10] <- 1:10

library(dplyr)
annot <- pData(annot) %>%
    dplyr::select(sample.id, Population)
pc.df <- left_join(pc.df, annot, by="sample.id")

library(GGally)
library(RColorBrewer)
pop.cols <- setNames(brewer.pal(12, "Paired"),
                     c("ACB", "ASW", "CEU", "GBR", "CHB", "JPT", "CLM", "MXL", "LWK", "YRI", "GIH", "PUR"))
ggparcoord(pc.df, columns=1:10, groupColumn="Population", scale="uniminmax") +
    scale_color_manual(values=pop.cols) +
    xlab("PC") + ylab("")
```

The plot looks a bit cleaner than the one from the first PC-AiR analysis. Clearly, PCs 1-4 are reflective of ancestry here. In the prior analysis, PC7 also seemed to pick up some component of ancestry.

3. Perform a second PC-Relate analysis, this time using the new PC-AiR PCs to adjust for ancestry. Make a hexbin plot of estimated kinship vs. IBD0.

We run the second PC-Relate analysis the same way as the first, except using the new PC-AiR PCs that we just generated to adjust for ancestry, and using the new PC-AiR unrelated set as our `training.set`.

```{r pcrelate2}
# filter the GDS object to our LD-pruned variants
seqSetFilter(seqData, variant.id=pruned)
iterator <- SeqVarBlockIterator(seqData, verbose=FALSE)

# run PC-Relate
pcrel2 <- pcrelate(iterator,
                   pcs=pca2$vectors[,1:4],
                   training.set=pca2$unrels)

save(pcrel2, file="OutputFiles/pcrelate_kinship.RData")
```

```{r pcrelate2_plot}
ggplot(pcrel2$kinBtwn, aes(k0, kin)) +
    geom_hline(yintercept=2^(-seq(3,9,2)/2), linetype="dashed", color="grey") +
    geom_hex(bins = 100) +
    geom_abline(intercept = 0.25, slope = -0.25) +
    ylab("kinship estimate") +
    theme_bw()
```

```{r}
seqClose(seqData)
```

## Mixed models

1. Perform a single-variant association test for `status`. Adjust for sex, age, study, ancestry, and kinship in the model. Don't forget to consider the `family` parameter. Make a QQ plot of the p-values for all variants with MAC >= 5.

The first step is to fit the null model. We include the first 4 PCs as covariates in our model to adjust for ancestry, and we include a random effect proportional to the kinship matrix to adjust for genetic relatedness. Recall that with a binary outcome, we set `family = binomial` -- because we have a random effect, this will fit an approximate logistic mixed model using the [GMMAT method](https://www.cell.com/ajhg/fulltext/S0002-9297(16)00063-X).

```{r}
# load the sample annotation data
annot <- get(load("AnalysisFiles/sample_phenotype_pcs.RData"))

# load and prepare the genotype data
gdsfile <- "AnalysisFiles/1KG_phase3_subset_chr1.gds"
gdsfmt::showfile.gds(closeall=TRUE) # make sure file is not already open
gds <- seqOpen(gdsfile)
seqData <- SeqVarData(gds, sampleData=annot)
iterator <- SeqVarBlockIterator(seqData, verbose=FALSE)

# load the kinship matrix
kinfile <- "AnalysisFiles/pcrelate_kinship.RData"
pcrel <- get(load(kinfile))
kinship <- pcrelateToMatrix(pcrel, scaleKin=2, verbose=FALSE)
```

```{r exercise_mm_nullmod}
nullmod.status <- fitNullModel(annot,
                               outcome="status",
                               covars=c("sex", "age", "study", paste0("PC", 1:4)),
                               cov.mat=kinship,
                               family=binomial,
                               verbose=FALSE)
```

```{r}
# description of the model we fit
nullmod.status$model
```

```{r}
# fixed effect regression estimates
nullmod.status$fixef
```

```{r}
# variance component estimate for the kinship random effect
nullmod.status$varComp
```

Now that we have the null model, we perform the single-variant association tests and make the QQ plot the same way as before.

```{r exercise_mm_assoc}
# run the single-variant association test
assoc.status <- assocTestSingle(iterator, nullmod.status, test="Score")
```

```{r}
head(assoc.status)
```

```{r}
qqPlot(assoc.status$Score.pval[assoc.status$MAC >= 5])
```

```{r}
seqClose(seqData)
```


## Variant annotation

1. Using [Annotation Explorer](https://platform.sb.biodatacatalyst.nhlbi.nih.gov/u/biodatacatalyst/annotation-explorer/), generate a new set of aggregation units by setting up the same filtering criteria as in use case 1, but this time use a different CADD phred score cut-off (for example, 40 or 10) and study how that changes the plots under the `interactive plots` tab of Annotation Explorer. For example, how does changing the filtering criteria change the number of aggregation units with no variants? How does the distribution and number of aggregation units in each bin change in the histogram?

In general, a more stringent filtering approach will reduce the number of aggregation units that retain at least one variant (for example, you will see fewer units with no variants at a CADD phred cut-off of 10 than at 40). How the histogram changes -- the total number of aggregation units (Y-axis) in each bin of variant counts (X-axis) -- depends on the characteristics of the features used as grouping criteria (the size of the aggregating regions) and on the distribution of values of the annotation field used for filtering.

Sometimes there is no obvious or recommended cut-off for an annotation field. Experimenting with different filtering criteria can help a user visualize their effects on the aggregation unit characteristics and assist in choosing filtering criteria in an informed way.

## Annotation informed aggregate association tests

1. Since we are working with a subset of the data, many of the genes listed in `group_id` have a very small number of variants. Create a new set of aggregation units based on position, rather than gene name -- create 10 units that are 1MB long and span all of the chr1 variants by using the TopmedPipeline function `aggregateGRanges`. Run a SKAT test using those units and a `SeqVarRangeIterator` object.

```{r}
# load aggregation units
aggfile <- "AnalysisFiles/variants_by_gene.RData"
aggunit <- get(load(aggfile))

# subset to chromosome 1
aggunit1 <- filter(aggunit, chr == 1)
```


```{r exercise_aggregate}
# minimum variant position
minp <- min(aggunit1$pos)
# maximum variant position
maxp <- max(aggunit1$pos)

# create a data frame breaking the position range into 10 pieces
aggByPos <- data.frame(chr=1,
                       start=seq(minp, maxp-1e6, length.out=10),
                       end=seq(minp+1e6, maxp, length.out=10))
aggByPos$group_id <- 1:nrow(aggByPos)
dim(aggByPos)
head(aggByPos)
```

```{r}
aggVarList <- TopmedPipeline::aggregateGRanges(aggByPos)
aggVarList
```

```{r, message=FALSE}
# load sample annotation
annotfile <- "AnalysisFiles/sample_phenotype_pcs.RData"
annot <- get(load(annotfile))

# load and prepare genotype data
gdsfile <- "AnalysisFiles/1KG_phase3_subset_chr1.gds"
gdsfmt::showfile.gds(closeall=TRUE) # make sure file is not already open
gds <- seqOpen(gdsfile)
seqData <- SeqVarData(gds, sampleData=annot)
```

```{r}
# prepare iterator using defined aggregation units
iterator <- SeqVarRangeIterator(seqData, variantRanges=aggVarList, verbose=FALSE)
```

```{r}
# run SKAT test by aggregation unit
# (nullmod is the null model loaded earlier from AnalysisFiles/null_model.RData)
assoc <- assocTestAggregate(iterator,
                            nullmod,
                            test="SKAT",
                            AF.max=0.1,
                            weight.beta=c(1,25))
```

```{r}
assoc$results
```

```{r}
seqClose(seqData)
```