Revision d973538c src/R/nucleominer.R
b/src/R/nucleominer.R | |
---|---|---
153 | 153 | nuc_width = 160, ##<< Nucleosome width.
154 | 154 | only_f = FALSE, ##<< Filter only F reads.
155 | 155 | only_r = FALSE, ##<< Filter only R reads.
156 | | filter_for_coverage = FALSE ##<< Does it filter for plot coverage?
| 156 | filter_for_coverage = FALSE, ##<< Does it filter for plot coverage?
| 157 | USE_DPLYR = TRUE ##<< Use dplyr lib to filter reads.
157 | 158 | ) {
158 | | if (only_f) {
159 | | inputs = inputs[inputs[,1]==chr & inputs[,2] >= x_min - nuc_width & inputs[,3] == "F" & inputs[,2] <= x_max + nuc_width,]
160 | | } else if (only_r) {
161 | | inputs = inputs[inputs[,1]==chr & inputs[,2] >= x_min - nuc_width & inputs[,3] == "R" & inputs[,2] <= x_max + nuc_width,]
162 | | } else {
163 | | inputs = inputs[inputs[,1]==chr & inputs[,2] >= x_min - nuc_width & inputs[,2] <= x_max + nuc_width,]
164 | | }
| 159 | n = names(inputs)
| 160 |
| 161 | if (!USE_DPLYR) {
| 162 | if (only_f) {
| 163 | inputs_out = inputs[inputs[,1]==chr & inputs[,2] >= x_min - nuc_width & inputs[,3] == "F" & inputs[,2] <= x_max + nuc_width,]
| 164 | } else if (only_r) {
| 165 | inputs_out = inputs[inputs[,1]==chr & inputs[,2] >= x_min - nuc_width & inputs[,3] == "R" & inputs[,2] <= x_max + nuc_width,]
| 166 | } else {
| 167 | inputs_out = inputs[inputs[,1]==chr & inputs[,2] >= x_min - nuc_width & inputs[,2] <= x_max + nuc_width,]
| 168 | }
| 169 | } else {
| 170 | names(inputs) = c("chr", "pos", "str", "lev")
| 171 | if (only_f) {
| 172 | inputs_out = filter(inputs, chr == chr, pos >= x_min - nuc_width, str == "F", pos <= x_max + nuc_width)
| 173 | } else if (only_r) {
| 174 | inputs_out = filter(inputs, chr == chr, pos >= x_min - nuc_width, str == "R" & pos <= x_max + nuc_width)
| 175 | } else {
| 176 | inputs_out = filter(inputs, chr == chr, pos >= x_min - nuc_width, pos <= x_max + nuc_width)
| 177 | }
| 178 | # if (!filter_for_coverage) {
| 179 | # inputs$corrected_inputs_coords = inputs[,2] + nuc_width/2 * sign_from_strand(inputs[,3])
| 180 | # inputs = filter(inputs, chr == chr, corrected_inputs_coords >= x_min, corrected_inputs_coords <= x_max)
| 181 | # inputs$corrected_inputs_coords = NULL
| 182 | # }
| 183 | }
| 184 |
165 | 185 | if (!filter_for_coverage) {
166 | | corrected_inputs_coords = inputs[,2] + nuc_width/2 * sign_from_strand(inputs[,3])
167 | | inputs = inputs[inputs[,1]==chr & corrected_inputs_coords >= x_min & corrected_inputs_coords <= x_max,]
| 186 | corrected_inputs_coords = inputs_out[,2] + nuc_width/2 * sign_from_strand(inputs_out[,3])
| 187 | inputs_out = inputs_out[inputs_out[,1]==chr & corrected_inputs_coords >= x_min & corrected_inputs_coords <= x_max,]
168 | 188 | }
169 | | return(inputs)
| 189 |
| 190 | names(inputs_out) = n
| 191 | return(inputs_out)
170 | 192 | ### Returns filtred inputs.
171 | 193 | }
172 | 194 |
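Note on the new `USE_DPLYR` branch: it renames the columns to `c("chr", "pos", "str", "lev")` and filters with `dplyr::filter()`. One hedged observation, not part of the diff: inside `filter()` a bare `chr == chr` compares the `chr` column with itself, so the chromosome argument of the function does not restrict the rows there, whereas the base-R branch keeps that restriction via `inputs[,1]==chr`. A minimal sketch, using an invented toy reads table and the `.data`/`.env` pronouns to disambiguate the column from a same-named argument:

```r
library(dplyr)

# Hypothetical toy reads table with the c("chr", "pos", "str", "lev") layout.
reads = data.frame(chr = c("chr1", "chr1", "chr2"),
                   pos = c(100, 5000, 120),
                   str = c("F", "R", "F"),
                   lev = 1)

filter_reads = function(reads, chr, x_min, x_max, nuc_width = 160) {
  # .data$chr is the column, .env$chr is the function argument;
  # a bare `chr == chr` would keep every non-NA row.
  filter(reads,
         .data$chr == .env$chr,
         pos >= x_min - nuc_width,
         pos <= x_max + nuc_width)
}

filter_reads(reads, chr = "chr1", x_min = 50, x_max = 300)
# keeps only the chr1 read at pos 100
```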
... | ... |
205 | 227 | }
206 | 228 | return(TRUE)
207 | 229 | }
208 | | store_cluster = function(clusters, new_cluster, nb_nucs_in_cluster,nuc_from_track,nb_tracks, min_nuc_center, max_nuc_center) {
| 230 | store_cluster = function(clusters, new_cluster, nb_nucs_in_cluster, nuc_from_track, nb_tracks, min_nuc_center, max_nuc_center) {
209 | 231 | if ( nb_nucs_in_cluster==nb_tracks & sum(nuc_from_track)==nb_tracks) {
210 | 232 | new_cluster$wp = TRUE
211 | 233 | center = (new_cluster$lower_bound + new_cluster$upper_bound) / 2
... | ... |
248 | 270 | }
249 | 271 | i = i+1
250 | 272 | }
| 273 | nb_tracks = length(tf_outs)
251 | 274 | # print(track_readers)
252 | 275 | new_cluster = NULL
253 | 276 | nb_nucs_in_cluster = 0
254 | 277 | nuc_from_track = c()
255 | | for (i in 1:length(tf_outs)){
| 278 | for (i in 1:nb_tracks){
256 | 279 | nuc_from_track[i] = FALSE
257 | 280 | }
258 | 281 | # Start clustering
... | ... |
297 | 320 | } else {
298 | 321 | if (!is.null(new_cluster)) {
299 | 322 | # store old cluster
300 | | clusters = store_cluster(clusters, new_cluster, nb_nucs_in_cluster,nuc_from_track,length(tf_outs),min_nuc_center, max_nuc_center)
| 323 | clusters = store_cluster(clusters, new_cluster, nb_nucs_in_cluster, nuc_from_track, nb_tracks, min_nuc_center, max_nuc_center)
301 | 324 | }
302 | 325 | # Reinit current cluster composition stuff
303 | 326 | nb_nucs_in_cluster = 0
304 | 327 | nuc_from_track = c()
305 | | for (i in 1:length(tf_outs)){
| 328 | for (i in 1:nb_tracks){
306 | 329 | nuc_from_track[i] = FALSE
307 | 330 | }
308 | 331 | # create new cluster
... | ... |
331 | 354 | # store last cluster
332 | 355 | if (!is.null(new_cluster)) {
333 | 356 | # store old cluster
334 | | clusters = store_cluster(clusters, new_cluster, nb_nucs_in_cluster,nuc_from_track,length(tf_outs),min_nuc_center, max_nuc_center)
| 357 | clusters = store_cluster(clusters, new_cluster, nb_nucs_in_cluster, nuc_from_track, nb_tracks, min_nuc_center, max_nuc_center)
335 | 358 | }
336 | 359 | return(list(clusters, llr_scores))
337 | 360 | ### Returns a list of clusterized nucleosomes, and all computed llr scores.
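The clustering refactor computes `nb_tracks = length(tf_outs)` once and threads it through the per-cluster reset loops and the `store_cluster()` calls, instead of re-evaluating `length(tf_outs)` at every call site. A small standalone sketch of the equivalent reset step (illustrative only; `tf_outs` is assumed to be the list of per-track nucleosome calls):

```r
tf_outs = list(track1 = NULL, track2 = NULL, track3 = NULL)  # placeholder tracks
nb_tracks = length(tf_outs)          # hoisted once, as in the new code

# Per-cluster reset of the track membership flags; same result as the
# for (i in 1:nb_tracks) loop that sets each entry to FALSE.
nuc_from_track = rep(FALSE, nb_tracks)
nb_nucs_in_cluster = 0
```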
... | ... |
364 | 387 | flat_aggregated_intra_strain_nucs = function(# to flat aggregate_intra_strain_nucs function output
365 | 388 | ### This function builds a dataframe of all clusters obtain from aggregate_intra_strain_nucs function.
366 | 389 | partial_strain_maps, ##<< the output of aggregate_intra_strain_nucs function
367 | | cur_index ##<< the index of the roi involved
| 390 | cur_index, ##<< the index of the roi involved
| 391 | nb_tracks=3 ##<< the number of replicates
368 | 392 | ) {
369 | 393 | if (length(partial_strain_maps) == 0 ){
370 | 394 | print(paste("Empty partial_strain_maps for roi", cur_index, "ands current strain." ))
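`flat_aggregated_intra_strain_nucs()` gains an `nb_tracks` argument that defaults to 3, so existing two-argument calls should keep their triplicate behaviour. Hypothetical calls for illustration, assuming the nucleominer functions and inputs are already loaded:

```r
# Equivalent for triplicate data, since nb_tracks defaults to 3:
flat_aggregated_intra_strain_nucs(partial_strain_maps, cur_index)
flat_aggregated_intra_strain_nucs(partial_strain_maps, cur_index, nb_tracks = 3)

# Other replicate counts would now be passed explicitly:
flat_aggregated_intra_strain_nucs(partial_strain_maps, cur_index, nb_tracks = 2)
```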
... | ... |
386 | 410 | tmp_nuc_as_list[["nb_reads"]] = length(all_original_reads)
387 | 411 | tmp_nuc_as_list[["nb_nucs"]] = length(tmp_nuc$nucs)
388 | 412 | if (tmp_nuc$wp) {
389 | | tmp_nuc_as_list[["llr_1"]] = signif(tmp_nuc$nucs[[2]]$llr_score,5)
390 | | tmp_nuc_as_list[["llr_2"]] = signif(tmp_nuc$nucs[[3]]$llr_score,5)
| 413 | for (i in 1:(nb_tracks-1)) {
| 414 | tmp_nuc_as_list[[paste("llr", i, sep="_")]] = signif(tmp_nuc$nucs[[i + 1]]$llr_score,5)
| 415 | }
391 | 416 | } else {
392 | | tmp_nuc_as_list[["llr_1"]] = NA
393 | | tmp_nuc_as_list[["llr_2"]] = NA
| 417 | for (i in 1:(nb_tracks-1)) {
| 418 | tmp_nuc_as_list[[paste("llr", i, sep="_")]] = NA
| 419 | }
394 | 420 | }
395 | 421 | return(tmp_nuc_as_list)
396 | 422 | })
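The hard-coded `llr_1`/`llr_2` fields are replaced by a loop over `nb_tracks - 1` replicate scores, so the flattened map adapts to the number of replicates. A small standalone sketch of what the loop produces (the scores are invented; in the package they come from `tmp_nuc$nucs[[i + 1]]$llr_score`):

```r
nb_tracks = 3
llr_scores = c(NA, 1.234567, 0.987654)   # hypothetical per-nucleosome scores

tmp_nuc_as_list = list()
for (i in 1:(nb_tracks - 1)) {
  # builds names "llr_1", "llr_2", ... so any number of replicates is supported
  tmp_nuc_as_list[[paste("llr", i, sep = "_")]] = signif(llr_scores[i + 1], 5)
}
str(tmp_nuc_as_list)
# List of 2
#  $ llr_1: num 1.23
#  $ llr_2: num 0.988
```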
... | ... |
1141 | 1167 | config=NULL, ##<< GLOBAL config variable
1142 | 1168 | big_cur=NULL ##<< A largest region than roi use to filter c2c if it is needed.
1143 | 1169 | ) {
1144 | | strain1 = roi$strain_ref
1145 | | if (strain1 == strain2) {
1146 | | roi$length = roi$end - roi$begin + sign(roi$end - roi$begin) * 1
| 1170 | strain1 = roi$strain_ref
| 1171 | # Do something or nothing?
| 1172 | if (is.null(config$NEUTRAL_TRANSLATE_CUR)) {
| 1173 | config$NEUTRAL_TRANSLATE_CUR = FALSE
| 1174 | }
| 1175 | if (strain1 == strain2 | config$NEUTRAL_TRANSLATE_CUR) {
| 1176 | roi$strain_ref = strain2
| 1177 | roi$length = roi$end - roi$begin + sign(roi$end - roi$begin)
1147 | 1178 | return(roi)
1148 | 1179 | }
1149 | |
| 1180 |
1150 | 1181 | # Extract c2c file
1151 | 1182 | if (!is.null(big_cur)) {
1152 | 1183 | # Dealing with big_cur
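The rewritten block defaults a missing `NEUTRAL_TRANSLATE_CUR` flag to `FALSE` before testing it, so configs that never defined the option keep the old behaviour, and coordinate translation is then skipped when the two strains match or the flag is set. A minimal standalone sketch of that defaulting pattern (toy values, not the package's `config`):

```r
config = list(OTHER_OPTION = 1)   # hypothetical config without the flag

# Absent list elements are NULL in R, so the flag is defaulted before use;
# using the NULL directly in `if (... | config$NEUTRAL_TRANSLATE_CUR)` would error.
if (is.null(config$NEUTRAL_TRANSLATE_CUR)) {
  config$NEUTRAL_TRANSLATE_CUR = FALSE
}

strain1 = "BY"
strain2 = "RM"
if (strain1 == strain2 | config$NEUTRAL_TRANSLATE_CUR) {
  message("same strain or neutral translation: return the roi untouched")
}
```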
... | ... |
1964 | 1995 | if (plot_wp_nucs | plot_fuzzy_nucs | plot_common_nucs ) {
1965 | 1996 | replicates_wp_nucs[[replicate_rank]] = wp_nucs
1966 | 1997 | strain = samples[[1]]$strain
1967 | | wp_maps[[strain]] = flat_aggregated_intra_strain_nucs(wp_nucs, "foo")
| 1998 | wp_maps[[strain]] = flat_aggregated_intra_strain_nucs(wp_nucs, "foo", nb_tracks)
1968 | 1999 | fuzzy_maps[[strain]] = get_intra_strain_fuzzy(wp_maps[[strain]], as.list(samples[[1]]$roi), samples[[1]]$strain, config=config)
1969 | 2000 |
1970 | 2001 | if (plot_fuzzy_nucs) {