From 7756aebab1584fd82236f9e6f353157070c757fc Mon Sep 17 00:00:00 2001 From: Eric Coissac Date: Thu, 2 Feb 2023 23:11:08 +0100 Subject: [PATCH] more doc --- doc/_book/search.json | 2 +- doc/_book/tutorial.html | 133 +++++++++++++++++++--------- doc/tutorial.qmd | 125 ++++++++++++++------------ pkg/obitools/obiannotate/options.go | 35 ++++---- 4 files changed, 181 insertions(+), 114 deletions(-) diff --git a/doc/_book/search.json b/doc/_book/search.json index 517e67a..38a2247 100644 --- a/doc/_book/search.json +++ b/doc/_book/search.json @@ -53,7 +53,7 @@ "href": "tutorial.html#step-by-step-analysis", "title": "2  OBITools V4 Tutorial", "section": "2.2 Step by step analysis", - "text": "2.2 Step by step analysis\n\n2.2.1 Recover full sequence reads from forward and reverse partial reads\nWhen using the result of a paired-end sequencing assay with supposedly overlapping forward and reverse reads, the first step is to recover the assembled sequence.\nThe forward and reverse reads of the same fragment are at the same line position in the two fastq files obtained after sequencing. Based on these two files, the assembly of the forward and reverse reads is done with the obipairing utility that aligns the two reads and returns the reconstructed sequence.\nIn our case, the command is:\n\nobipairing --min-identity=0.8 \\\n --min-overlap=10 \\\n -F wolf_data/wolf_F.fastq \\\n -R wolf_data/wolf_R.fastq \\\n > results/wolf.fastq \n\nThe --min-identity and --min-overlap options allow discarding sequences with low alignment quality. If after the aligment, the overlaping parts of the reads is shorter than 10 base pairs or the similarity over this aligned region is below 80% of identity, in the output file, the forward and reverse reads are not aligned but concatenated, and the value of the mode attribute in the sequence header is set to joined instead of alignment.\n\n\n2.2.2 Remove unaligned sequence records\nUnaligned sequences (:pymode=joined) cannot be used. The following command allows removing them from the dataset:\n\nobigrep -p 'annotations.mode != \"join\"' \\\n results/wolf.fastq > results/wolf.ali.fastq\n\nThe -p requires a go like expression. annotations.mode != \"join\" means that if the value of the mode annotation of a sequence is different from join, the corresponding sequence record will be kept.\nThe first sequence record of wolf.ali.fastq can be obtained using the following command line:\n\nhead -n 4 results/wolf.ali.fastq\n\nThe folling piece of code appears on thew window of tour terminal.\n@HELIUM_000100422_612GNAAXX:7:108:5640:3823#0/1 {\"ali_dir\":\"left\",\"ali_length\":62,\"mode\":\"alignment\",\"pairing_mismatches\":{\"(T:26)->(G:13)\":62,\"(T:34)->(G:18)\":48},\"score\":484,\"score_norm\":0.968,\"seq_a_single\":46,\"seq_ab_match\":60,\"seq_b_single\":46}\nccgcctcctttagataccccactatgcttagccctaaacacaagtaattaatataacaaaattgttcgccagagtactaccggcaatagcttaaaactcaaaggacttggcggtgctttatacccttctagaggagcctgttctaaggaggcgg\n+\nCCCCCCCBCCCCCCCCCCCCCCCCCCCCCCBCCCCCBCCCCCCC results/wolf.ali.assigned.fastq\n\nThis command creates two files:\n\nunidentified.fastq containing all the sequence records that were not assigned to a sample/marker combination\nwolf.ali.assigned.fastq containing all the sequence records that were properly assigned to a sample/marker combination\n\nNote that each sequence record of the wolf.ali.assigned.fastq file contains only the barcode sequence as the sequences of primers and tags are removed by the obimultiplex program. 
Information concerning the experiment, sample, primers and tags is added as attributes in the sequence header.\nFor instance, the first sequence record of wolf.ali.assigned.fastq is:\n@HELIUM_000100422_612GNAAXX:7:108:5640:3823#0/1_sub[28..127] {\"ali_dir\":\"left\",\"ali_length\":62,\"direction\":\"direct\",\"experiment\":\"wolf_diet\",\"forward_match\":\"ttagataccccactatgc\",\"forward_mismatches\":0,\"forward_primer\":\"ttagataccccactatgc\",\"forward_tag\":\"gcctcct\",\"mode\":\"alignment\",\"pairing_mismatches\":{\"(T:26)->(G:13)\":35,\"(T:34)->(G:18)\":21},\"reverse_match\":\"tagaacaggctcctctag\",\"reverse_mismatches\":0,\"reverse_primer\":\"tagaacaggctcctctag\",\"reverse_tag\":\"gcctcct\",\"sample\":\"29a_F260619\",\"score\":484,\"score_norm\":0.968,\"seq_a_single\":46,\"seq_ab_match\":60,\"seq_b_single\":46}\nttagccctaaacacaagtaattaatataacaaaattgttcgccagagtactaccggcaatagcttaaaactcaaaggacttggcggtgctttataccctt\n+\nCCCBCCCCCBCCCCCCC results/wolf.ali.assigned.uniq.fasta\n\nNote that obiuniq returns a fasta file.\nThe first sequence record of wolf.ali.assigned.uniq.fasta is:\n>HELIUM_000100422_612GNAAXX:7:93:6991:1942#0/1_sub[28..126] {\"ali_dir\":\"left\",\"ali_length\":63,\"count\":1,\"direction\":\"reverse\",\"experiment\":\"wolf_diet\",\"forward_match\":\"ttagataccccactatgc\",\"forward_mismatches\":0,\"forward_primer\":\"ttagataccccactatgc\",\"forward_tag\":\"gaatatc\",\"merged_sample\":{\"26a_F040644\":1},\"mode\":\"alignment\",\"pairing_mismatches\":{\"(A:10)->(G:34)\":76,\"(C:06)->(A:34)\":58},\"reverse_match\":\"tagaacaggctcctctag\",\"reverse_mismatches\":0,\"reverse_primer\":\"tagaacaggctcctctag\",\"reverse_tag\":\"gaatatc\",\"score\":730,\"score_norm\":0.968,\"seq_a_single\":45,\"seq_ab_match\":61,\"seq_b_single\":45}\nttagccctaaacataaacattcaataaacaagaatgttcgccagagaactactagcaaca\ngcctgaaactcaaaggacttggcggtgctttatatccct\nThe run of obiuniq has added two key=values entries in the header of the fasta sequence:\n\n\"merged_sample\":{\"29a_F260619\":1}: this sequence have been found once in a single sample called 29a_F260619\n\"count\":1 : the total count for this sequence is \\(1\\)\n\nTo keep only these two attributes, we can use the obiannotate command:\n\nobiannotate -k count -k merged_sample \\\n results/wolf.ali.assigned.uniq.fasta \\\n > results/wolf.ali.assigned.simple.fasta\n\nThe first five sequence records of wolf.ali.assigned.simple.fasta become:\n>HELIUM_000100422_612GNAAXX:7:26:18930:11105#0/1_sub[28..127] {\"count\":1,\"merged_sample\":{\"29a_F260619\":1}}\nttagccctaaacacaagtaattaatataacaaaatwattcgcyagagtactacmggcaat\nagctyaaarctcamagrwcttggcggtgctttataccctt\n>HELIUM_000100422_612GNAAXX:7:58:5711:11399#0/1_sub[28..127] {\"count\":1,\"merged_sample\":{\"29a_F260619\":1}}\nttagccctaaacacaagtaattaatataacaaaattattcgccagagtwctaccgssaat\nagcttaaaactcaaaggactgggcggtgctttataccctt\n>HELIUM_000100422_612GNAAXX:7:100:15836:9304#0/1_sub[28..127] {\"count\":1,\"merged_sample\":{\"29a_F260619\":1}}\nttagccctaaacatagataattacacaaacaaaattgttcaccagagtactagcggcaac\nagcttaaaactcaaaggacttggcggtgctttataccctt\n>HELIUM_000100422_612GNAAXX:7:55:13242:9085#0/1_sub[28..126] {\"count\":4,\"merged_sample\":{\"26a_F040644\":4}}\nttagccctaaacataaacattcaataaacaagagtgttcgccagagtactactagcaaca\ngcctgaaactcaaaggacttggcggtgctttacatccct\n>HELIUM_000100422_612GNAAXX:7:86:8429:13723#0/1_sub[28..127] {\"count\":7,\"merged_sample\":{\"15a_F730814\":5,\"29a_F260619\":2}}\nttagccctaaacacaagtaattaatataacaaaattattcgccagagtactaccggcaat\nagcttaaaactcaaaggactcggcggtgctttataccctt\n\n\n2.2.5 Denoise the sequence 
dataset\nTo have a set of sequences assigned to their corresponding samples does not mean that all sequences are biologically meaningful i.e. some of these sequences can contains PCR and/or sequencing errors, or chimeras.\n\nTag the sequences for PCR errors (sequence variants)\nThe obiclean program tags sequence variants as potential error generated during PCR amplification. We ask it to keep the head sequences (-H option) that are sequences which are not variants of another sequence with a count greater than 5% of their own count (-r 0.05 option).\n\nobiclean -s sample -r 0.05 -H \\\n results/wolf.ali.assigned.simple.fasta \\\n > results/wolf.ali.assigned.simple.clean.fasta \n\nOne of the sequence records of wolf.ali.assigned.simple.clean.fasta is:\n>HELIUM_000100422_612GNAAXX:7:66:4039:8016#0/1_sub[28..127] {\"count\":17,\"merged_sample\":{\"13a_F730603\":17},\"obiclean_head\":true,\"obiclean_headcount\":1,\"obiclean_internalcount\":0,\"obi\nclean_samplecount\":1,\"obiclean_singletoncount\":0,\"obiclean_status\":{\"13a_F730603\":\"h\"},\"obiclean_weight\":{\"13a_F730603\":25}}\nctagccttaaacacaaatagttatgcaaacaaaactattcgccagagtactaccggcaac\nagcccaaaactcaaaggacttggcggtgcttcacaccctt\nTo remove such sequences as much as possible, we first discard rare sequences and then rsequence variants that likely correspond to artifacts.\n\n\nGet some statistics about sequence counts\n\nobicount results/wolf.ali.assigned.simple.clean.fasta\n\ntime=\"2023-01-31T22:44:30+01:00\" level=info msg=\"Appending results/wolf.ali.assigned.simple.clean.fasta file\\n\"\n 2749 36409 273387\n\n\nThe dataset contains \\(4313\\) sequences variant corresponding to 42452 sequence reads. Most of the variants occur only a single time in the complete dataset and are usualy named singletons\n\nobigrep -p 'sequence.Count() == 1' results/wolf.ali.assigned.simple.clean.fasta \\\n | obicount\n\ntime=\"2023-01-31T22:44:30+01:00\" level=info msg=\"Appending results/wolf.ali.assigned.simple.clean.fasta file\\n\"\ntime=\"2023-01-31T22:44:30+01:00\" level=info msg=\"Reading sequences from stdin in guessed\\n\"\ntime=\"2023-01-31T22:44:30+01:00\" level=info msg=\"On output use JSON headers\"\n 2309 2309 229920\n\n\nIn that dataset sigletons corresponds to \\(3511\\) variants.\nUsing R and the ROBIFastread package able to read headers of the fasta files produced by OBITools, we can get more complete statistics on the distribution of occurrencies.\n\nlibrary(ROBIFastread)\nlibrary(ggplot2)\n\nseqs <- read_obifasta(\"results/wolf.ali.assigned.simple.clean.fasta\",keys=\"count\")\n\nggplot(data = seqs, mapping=aes(x = count)) +\n geom_histogram(bins=100) +\n scale_y_sqrt() +\n scale_x_sqrt() +\n geom_vline(xintercept = 10, col=\"red\", lty=2) +\n xlab(\"number of occurrencies of a variant\") \n\n\n\n\nIn a similar way it is also possible to plot the distribution of the sequence length.\n\nggplot(data = seqs, mapping=aes(x = nchar(sequence))) +\n geom_histogram() +\n scale_y_log10() +\n geom_vline(xintercept = 80, col=\"red\", lty=2) +\n xlab(\"sequence lengths in base pair\")\n\n\n\n\n\n\nKeep only the sequences having a count greater or equal to 10 and a length shorter than 80 bp\nBased on the previous observation, we set the cut-off for keeping sequences for further analysis to a count of 10. To do this, we use the obigrep command. The -p 'count>=10' option means that the python expression :pycount>=10 must be evaluated to :pyTrue for each sequence to be kept. 
Based on previous knowledge we also remove sequences with a length shorter than 80 bp (option -l) as we know that the amplified 12S-V5 barcode for vertebrates must have a length around 100bp.\n\nobigrep -l 80 -p 'sequence.Count() >= 10' results/wolf.ali.assigned.simple.clean.fasta \\\n > results/wolf.ali.assigned.simple.clean.c10.l80.fasta\n\nThe first sequence record of results/wolf.ali.assigned.simple.clean.c10.l80.fasta is:\n>HELIUM_000100422_612GNAAXX:7:22:2603:18023#0/1_sub[28..127] {\"count\":12182,\"merged_sample\":{\"15a_F730814\":7559,\"29a_F260619\":4623},\"obiclean_head\":true,\"obiclean_headcount\":2,\"obiclean_internalcount\":0,\"obiclean_samplecount\":2,\"obiclean_singletoncount\":0,\"obiclean_status\":{\"15a_F730814\":\"h\",\"29a_F260619\":\"h\"},\"obiclean_weight\":{\"15a_F730814\":9165,\"29a_F260619\":6275}}\nttagccctaaacacaagtaattaatataacaaaattattcgccagagtactaccggcaat\nagcttaaaactcaaaggacttggcggtgctttataccctt\nAt that time in the data cleanning we have conserved :\n\nobicount results/wolf.ali.assigned.simple.clean.c10.l80.fasta\n\ntime=\"2023-01-31T22:44:32+01:00\" level=info msg=\"Appending results/wolf.ali.assigned.simple.clean.c10.l80.fasta file\\n\"\n 26 31337 2585\n\n\n\n\n\n2.2.6 Taxonomic assignment of sequences\nOnce denoising has been done, the next step in diet analysis is to assign the barcodes to the corresponding species in order to get the complete list of species associated to each sample.\nTaxonomic assignment of sequences requires a reference database compiling all possible species to be identified in the sample. Assignment is then done based on sequence comparison between sample sequences and reference sequences.\n\nDownload the taxonomy\n\nmkdir TAXO\ncd TAXO\ncurl http://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz \\\n | tar -zxvf -\ncd ..\n\n\n\nBuild a reference database\nOne way to build the reference database is to use the ecoPCR program to simulate a PCR and to extract all sequences from the EMBL that may be amplified in silico by the two primers (TTAGATACCCCACTATGC and TAGAACAGGCTCCTCTAG) used for PCR amplification.\nThe full list of steps for building this reference database would then be:\n\nDownload the whole set of EMBL sequences (available from: ftp://ftp.ebi.ac.uk/pub/databases/embl/release/)\nDownload the NCBI taxonomy (available from: ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz)\nFormat them into the ecoPCR format (see obiconvert for how you can produce ecoPCR compatible files)\nUse ecoPCR to simulate amplification and build a reference database based on putatively amplified barcodes together with their recorded taxonomic information\n\nAs step 1 and step 3 can be really time-consuming (about one day), we alredy provide the reference database produced by the following commands so that you can skip its construction. Note that as the EMBL database and taxonomic data can evolve daily, if you run the following commands you may end up with quite different results.\nAny utility allowing file downloading from a ftp site can be used. 
In the following commands, we use the commonly used wget Unix command.\n\nDownload the sequences\n> mkdir EMBL\n> cd EMBL\n> wget -nH --cut-dirs=4 -Arel_std_\\*.dat.gz -m ftp://ftp.ebi.ac.uk/pub/databases/embl/release/\n> cd ..\n\n\nDownload the taxonomy\n> mkdir TAXO\n> cd TAXO\n> wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz\n> tar -zxvf taxdump.tar.gz\n> cd ..\n\n\nUse obipcr to simulate an in silico` PCR\n> obipcr -d ./ECODB/embl_last -e 3 -l 50 -L 150 \\ \n TTAGATACCCCACTATGC TAGAACAGGCTCCTCTAG > v05.ecopcr\nNote that the primers must be in the same order both in wolf_diet_ngsfilter.txt and in the obipcr command.\n\n\nClean the database\n\nfilter sequences so that they have a good taxonomic description at the species, genus, and family levels (obigrep command command below).\nremove redundant sequences (obiuniq command below).\nensure that the dereplicated sequences have a taxid at the family level (obigrep command below).\nensure that sequences each have a unique identification (obiannotate command below)\n\n> obigrep -d embl_last --require-rank=species \\\n --require-rank=genus --require-rank=family v05.ecopcr > v05_clean.fasta\n\n> obiuniq -d embl_last \\ \n v05_clean.fasta > v05_clean_uniq.fasta\n\n> obigrep -d embl_last --require-rank=family \\ \n v05_clean_uniq.fasta > v05_clean_uniq_clean.fasta\n\n> obiannotate --uniq-id v05_clean_uniq_clean.fasta > db_v05.fasta\nobirefidx -t TAXO wolf_data/db_v05_r117.fasta > results/db_v05_r117.indexed.fasta\n\n\nWarning\n\nFrom now on, for the sake of clarity, the following commands will use the filenames of the files provided with the tutorial. If you decided to run the last steps and use the files you have produced, you'll have to use db_v05.fasta instead of db_v05_r117.fasta and embl_last instead of embl_r117\n\n\n\n\n\n2.2.7 Assign each sequence to a taxon\nOnce the reference database is built, taxonomic assignment can be carried out using the obitag command.\n\nobitag -t TAXO -R wolf_data/db_v05_r117.indexed.fasta \\\n results/wolf.ali.assigned.simple.clean.c10.l80.fasta \\\n > results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta\n\nThe obitag adds several attributes in the sequence record header, among them:\n\nobitag_bestmatch=ACCESSION where ACCESSION is the id of hte sequence in the reference database that best aligns to the query sequence;\nobitag_bestid=FLOAT where FLOAT*100 is the percentage of identity between the best match sequence and the query sequence;\ntaxid=TAXID where TAXID is the final assignation of the sequence by obitag\nscientific_name=NAME where NAME is the scientific name of the assigned taxid.\n\nThe first sequence record of wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta is:\n>HELIUM_000100422_612GNAAXX:7:81:18704:12346#0/1_sub[28..126] {\"count\":88,\"merged_sample\":{\"26a_F040644\":88},\"obiclean_head\":true,\"obiclean_headcount\":1,\"obiclean_internalcount\":0,\"obiclean_samplecount\":1,\"obiclean_singletoncount\":0,\"obiclean_status\":{\"26a_F040644\":\"h\"},\"obiclean_weight\":{\"26a_F040644\":208},\"obitag_bestid\":0.9207920792079208,\"obitag_bestmatch\":\"AY769263\",\"obitag_difference\":8,\"obitag_match_count\":1,\"obitag_rank\":\"clade\",\"scientific_name\":\"Boreoeutheria\",\"taxid\":1437010}\nttagccctaaacataaacattcaataaacaagaatgttcgccagaggactactagcaata\ngcttaaaactcaaaggacttggcggtgctttatatccct\n\n\n2.2.8 Generate the final result table\nSome unuseful attributes can be removed at this 
stage.\n\nobiclean_head\nobiclean_headcount\nobiclean_internalcount\nobiclean_samplecount\nobiclean_singletoncount\n\n\nobiannotate --delete-tag=obiclean_head \\\n --delete-tag=obiclean_headcount \\\n --delete-tag=obiclean_internalcount \\\n --delete-tag=obiclean_samplecount \\\n --delete-tag=obiclean_singletoncount \\\n results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta \\\n > results/wolf.ali.assigned.simple.clean.c10.l80.taxo.ann.fasta\n\nThe first sequence record of wolf.ali.assigned.simple.c10.l80.clean.taxo.ann.fasta is then:\n>HELIUM_000100422_612GNAAXX:7:84:16335:5083#0/1_sub[28..126] {\"count\":96,\"merged_sample\":{\"26a_F040644\":11,\"29a_F260619\":85},\"obiclean_status\":{\"26a_F040644\":\"s\",\"29a_F260619\":\"h\"},\"obiclean_weight\":{\"26a_F040644\":14,\"29a_F260619\":110},\"obitag_bestid\":0.9595959595959596,\"obitag_bestmatch\":\"AC187326\",\"obitag_difference\":4,\"obitag_match_count\":1,\"obitag_rank\":\"subspecies\",\"scientific_name\":\"Canis lupus familiaris\",\"taxid\":9615}\nttagccctaaacataagctattccataacaaaataattcgccagagaactactagcaaca\ngattaaacctcaaaggacttggcagtgctttatacccct\n\nThis file contains 26 sequences. You can deduce the diet of each sample:\n\n\n13a_F730603: Cervus elaphus\n15a_F730814: Capreolus capreolus\n26a_F040644: Marmota sp. (according to the location, it is Marmota marmota)\n29a_F260619: Capreolus capreolus\n\n\n\nNote that we also obtained a few wolf sequences although a wolf-blocking oligonucleotide was used.\n\n\n\n\nRiaz, Tiayyba, Wasim Shehzad, Alain Viari, François Pompanon, Pierre Taberlet, and Eric Coissac. 2011. “ecoPrimers: inference of new DNA barcode markers from whole genome sequence analysis.” Nucleic Acids Research 39 (21): e145. https://doi.org/10.1093/nar/gkr732.\n\n\nSeguritan, V, and F Rohwer. 2001. “FastGroup: a program to dereplicate libraries of 16S rDNA sequences.” BMC Bioinformatics 2 (October): 9. https://doi.org/10.1186/1471-2105-2-9.\n\n\nShehzad, Wasim, Tiayyba Riaz, Muhammad A Nawaz, Christian Miquel, Carole Poillot, Safdar A Shah, Francois Pompanon, Eric Coissac, and Pierre Taberlet. 2012. “Carnivore diet analysis based on next-generation sequencing: Application to the leopard cat (Prionailurus bengalensis) in Pakistan.” Molecular Ecology 21 (8): 1951–65. https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1365-294X.2011.05424.x." + "text": "2.2 Step by step analysis\n\n2.2.1 Recover full sequence reads from forward and reverse partial reads\nWhen using the result of a paired-end sequencing assay with supposedly overlapping forward and reverse reads, the first step is to recover the assembled sequence.\nThe forward and reverse reads of the same fragment are at the same line position in the two fastq files obtained after sequencing. Based on these two files, the assembly of the forward and reverse reads is done with the obipairing utility that aligns the two reads and returns the reconstructed sequence.\nIn our case, the command is:\n\nobipairing --min-identity=0.8 \\\n --min-overlap=10 \\\n -F wolf_data/wolf_F.fastq \\\n -R wolf_data/wolf_R.fastq \\\n > results/wolf.fastq \n\nThe --min-identity and --min-overlap options allow discarding sequences with low alignment quality. 
If after the aligment, the overlaping parts of the reads is shorter than 10 base pairs or the similarity over this aligned region is below 80% of identity, in the output file, the forward and reverse reads are not aligned but concatenated, and the value of the mode attribute in the sequence header is set to joined instead of alignment.\n\n\n2.2.2 Remove unaligned sequence records\nUnaligned sequences (:pymode=joined) cannot be used. The following command allows removing them from the dataset:\n\nobigrep -p 'annotations.mode != \"join\"' \\\n results/wolf.fastq > results/wolf.ali.fastq\n\nThe -p requires a go like expression. annotations.mode != \"join\" means that if the value of the mode annotation of a sequence is different from join, the corresponding sequence record will be kept.\nThe first sequence record of wolf.ali.fastq can be obtained using the following command line:\n\nhead -n 4 results/wolf.ali.fastq\n\nThe folling piece of code appears on thew window of tour terminal.\n@HELIUM_000100422_612GNAAXX:7:108:5640:3823#0/1 {\"ali_dir\":\"left\",\"ali_length\":62,\"mode\":\"alignment\",\"pairing_mismatches\":{\"(T:26)->(G:13)\":62,\"(T:34)->(G:18)\":48},\"score\":484,\"score_norm\":0.968,\"seq_a_single\":46,\"seq_ab_match\":60,\"seq_b_single\":46}\nccgcctcctttagataccccactatgcttagccctaaacacaagtaattaatataacaaaattgttcgccagagtactaccggcaatagcttaaaactcaaaggacttggcggtgctttatacccttctagaggagcctgttctaaggaggcgg\n+\nCCCCCCCBCCCCCCCCCCCCCCCCCCCCCCBCCCCCBCCCCCCC results/wolf.ali.assigned.fastq\n\nThis command creates two files:\n\nunidentified.fastq containing all the sequence records that were not assigned to a sample/marker combination\nwolf.ali.assigned.fastq containing all the sequence records that were properly assigned to a sample/marker combination\n\nNote that each sequence record of the wolf.ali.assigned.fastq file contains only the barcode sequence as the sequences of primers and tags are removed by the obimultiplex program. 
Information concerning the experiment, sample, primers and tags is added as attributes in the sequence header.\nFor instance, the first sequence record of wolf.ali.assigned.fastq is:\n@HELIUM_000100422_612GNAAXX:7:108:5640:3823#0/1_sub[28..127] {\"ali_dir\":\"left\",\"ali_length\":62,\"direction\":\"direct\",\"experiment\":\"wolf_diet\",\"forward_match\":\"ttagataccccactatgc\",\"forward_mismatches\":0,\"forward_primer\":\"ttagataccccactatgc\",\"forward_tag\":\"gcctcct\",\"mode\":\"alignment\",\"pairing_mismatches\":{\"(T:26)->(G:13)\":35,\"(T:34)->(G:18)\":21},\"reverse_match\":\"tagaacaggctcctctag\",\"reverse_mismatches\":0,\"reverse_primer\":\"tagaacaggctcctctag\",\"reverse_tag\":\"gcctcct\",\"sample\":\"29a_F260619\",\"score\":484,\"score_norm\":0.968,\"seq_a_single\":46,\"seq_ab_match\":60,\"seq_b_single\":46}\nttagccctaaacacaagtaattaatataacaaaattgttcgccagagtactaccggcaatagcttaaaactcaaaggacttggcggtgctttataccctt\n+\nCCCBCCCCCBCCCCCCC results/wolf.ali.assigned.uniq.fasta\n\nNote that obiuniq returns a fasta file.\nThe first sequence record of wolf.ali.assigned.uniq.fasta is:\n>HELIUM_000100422_612GNAAXX:7:93:6991:1942#0/1_sub[28..126] {\"ali_dir\":\"left\",\"ali_length\":63,\"count\":1,\"direction\":\"reverse\",\"experiment\":\"wolf_diet\",\"forward_match\":\"ttagataccccactatgc\",\"forward_mismatches\":0,\"forward_primer\":\"ttagataccccactatgc\",\"forward_tag\":\"gaatatc\",\"merged_sample\":{\"26a_F040644\":1},\"mode\":\"alignment\",\"pairing_mismatches\":{\"(A:10)->(G:34)\":76,\"(C:06)->(A:34)\":58},\"reverse_match\":\"tagaacaggctcctctag\",\"reverse_mismatches\":0,\"reverse_primer\":\"tagaacaggctcctctag\",\"reverse_tag\":\"gaatatc\",\"score\":730,\"score_norm\":0.968,\"seq_a_single\":45,\"seq_ab_match\":61,\"seq_b_single\":45}\nttagccctaaacataaacattcaataaacaagaatgttcgccagagaactactagcaaca\ngcctgaaactcaaaggacttggcggtgctttatatccct\nThe run of obiuniq has added two key=values entries in the header of the fasta sequence:\n\n\"merged_sample\":{\"29a_F260619\":1}: this sequence have been found once in a single sample called 29a_F260619\n\"count\":1 : the total count for this sequence is \\(1\\)\n\nTo keep only these two attributes, we can use the obiannotate command:\n\nobiannotate -k count -k merged_sample \\\n results/wolf.ali.assigned.uniq.fasta \\\n > results/wolf.ali.assigned.simple.fasta\n\nThe first five sequence records of wolf.ali.assigned.simple.fasta become:\n>HELIUM_000100422_612GNAAXX:7:26:18930:11105#0/1_sub[28..127] {\"count\":1,\"merged_sample\":{\"29a_F260619\":1}}\nttagccctaaacacaagtaattaatataacaaaatwattcgcyagagtactacmggcaat\nagctyaaarctcamagrwcttggcggtgctttataccctt\n>HELIUM_000100422_612GNAAXX:7:58:5711:11399#0/1_sub[28..127] {\"count\":1,\"merged_sample\":{\"29a_F260619\":1}}\nttagccctaaacacaagtaattaatataacaaaattattcgccagagtwctaccgssaat\nagcttaaaactcaaaggactgggcggtgctttataccctt\n>HELIUM_000100422_612GNAAXX:7:100:15836:9304#0/1_sub[28..127] {\"count\":1,\"merged_sample\":{\"29a_F260619\":1}}\nttagccctaaacatagataattacacaaacaaaattgttcaccagagtactagcggcaac\nagcttaaaactcaaaggacttggcggtgctttataccctt\n>HELIUM_000100422_612GNAAXX:7:55:13242:9085#0/1_sub[28..126] {\"count\":4,\"merged_sample\":{\"26a_F040644\":4}}\nttagccctaaacataaacattcaataaacaagagtgttcgccagagtactactagcaaca\ngcctgaaactcaaaggacttggcggtgctttacatccct\n>HELIUM_000100422_612GNAAXX:7:86:8429:13723#0/1_sub[28..127] {\"count\":7,\"merged_sample\":{\"15a_F730814\":5,\"29a_F260619\":2}}\nttagccctaaacacaagtaattaatataacaaaattattcgccagagtactaccggcaat\nagcttaaaactcaaaggactcggcggtgctttataccctt\n\n\n2.2.5 Denoise the sequence 
dataset\nTo have a set of sequences assigned to their corresponding samples does not mean that all sequences are biologically meaningful i.e. some of these sequences can contains PCR and/or sequencing errors, or chimeras.\n\nTag the sequences for PCR errors (sequence variants)\nThe obiclean program tags sequence variants as potential error generated during PCR amplification. We ask it to keep the head sequences (-H option) that are sequences which are not variants of another sequence with a count greater than 5% of their own count (-r 0.05 option).\n\nobiclean -s sample -r 0.05 -H \\\n results/wolf.ali.assigned.simple.fasta \\\n > results/wolf.ali.assigned.simple.clean.fasta \n\nOne of the sequence records of wolf.ali.assigned.simple.clean.fasta is:\n>HELIUM_000100422_612GNAAXX:7:66:4039:8016#0/1_sub[28..127] {\"count\":17,\"merged_sample\":{\"13a_F730603\":17},\"obiclean_head\":true,\"obiclean_headcount\":1,\"obiclean_internalcount\":0,\"obi\nclean_samplecount\":1,\"obiclean_singletoncount\":0,\"obiclean_status\":{\"13a_F730603\":\"h\"},\"obiclean_weight\":{\"13a_F730603\":25}}\nctagccttaaacacaaatagttatgcaaacaaaactattcgccagagtactaccggcaac\nagcccaaaactcaaaggacttggcggtgcttcacaccctt\nTo remove such sequences as much as possible, we first discard rare sequences and then rsequence variants that likely correspond to artifacts.\n\n\nGet some statistics about sequence counts\n\nobicount results/wolf.ali.assigned.simple.clean.fasta\n\ntime=\"2023-02-02T23:07:30+01:00\" level=info msg=\"Appending results/wolf.ali.assigned.simple.clean.fasta file\\n\"\n 2749 36409 273387\n\n\nThe dataset contains \\(4313\\) sequences variant corresponding to 42452 sequence reads. Most of the variants occur only a single time in the complete dataset and are usualy named singletons\n\nobigrep -p 'sequence.Count() == 1' results/wolf.ali.assigned.simple.clean.fasta \\\n | obicount\n\ntime=\"2023-02-02T23:07:30+01:00\" level=info msg=\"Reading sequences from stdin in guessed\\n\"\ntime=\"2023-02-02T23:07:30+01:00\" level=info msg=\"Appending results/wolf.ali.assigned.simple.clean.fasta file\\n\"\ntime=\"2023-02-02T23:07:30+01:00\" level=info msg=\"On output use JSON headers\"\n 2309 2309 229912\n\n\nIn that dataset sigletons corresponds to \\(3511\\) variants.\nUsing R and the ROBIFastread package able to read headers of the fasta files produced by OBITools, we can get more complete statistics on the distribution of occurrencies.\n\nlibrary(ROBIFastread)\nlibrary(ggplot2)\n\nseqs <- read_obifasta(\"results/wolf.ali.assigned.simple.clean.fasta\",keys=\"count\")\n\nggplot(data = seqs, mapping=aes(x = count)) +\n geom_histogram(bins=100) +\n scale_y_sqrt() +\n scale_x_sqrt() +\n geom_vline(xintercept = 10, col=\"red\", lty=2) +\n xlab(\"number of occurrencies of a variant\") \n\n\n\n\nIn a similar way it is also possible to plot the distribution of the sequence length.\n\nggplot(data = seqs, mapping=aes(x = nchar(sequence))) +\n geom_histogram() +\n scale_y_log10() +\n geom_vline(xintercept = 80, col=\"red\", lty=2) +\n xlab(\"sequence lengths in base pair\")\n\n\n\n\n\n\nKeep only the sequences having a count greater or equal to 10 and a length shorter than 80 bp\nBased on the previous observation, we set the cut-off for keeping sequences for further analysis to a count of 10. To do this, we use the obigrep command. The -p 'count>=10' option means that the python expression :pycount>=10 must be evaluated to :pyTrue for each sequence to be kept. 
Based on previous knowledge we also remove sequences with a length shorter than 80 bp (option -l) as we know that the amplified 12S-V5 barcode for vertebrates must have a length around 100bp.\n\nobigrep -l 80 -p 'sequence.Count() >= 10' results/wolf.ali.assigned.simple.clean.fasta \\\n > results/wolf.ali.assigned.simple.clean.c10.l80.fasta\n\nThe first sequence record of results/wolf.ali.assigned.simple.clean.c10.l80.fasta is:\n>HELIUM_000100422_612GNAAXX:7:22:2603:18023#0/1_sub[28..127] {\"count\":12182,\"merged_sample\":{\"15a_F730814\":7559,\"29a_F260619\":4623},\"obiclean_head\":true,\"obiclean_headcount\":2,\"obiclean_internalcount\":0,\"obiclean_samplecount\":2,\"obiclean_singletoncount\":0,\"obiclean_status\":{\"15a_F730814\":\"h\",\"29a_F260619\":\"h\"},\"obiclean_weight\":{\"15a_F730814\":9165,\"29a_F260619\":6275}}\nttagccctaaacacaagtaattaatataacaaaattattcgccagagtactaccggcaat\nagcttaaaactcaaaggacttggcggtgctttataccctt\nAt that time in the data cleanning we have conserved :\n\nobicount results/wolf.ali.assigned.simple.clean.c10.l80.fasta\n\ntime=\"2023-02-02T23:07:31+01:00\" level=info msg=\"Appending results/wolf.ali.assigned.simple.clean.c10.l80.fasta file\\n\"\n 26 31337 2585\n\n\n\n\n\n2.2.6 Taxonomic assignment of sequences\nOnce denoising has been done, the next step in diet analysis is to assign the barcodes to the corresponding species in order to get the complete list of species associated to each sample.\nTaxonomic assignment of sequences requires a reference database compiling all possible species to be identified in the sample. Assignment is then done based on sequence comparison between sample sequences and reference sequences.\n\nDownload the taxonomy\nIt is always possible to download the complete taxonomy from NCBI using the following commands.\n\nmkdir TAXO\ncd TAXO\ncurl http://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz \\\n | tar -zxvf -\ncd ..\n\nFor people have a low speed internet connection, a copy of the taxdump.tar.gz file is provided in the wolf_data directory. The NCBI taxonomy is dayly updated, but the one provided here is ok for running this tutorial.\nTo build the TAXO directory from the provided taxdump.tar.gz, you need to execute the following commands\n\nmkdir TAXO\ncd TAXO\ntar zxvf wolf_data/taxdump.tar.gz \ncd ..\n\n\n\nBuild a reference database\nOne way to build the reference database is to use the obipcr program to simulate a PCR and extract all sequences from a general purpose DNA database such as genbank or EMBL that can be amplified in silico by the two primers (here TTAGATACCCCACTATGC and TAGAACAGGCTCCTCTAG) used for PCR amplification.\nThe two steps to build this reference database would then be\n\nToday, the easiest database to download is Genbank. But this will take you more than a day and occupy more than half a terabyte on your hard drive. In the wolf_data directory, a shell script called download_gb.sh is provided to perform this task. It requires that the programs wget2 and curl are available on your computer.\nUse obipcr to simulate amplification and build a reference database based on the putatively amplified barcodes and their recorded taxonomic information.\n\nAs these steps can take a long time (about a day for the download and an hour for the PCR), we already provide the reference database produced by the following commands so you can skip its construction. 
Note that as the Genbank and taxonomic database evolve frequently, if you run the following commands you may get different results.\n\nDownload the sequences\n\nmkdir genbank\ncd genbank\n../wolf_data/install_gb.sh\ncd ..\n\nDO NOT RUN THIS COMMAND EXCEPT IF YOU ARE REALLY CONSIENT OF THE TIME AND DISK SPACE REQUIRED.\n\n\nUse obipcr to simulate an in silico` PCR\n\nobipcr -t TAXO -e 3 -l 50 -L 150 \\ \n --forward TTAGATACCCCACTATGC \\\n --reverse TAGAACAGGCTCCTCTAG \\\n --no-order \\\n genbank/Release-251/gb*.seq.gz\n > results/v05.pcr.fasta\n\nNote that the primers must be in the same order both in wolf_diet_ngsfilter.txt and in the obipcr command. The part of the path indicating the Genbank release can change. Please check in your genbank directory the exact name of your release.\n\n\nClean the database\n\nfilter sequences so that they have a good taxonomic description at the species, genus, and family levels (obigrep command command below).\nremove redundant sequences (obiuniq command below).\nensure that the dereplicated sequences have a taxid at the family level (obigrep command below).\nensure that sequences each have a unique identification (obiannotate command below)\n\n\nobigrep -t TAXO \\\n --require-rank species \\\n --require-rank genus \\\n --require-rank family \\\n results/v05.ecopcr > results/v05_clean.fasta\n\nobiuniq -c taxid \\\n results/v05_clean.fasta \\\n > results/v05_clean_uniq.fasta\n\nobirefidx -t TAXO results/v05_clean_uniq.fasta \\\n > results/v05_clean_uniq.indexed.fasta\n\n\n\nWarning\n\nFrom now on, for the sake of clarity, the following commands will use the filenames of the files provided with the tutorial. If you decided to run the last steps and use the files you have produced, you'll have to use results/v05_clean_uniq.indexed.fasta instead of wolf_data/db_v05_r117.indexed.fasta.\n\n\n\n\n\n2.2.7 Assign each sequence to a taxon\nOnce the reference database is built, taxonomic assignment can be carried out using the obitag command.\n\nobitag -t TAXO -R wolf_data/db_v05_r117.indexed.fasta \\\n results/wolf.ali.assigned.simple.clean.c10.l80.fasta \\\n > results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta\n\nThe obitag adds several attributes in the sequence record header, among them:\n\nobitag_bestmatch=ACCESSION where ACCESSION is the id of hte sequence in the reference database that best aligns to the query sequence;\nobitag_bestid=FLOAT where FLOAT*100 is the percentage of identity between the best match sequence and the query sequence;\ntaxid=TAXID where TAXID is the final assignation of the sequence by obitag\nscientific_name=NAME where NAME is the scientific name of the assigned taxid.\n\nThe first sequence record of wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta is:\n>HELIUM_000100422_612GNAAXX:7:81:18704:12346#0/1_sub[28..126] {\"count\":88,\"merged_sample\":{\"26a_F040644\":88},\"obiclean_head\":true,\"obiclean_headcount\":1,\"obiclean_internalcount\":0,\"obiclean_samplecount\":1,\"obiclean_singletoncount\":0,\"obiclean_status\":{\"26a_F040644\":\"h\"},\"obiclean_weight\":{\"26a_F040644\":208},\"obitag_bestid\":0.9207920792079208,\"obitag_bestmatch\":\"AY769263\",\"obitag_difference\":8,\"obitag_match_count\":1,\"obitag_rank\":\"clade\",\"scientific_name\":\"Boreoeutheria\",\"taxid\":1437010}\nttagccctaaacataaacattcaataaacaagaatgttcgccagaggactactagcaata\ngcttaaaactcaaaggacttggcggtgctttatatccct\n\n\n2.2.8 Generate the final result table\nSome unuseful attributes can be removed at this 
stage.\n\nobiclean_head\nobiclean_headcount\nobiclean_internalcount\nobiclean_samplecount\nobiclean_singletoncount\n\n\nobiannotate --delete-tag=obiclean_head \\\n --delete-tag=obiclean_headcount \\\n --delete-tag=obiclean_internalcount \\\n --delete-tag=obiclean_samplecount \\\n --delete-tag=obiclean_singletoncount \\\n results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta \\\n > results/wolf.ali.assigned.simple.clean.c10.l80.taxo.ann.fasta\n\nThe first sequence record of wolf.ali.assigned.simple.c10.l80.clean.taxo.ann.fasta is then:\n>HELIUM_000100422_612GNAAXX:7:84:16335:5083#0/1_sub[28..126] {\"count\":96,\"merged_sample\":{\"26a_F040644\":11,\"29a_F260619\":85},\"obiclean_status\":{\"26a_F040644\":\"s\",\"29a_F260619\":\"h\"},\"obiclean_weight\":{\"26a_F040644\":14,\"29a_F260619\":110},\"obitag_bestid\":0.9595959595959596,\"obitag_bestmatch\":\"AC187326\",\"obitag_difference\":4,\"obitag_match_count\":1,\"obitag_rank\":\"subspecies\",\"scientific_name\":\"Canis lupus familiaris\",\"taxid\":9615}\nttagccctaaacataagctattccataacaaaataattcgccagagaactactagcaaca\ngattaaacctcaaaggacttggcagtgctttatacccct\n\n\n2.2.9 Looking at the data in R\n\nlibrary(ROBIFastread)\nlibrary(vegan)\n\nLe chargement a nécessité le package : permute\n\n\nLe chargement a nécessité le package : lattice\n\n\nThis is vegan 2.6-4\n\nlibrary(magrittr)\n \n\ndiet_data <- read_obifasta(\"results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta\") \ndiet_data %<>% extract_features(\"obitag_bestmatch\",\"obitag_rank\",\"scientific_name\",'taxid')\n\ndiet_tab <- extract_readcount(diet_data,key=\"obiclean_weight\")\ndiet_tab\n\n4 x 26 sparse Matrix of class \"dgCMatrix\"\n\n\n [[ suppressing 26 column names 'HELIUM_000100422_612GNAAXX:7:30:17945:19531#0/1_sub[28..126]', 'HELIUM_000100422_612GNAAXX:7:94:16908:11285#0/1_sub[28..127]', 'HELIUM_000100422_612GNAAXX:7:100:4828:3492#0/1_sub[28..127]' ... ]]\n\n\n \n26a_F040644 43 . . . . 88 . 52 208 15 31 . . 14 481 72 17 . .\n13a_F730603 . 8409 22 1 . . . . . . . 20 . . 19 . . 15 .\n29a_F260619 . . . 13 353 . 391 . . . . . 6275 . 1 . . . 44\n15a_F730814 . . . . . . . . . . . . 9165 . 5 . . . .\n \n26a_F040644 12830 14 . . 18 . .\n13a_F730603 . . . 9 . . 25\n29a_F260619 . 110 16 . . 25 .\n15a_F730814 . . . 4 . . .\n\n\n\nThis file contains 26 sequences. You can deduce the diet of each sample:\n\n\n13a_F730603: Cervus elaphus\n15a_F730814: Capreolus capreolus\n26a_F040644: Marmota sp. (according to the location, it is Marmota marmota)\n29a_F260619: Capreolus capreolus\n\n\n\nNote that we also obtained a few wolf sequences although a wolf-blocking oligonucleotide was used.\n\n\n\n\nRiaz, Tiayyba, Wasim Shehzad, Alain Viari, François Pompanon, Pierre Taberlet, and Eric Coissac. 2011. “ecoPrimers: inference of new DNA barcode markers from whole genome sequence analysis.” Nucleic Acids Research 39 (21): e145. https://doi.org/10.1093/nar/gkr732.\n\n\nSeguritan, V, and F Rohwer. 2001. “FastGroup: a program to dereplicate libraries of 16S rDNA sequences.” BMC Bioinformatics 2 (October): 9. https://doi.org/10.1186/1471-2105-2-9.\n\n\nShehzad, Wasim, Tiayyba Riaz, Muhammad A Nawaz, Christian Miquel, Carole Poillot, Safdar A Shah, Francois Pompanon, Eric Coissac, and Pierre Taberlet. 2012. “Carnivore diet analysis based on next-generation sequencing: Application to the leopard cat (Prionailurus bengalensis) in Pakistan.” Molecular Ecology 21 (8): 1951–65. https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1365-294X.2011.05424.x." 
}, { "objectID": "commands.html#specifying-the-input-files-to-obitools-commands", diff --git a/doc/_book/tutorial.html b/doc/_book/tutorial.html index 2350622..758392f 100644 --- a/doc/_book/tutorial.html +++ b/doc/_book/tutorial.html @@ -231,6 +231,7 @@ div.csl-indent {
  • 2.2.6 Taxonomic assignment of sequences
  • 2.2.7 Assign each sequence to a taxon
  • 2.2.8 Generate the final result table
  • +
  • 2.2.9 Looking at the data in R
  • @@ -430,7 +431,7 @@ agcccaaaactcaaaggacttggcggtgcttcacaccctt
    obicount results/wolf.ali.assigned.simple.clean.fasta
    -
    time="2023-01-31T22:44:30+01:00" level=info msg="Appending results/wolf.ali.assigned.simple.clean.fasta file\n"
    +
    time="2023-02-02T23:07:30+01:00" level=info msg="Appending results/wolf.ali.assigned.simple.clean.fasta file\n"
      2749 36409 273387
    @@ -439,10 +440,10 @@ agcccaaaactcaaaggacttggcggtgcttcacaccctt
    obigrep -p 'sequence.Count() == 1' results/wolf.ali.assigned.simple.clean.fasta \
         | obicount
    -
    time="2023-01-31T22:44:30+01:00" level=info msg="Appending results/wolf.ali.assigned.simple.clean.fasta file\n"
    -time="2023-01-31T22:44:30+01:00" level=info msg="Reading sequences from stdin in guessed\n"
    -time="2023-01-31T22:44:30+01:00" level=info msg="On output use JSON headers"
    - 2309 2309 229920
    +
    time="2023-02-02T23:07:30+01:00" level=info msg="Reading sequences from stdin in guessed\n"
    +time="2023-02-02T23:07:30+01:00" level=info msg="Appending results/wolf.ali.assigned.simple.clean.fasta file\n"
    +time="2023-02-02T23:07:30+01:00" level=info msg="On output use JSON headers"
    + 2309 2309 229912

    In that dataset, singletons correspond to \(3511\) variants.

    @@ -490,7 +491,7 @@ agcttaaaactcaaaggacttggcggtgctttataccctt
    obicount results/wolf.ali.assigned.simple.clean.c10.l80.fasta
    -
    time="2023-01-31T22:44:32+01:00" level=info msg="Appending results/wolf.ali.assigned.simple.clean.c10.l80.fasta file\n"
    +
    time="2023-02-02T23:07:31+01:00" level=info msg="Appending results/wolf.ali.assigned.simple.clean.c10.l80.fasta file\n"
      26 31337 2585
    @@ -502,6 +503,7 @@ agcttaaaactcaaaggacttggcggtgctttataccctt

    Taxonomic assignment of sequences requires a reference database compiling all possible species to be identified in the sample. Assignment is then done based on sequence comparison between sample sequences and reference sequences.

    Download the taxonomy

    +

    It is always possible to download the complete taxonomy from NCBI using the following commands.

    mkdir TAXO
     cd TAXO
    @@ -509,39 +511,45 @@ agcttaaaactcaaaggacttggcggtgctttataccctt
    | tar -zxvf -
cd ..
    +

    For people who have a low-speed internet connection, a copy of the taxdump.tar.gz file is provided in the wolf_data directory. The NCBI taxonomy is updated daily, but the copy provided here is sufficient for running this tutorial.

    +

    To build the TAXO directory from the provided taxdump.tar.gz, execute the following commands:

    +
    +
    mkdir TAXO
    +cd TAXO
+tar zxvf ../wolf_data/taxdump.tar.gz 
    +cd ..
    +
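After the extraction, you can quickly check that the NCBI dump files are in place (a minimal sanity check; names.dmp and nodes.dmp are standard files of the taxdump archive):
# list the extracted NCBI taxonomy dump files
ls TAXO/*.dmp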

    Build a reference database

    -

    One way to build the reference database is to use the ecoPCR <scripts/ecoPCR> program to simulate a PCR and to extract all sequences from the EMBL that may be amplified in silico by the two primers (TTAGATACCCCACTATGC and TAGAACAGGCTCCTCTAG) used for PCR amplification.

    -

    The full list of steps for building this reference database would then be:

    +

    One way to build the reference database is to use the obipcr program to simulate a PCR and extract all sequences from a general-purpose DNA database such as GenBank or EMBL that can be amplified in silico by the two primers (here TTAGATACCCCACTATGC and TAGAACAGGCTCCTCTAG) used for PCR amplification.

    +

    The two steps to build this reference database would then be:

      -
    1. Download the whole set of EMBL sequences (available from: ftp://ftp.ebi.ac.uk/pub/databases/embl/release/)
    2. -
    3. Download the NCBI taxonomy (available from: ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz)
    4. -
    5. Format them into the ecoPCR format (see obiconvert <scripts/obiconvert> for how you can produce ecoPCR compatible files)
    6. -
    7. Use ecoPCR to simulate amplification and build a reference database based on putatively amplified barcodes together with their recorded taxonomic information
    8. +
    9. Today, the easiest database to download is Genbank. But this will take you more than a day and occupy more than half a terabyte on your hard drive. In the wolf_data directory, a shell script called download_gb.sh is provided to perform this task. It requires that the programs wget2 and curl are available on your computer.

    10. +
    11. Use obipcr to simulate amplification and build a reference database based on the putatively amplified barcodes and their recorded taxonomic information.

    -

    As step 1 and step 3 can be really time-consuming (about one day), we alredy provide the reference database produced by the following commands so that you can skip its construction. Note that as the EMBL database and taxonomic data can evolve daily, if you run the following commands you may end up with quite different results.

    -

    Any utility allowing file downloading from a ftp site can be used. In the following commands, we use the commonly used wget Unix command.

    +

    As these steps can take a long time (about a day for the download and an hour for the PCR), we already provide the reference database produced by the following commands so you can skip its construction. Note that as GenBank and the NCBI taxonomy evolve frequently, running the following commands may give you different results.
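Before launching the download, it may be worth checking that the two programs the script relies on are installed (a minimal check; command -v prints the path of each program it finds and prints nothing for a missing one):
# verify that the helper programs required by the download script are available
command -v wget2
command -v curl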

    Download the sequences
    -
    > mkdir EMBL
    -> cd EMBL
    -> wget -nH --cut-dirs=4 -Arel_std_\*.dat.gz -m ftp://ftp.ebi.ac.uk/pub/databases/embl/release/
    -> cd ..
    -
    -
    -
    Download the taxonomy
    -
    > mkdir TAXO
    -> cd TAXO
    -> wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
    -> tar -zxvf taxdump.tar.gz
    -> cd ..
    +
    +
    mkdir genbank
    +cd genbank
    +../wolf_data/install_gb.sh
    +cd ..
    +
    +

    DO NOT RUN THIS COMMAND UNLESS YOU ARE FULLY AWARE OF THE TIME AND DISK SPACE REQUIRED.

    Use obipcr to simulate an in silico PCR
    -
    > obipcr -d ./ECODB/embl_last -e 3 -l 50 -L 150 \ 
    -  TTAGATACCCCACTATGC TAGAACAGGCTCCTCTAG > v05.ecopcr
    -

    Note that the primers must be in the same order both in wolf_diet_ngsfilter.txt and in the obipcr command.

    +
    +
    obipcr -t TAXO -e 3 -l 50 -L 150 \ 
    +       --forward TTAGATACCCCACTATGC \
    +       --reverse TAGAACAGGCTCCTCTAG \
    +       --no-order \
+       genbank/Release-251/gb*.seq.gz \
    +       > results/v05.pcr.fasta
    +
    +

    Note that the primers must be given in the same order in both wolf_diet_ngsfilter.txt and the obipcr command. The part of the path indicating the GenBank release can change; check the exact name of your release in your genbank directory.
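Before writing the path, you can list the release directory that the download actually created (a small helper command; the Release-* pattern simply mirrors the layout used in the command above and may differ on your system):
# show the GenBank release directory present in your genbank folder
ls -d genbank/Release-*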

    Clean the database
    @@ -551,22 +559,25 @@ agcttaaaactcaaaggacttggcggtgctttataccctt
  • ensure that the dereplicated sequences have a taxid at the family level (obigrep command below).
  • ensure that sequences each have a unique identification (obiannotate command below)
  • -
    > obigrep -d embl_last --require-rank=species \
    -  --require-rank=genus --require-rank=family v05.ecopcr > v05_clean.fasta
    -
    -> obiuniq -d embl_last \ 
    -  v05_clean.fasta > v05_clean_uniq.fasta
    +
    +
    obigrep -t TAXO \
    +          --require-rank species \
    +          --require-rank genus \
    +          --require-rank family \
    +          results/v05.ecopcr > results/v05_clean.fasta
     
    -> obigrep -d embl_last --require-rank=family \ 
    -  v05_clean_uniq.fasta > v05_clean_uniq_clean.fasta
    -
    -> obiannotate --uniq-id v05_clean_uniq_clean.fasta > db_v05.fasta
    -

    obirefidx -t TAXO wolf_data/db_v05_r117.fasta > results/db_v05_r117.indexed.fasta

+obiuniq -c taxid \
+          results/v05_clean.fasta \
+          > results/v05_clean_uniq.fasta
+
+obirefidx -t TAXO results/v05_clean_uniq.fasta \
+          > results/v05_clean_uniq.indexed.fasta
    +
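To get an idea of the size of the cleaned and indexed reference database, you can run obicount on it, as was done earlier for the sample files (the counts you obtain will depend on the GenBank release you downloaded):
# count the entries in the indexed reference database
obicount results/v05_clean_uniq.indexed.fasta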

    Warning

    -

    From now on, for the sake of clarity, the following commands will use the filenames of the files provided with the tutorial. If you decided to run the last steps and use the files you have produced, you'll have to use db_v05.fasta instead of db_v05_r117.fasta and embl_last instead of embl_r117

    +

    From now on, for the sake of clarity, the following commands will use the filenames of the files provided with the tutorial. If you decided to run the last steps and use the files you have produced, you'll have to use results/v05_clean_uniq.indexed.fasta instead of wolf_data/db_v05_r117.indexed.fasta.

    @@ -614,6 +625,48 @@ agcttaaaactcaaaggacttggcggtgctttataccctt
    >HELIUM_000100422_612GNAAXX:7:84:16335:5083#0/1_sub[28..126] {"count":96,"merged_sample":{"26a_F040644":11,"29a_F260619":85},"obiclean_status":{"26a_F040644":"s","29a_F260619":"h"},"obiclean_weight":{"26a_F040644":14,"29a_F260619":110},"obitag_bestid":0.9595959595959596,"obitag_bestmatch":"AC187326","obitag_difference":4,"obitag_match_count":1,"obitag_rank":"subspecies","scientific_name":"Canis lupus familiaris","taxid":9615}
     ttagccctaaacataagctattccataacaaaataattcgccagagaactactagcaaca
     gattaaacctcaaaggacttggcagtgctttatacccct
    + +
    +

    2.2.9 Looking at the data in R

    +
    +
    library(ROBIFastread)
    +library(vegan)
    +
    +
    Loading required package: permute
    +
    +
    +
    Loading required package: lattice
    +
    +
    +
    This is vegan 2.6-4
    +
    +
    library(magrittr)
    + 
    +
    +diet_data <- read_obifasta("results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta") 
    +diet_data %<>% extract_features("obitag_bestmatch","obitag_rank","scientific_name",'taxid')
    +
    +diet_tab <- extract_readcount(diet_data,key="obiclean_weight")
    +diet_tab
    +
    +
    4 x 26 sparse Matrix of class "dgCMatrix"
    +
    +
    +
      [[ suppressing 26 column names 'HELIUM_000100422_612GNAAXX:7:30:17945:19531#0/1_sub[28..126]', 'HELIUM_000100422_612GNAAXX:7:94:16908:11285#0/1_sub[28..127]', 'HELIUM_000100422_612GNAAXX:7:100:4828:3492#0/1_sub[28..127]' ... ]]
    +
    +
    +
                                                                                
    +26a_F040644 43    .  .  .   . 88   . 52 208 15 31  .    . 14 481 72 17  .  .
    +13a_F730603  . 8409 22  1   .  .   .  .   .  .  . 20    .  .  19  .  . 15  .
    +29a_F260619  .    .  . 13 353  . 391  .   .  .  .  . 6275  .   1  .  .  . 44
    +15a_F730814  .    .  .  .   .  .   .  .   .  .  .  . 9165  .   5  .  .  .  .
    +                                   
    +26a_F040644 12830  14  . . 18  .  .
    +13a_F730603     .   .  . 9  .  . 25
    +29a_F260619     . 110 16 .  . 25  .
    +15a_F730814     .   .  . 4  .  .  .
    +
    +
    This file contains 26 sequences. You can deduce the diet of each sample:
    diff --git a/doc/tutorial.qmd b/doc/tutorial.qmd index a1db3da..cbf0a1f 100644 --- a/doc/tutorial.qmd +++ b/doc/tutorial.qmd @@ -379,6 +379,8 @@ sequences and reference sequences. #### Download the taxonomy {.unnumbered} +It is always possible to download the complete taxonomy from NCBI using the following commands. + ```{bash} #| output: false mkdir TAXO @@ -387,68 +389,63 @@ curl http://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz \ | tar -zxvf - cd .. ``` - + +For people have a low speed internet connection, a copy of the `taxdump.tar.gz` file is provided in the wolf_data directory. +The NCBI taxonomy is dayly updated, but the one provided here is ok for running this tutorial. + +To build the TAXO directory from the provided `taxdump.tar.gz`, you need to execute the following commands + +```{bash} +#| output: false +mkdir TAXO +cd TAXO +tar zxvf wolf_data/taxdump.tar.gz +cd .. +``` + #### Build a reference database {.unnumbered} -One way to build the reference database is to use the -`ecoPCR `{.interpreted-text role="doc"} program to -simulate a PCR and to extract all sequences from the EMBL that may be -amplified [in silico]{.title-ref} by the two primers -([TTAGATACCCCACTATGC]{.title-ref} and [TAGAACAGGCTCCTCTAG]{.title-ref}) +One way to build the reference database is to use the `obipcr` program to simulate a PCR and extract all sequences from a general purpose DNA database such as genbank or EMBL that can be +amplified *in silico* by the two primers (here **TTAGATACCCCACTATGC** and **TAGAACAGGCTCCTCTAG**) used for PCR amplification. -The full list of steps for building this reference database would then -be: +The two steps to build this reference database would then be -1. Download the whole set of EMBL sequences (available from: - ) -2. Download the NCBI taxonomy (available from: - ) -3. Format them into the ecoPCR format (see - `obiconvert `{.interpreted-text role="doc"} for - how you can produce ecoPCR compatible files) -4. Use ecoPCR to simulate amplification and build a reference database - based on putatively amplified barcodes together with their recorded - taxonomic information +1. Today, the easiest database to download is *Genbank*. But this will take you more than a day and occupy more than half a terabyte on your hard drive. In the `wolf_data` directory, a shell script called `download_gb.sh` is provided to perform this task. It requires that the programs `wget2` and `curl` are available on your computer. -As step 1 and step 3 can be really time-consuming (about one day), we -alredy provide the reference database produced by the following commands -so that you can skip its construction. Note that as the EMBL database -and taxonomic data can evolve daily, if you run the following commands -you may end up with quite different results. +1. Use `obipcr` to simulate amplification and build a reference database based on the putatively amplified barcodes and their recorded taxonomic information. -Any utility allowing file downloading from a ftp site can be used. In -the following commands, we use the commonly used `wget` *Unix* command. +As these steps can take a long time (about a day for the download and an hour for the PCR), we already provide the reference database produced by the following commands so you can skip its construction. Note that as the Genbank and taxonomic database evolve frequently, if you run the following commands you may get different results. 
##### Download the sequences {.unnumbered} -``` bash -> mkdir EMBL -> cd EMBL -> wget -nH --cut-dirs=4 -Arel_std_\*.dat.gz -m ftp://ftp.ebi.ac.uk/pub/databases/embl/release/ -> cd .. -``` +```{bash} +#| eval: false +mkdir genbank +cd genbank +../wolf_data/install_gb.sh +cd .. +``` -##### Download the taxonomy {.unnumbered} - -``` bash -> mkdir TAXO -> cd TAXO -> wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz -> tar -zxvf taxdump.tar.gz -> cd .. -``` +DO NOT RUN THIS COMMAND EXCEPT IF YOU ARE REALLY CONSIENT OF THE TIME AND DISK SPACE REQUIRED. ##### Use obipcr to simulate an in silico\` PCR {.unnumbered} -``` bash -> obipcr -d ./ECODB/embl_last -e 3 -l 50 -L 150 \ - TTAGATACCCCACTATGC TAGAACAGGCTCCTCTAG > v05.ecopcr +```{bash} +#| eval: false +obipcr -t TAXO -e 3 -l 50 -L 150 \ + --forward TTAGATACCCCACTATGC \ + --reverse TAGAACAGGCTCCTCTAG \ + --no-order \ + genbank/Release-251/gb*.seq.gz + > results/v05.pcr.fasta ``` Note that the primers must be in the same order both in `wolf_diet_ngsfilter.txt` and in the `obipcr` command. +The part of the path indicating the *Genbank* release can change. +Please check in your genbank directory the exact name of your release. ##### Clean the database {.unnumbered} @@ -461,20 +458,23 @@ Note that the primers must be in the same order both in 4. ensure that sequences each have a unique identification (`obiannotate` command below) -``` bash -> obigrep -d embl_last --require-rank=species \ - --require-rank=genus --require-rank=family v05.ecopcr > v05_clean.fasta +```{bash} +#| eval: false -> obiuniq -d embl_last \ - v05_clean.fasta > v05_clean_uniq.fasta +obigrep -t TAXO \ + --require-rank species \ + --require-rank genus \ + --require-rank family \ + results/v05.ecopcr > results/v05_clean.fasta -> obigrep -d embl_last --require-rank=family \ - v05_clean_uniq.fasta > v05_clean_uniq_clean.fasta +obiuniq -c taxid \ + results/v05_clean.fasta \ + > results/v05_clean_uniq.fasta -> obiannotate --uniq-id v05_clean_uniq_clean.fasta > db_v05.fasta +obirefidx -t TAXO results/v05_clean_uniq.fasta \ + > results/v05_clean_uniq.indexed.fasta ``` -obirefidx -t TAXO wolf_data/db_v05_r117.fasta > results/db_v05_r117.indexed.fasta ::: warning @@ -485,8 +485,7 @@ Warning From now on, for the sake of clarity, the following commands will use the filenames of the files provided with the tutorial. If you decided to run the last steps and use the files you have produced, you\'ll have to -use `db_v05.fasta` instead of `db_v05_r117.fasta` and `embl_last` -instead of `embl_r117` +use `results/v05_clean_uniq.indexed.fasta` instead of `wolf_data/db_v05_r117.indexed.fasta`. ::: ### Assign each sequence to a taxon @@ -551,7 +550,23 @@ ttagccctaaacataagctattccataacaaaataattcgccagagaactactagcaaca gattaaacctcaaaggacttggcagtgctttatacccct ``` -This file contains 26 sequences. You can deduce the diet of each sample: +### Looking at the data in R + + +```{r} +library(ROBIFastread) +library(vegan) +library(magrittr) + + +diet_data <- read_obifasta("results/wolf.ali.assigned.simple.clean.c10.l80.taxo.fasta") +diet_data %<>% extract_features("obitag_bestmatch","obitag_rank","scientific_name",'taxid') + +diet_tab <- extract_readcount(diet_data,key="obiclean_weight") +diet_tab +``` + +This file contains 26 sequences. 
You can deduce the diet of each sample: : - 13a_F730603: Cervus elaphus - 15a_F730814: Capreolus capreolus diff --git a/pkg/obitools/obiannotate/options.go b/pkg/obitools/obiannotate/options.go index 9dbabf4..66c781f 100644 --- a/pkg/obitools/obiannotate/options.go +++ b/pkg/obitools/obiannotate/options.go @@ -17,9 +17,9 @@ var _setSeqLength = false var _uniqueID = false func SequenceAnnotationOptionSet(options *getoptions.GetOpt) { - options.BoolVar(&_addRank, "seq-rank", _addRank, - options.Description("Adds a new attribute named seq_rank to the sequence record indicating its entry number in the sequence file."), - ) + // options.BoolVar(&_addRank, "seq-rank", _addRank, + // options.Description("Adds a new attribute named seq_rank to the sequence record indicating its entry number in the sequence file."), + // ) options.BoolVar(&_clearAll, "clear", _clearAll, options.Description("Clears all attributes associated to the sequence records."), @@ -29,9 +29,10 @@ func SequenceAnnotationOptionSet(options *getoptions.GetOpt) { options.Description("Adds attribute with seq_length as a key and sequence length as a value."), ) - options.BoolVar(&_uniqueID, "uniq-id", _uniqueID, - options.Description("Forces sequence record ids to be unique."), - ) + // options.BoolVar(&_uniqueID, "uniq-id", _uniqueID, + // options.Description("Forces sequence record ids to be unique."), + // ) + options.StringMapVar(&_toBeRenamed, "rename-tag", 1, 1, options.Alias("R"), options.ArgName("NEW_NAME=OLD_NAME"), @@ -47,10 +48,10 @@ func SequenceAnnotationOptionSet(options *getoptions.GetOpt) { options.ArgName("RANK_NAME"), options.Description("Adds taxonomic annotation at taxonomic rank .")) - options.StringVar(&_tagList, "tag-list", _tagList, - options.ArgName("FILENAME"), - options.Description(" points to a file containing attribute names"+ - " and values to modify for specified sequence records.")) + // options.StringVar(&_tagList, "tag-list", _tagList, + // options.ArgName("FILENAME"), + // options.Description(" points to a file containing attribute names"+ + // " and values to modify for specified sequence records.")) options.StringSliceVar(&_keepOnly, "keep", 1, 1, options.Alias("k"), @@ -111,14 +112,8 @@ func CLIHasToBeKeptAttributes() bool { return len(_keepOnly) > 0 } -func CLIToBeKeptAttributes() map[string]bool { - d := make(map[string]bool, len(_keepOnly)) - - for _, v := range _keepOnly { - d[v] = true - } - - return d +func CLIToBeKeptAttributes() []string { + return _keepOnly } func CLIHasTaxonAtRank() bool { @@ -131,4 +126,8 @@ func CLITaxonAtRank() []string { func CLIHasSetLengthFlag() bool { return _setSeqLength +} + +func CLIHasClearAllFlag() bool { + return _clearAll } \ No newline at end of file
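As a side note on the obiannotate options kept active in this file, the --rename-tag (-R) flag declared above takes a NEW_NAME=OLD_NAME pair. A hypothetical invocation, with placeholder file names borrowed from the tutorial, could look like the sketch below (illustrative only, not a command used in the tutorial):
# rename the count attribute to weight in every sequence record (illustrative only)
obiannotate -R weight=count \
    results/wolf.ali.assigned.simple.fasta \
    > results/wolf.ali.assigned.renamed.fasta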