Compare commits
115 Commits
v3.0.0-bet ... v3.0.1b3
SHA1:
bd38449f2d
904823c827
af68a1024c
425fe25bd2
d48aed38d4
5e32f8523e
8f1d94fd24
38f42cb0fb
7f0f63cf26
cba78111c9
41fbae7b6c
ad1fd3c341
fbf0f7dfb6
fda0edd0d8
382e37a6ae
5cc3e29f75
a8e2aee281
13adb479d3
8ba7acdfe1
38051b1e4f
52a2e21b38
d27a5b9115
20bd3350b4
2e191372d7
112e12cab0
b9b4cec5b5
199f3772e8
422a6450fa
137c109f86
b6648ae81e
f6dffbecfe
c4696ac865
11a0945a9b
f23c40c905
f99fc13b75
1da6aac1b8
159803b40a
7dcbc34017
db2202c8b4
d33ff97846
1dcdf69f1f
dec114eed6
f36691053b
f2aa5fcf8b
bccb3e6874
f5a17bea68
e28507639a
e6feac93fe
50b292b489
24a737aa55
8aa455ad8a
46ca693ca9
9a9afde113
8dd403a118
9672f01c6a
ed9549acfb
9ace9989c4
a3ebe5f118
9100e14899
ccda0661ce
aab59f2214
ade1107b42
9c7d24406f
03bc9915f2
24b1dab573
7593673f3f
aa01236cae
49b8810a76
7a39df54c0
09e483b0d6
14a2579173
36a8aaa92e
a17eb445c2
e4a32788c2
2442cc80bf
aa836b2ace
8776ce22e6
4aa772c405
b0b96ac37a
687e42ad22
5fbbb6d304
359a9fe237
f9b6851f75
29a2652bbf
2a2c233936
faf8ea9d86
ffe2485e94
6094ce2bbc
a7dcf16c06
f13f8f6165
b5a29ac413
efd2b9d338
ca6e3e7aad
76ed8e18e5
1d17f28aec
fa834e4b8b
a72fea3cc9
e9a37d8a6e
ef074f8455
aec5e69f2c
170ef3f1ba
f999946582
773b36ec37
69cb434a6c
55d4f98d60
0bec2631e8
e6b6c6fa84
974528b2e6
1b346b54f9
058f2ad8b3
60bfd3ae8d
67bdee105a
0f745e0113
da8de52ba4
4d36538c6e
@@ -6,7 +6,7 @@ recursive-include doc/sphinx/source *.txt *.rst *.py
 recursive-include doc/sphinx/sphinxext *.py
 include doc/sphinx/Makefile
 include doc/sphinx/Doxyfile
-include README.txt
+include README.md
 include requirements.txt
 include scripts/obi
@@ -39,6 +39,12 @@ def __addImportInputOption(optionManager):
                        const=b'fastq',
                        help="Input file is in fastq format")

+    group.add_argument('--silva-input',
+                       action="store_const", dest="obi:inputformat",
+                       default=None,
+                       const=b'silva',
+                       help="Input file is in SILVA fasta format")
+
     group.add_argument('--embl-input',
                        action="store_const", dest="obi:inputformat",
                        default=None,
@@ -30,12 +30,12 @@ cdef class ProgressBar:
                   off_t maxi,
                   dict config={},
                   str head="",
-                  double seconde=0.1,
+                  double seconds=5,
                   cut=False):

         self.starttime = self.clock()
         self.lasttime = self.starttime
-        self.tickcount = <clock_t> (seconde * CLOCKS_PER_SEC)
+        self.tickcount = <clock_t> (seconds * CLOCKS_PER_SEC)
         self.freq = 1
         self.cycle = 0
         self.arrow = 0
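The hunk above renames the misspelled `seconde` parameter to `seconds` and raises its default from 0.1 s to 5 s; the value is converted once into processor clock ticks (`tickcount`) so that each call to the bar only redraws when at least that many ticks have elapsed. A minimal sketch of that throttling idea in plain Python (hypothetical class, not the obitools3 implementation):

```python
import sys
import time

class ThrottledProgress:
    """Redraw at most once every `seconds` seconds of CPU time (sketch only)."""

    def __init__(self, maxi, seconds=5.0):
        self.maxi = maxi
        self.seconds = seconds            # refresh interval, as in the renamed parameter
        self.last = time.process_time()   # analogous to self.lasttime

    def __call__(self, done, force=False):
        now = time.process_time()
        # Redraw only when the interval has elapsed, or when forced at the very end
        if force or (now - self.last) >= self.seconds:
            self.last = now
            print(f"\r{done}/{self.maxi}", end="", file=sys.stderr)

# usage sketch
pb = ThrottledProgress(1000, seconds=0.5)
for i in range(1000):
    pb(i)
pb(1000, force=True)
```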
python/obitools3/commands/addtaxids.pyx (new executable file, 230 lines)
@@ -0,0 +1,230 @@
|
||||
#cython: language_level=3
|
||||
|
||||
from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View, Line_selection
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.dms.column.column cimport Column
|
||||
from functools import reduce
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes, tostr
|
||||
from io import BufferedWriter
|
||||
from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
|
||||
ID_COLUMN, \
|
||||
DEFINITION_COLUMN, \
|
||||
QUALITY_COLUMN, \
|
||||
COUNT_COLUMN, \
|
||||
TAXID_COLUMN
|
||||
from obitools3.dms.capi.obitypes cimport OBI_INT
|
||||
from obitools3.dms.capi.obitaxonomy cimport MIN_LOCAL_TAXID
|
||||
import time
|
||||
import math
|
||||
import sys
|
||||
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
|
||||
__title__="Annotate sequences with their corresponding NCBI taxid found from the taxon scientific name."
|
||||
|
||||
|
||||
|
||||
def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi addtaxids specific options')
|
||||
|
||||
group.add_argument('-t', '--taxid-tag',
|
||||
action="store",
|
||||
dest="addtaxids:taxid_tag",
|
||||
metavar="<TAXID_TAG>",
|
||||
default=b"TAXID",
|
||||
help="Name of the tag to store the found taxid "
|
||||
"(default: 'TAXID'.")
|
||||
|
||||
group.add_argument('-n', '--taxon-name-tag',
|
||||
action="store",
|
||||
dest="addtaxids:taxon_name_tag",
|
||||
metavar="<SCIENTIFIC_NAME_TAG>",
|
||||
default=b"SCIENTIFIC_NAME",
|
||||
help="Name of the tag giving the scientific name of the taxon "
|
||||
"(default: 'SCIENTIFIC_NAME'.")
|
||||
|
||||
group.add_argument('-g', '--try-genus-match',
|
||||
action="store_true", dest="addtaxids:try_genus_match",
|
||||
default=False,
|
||||
help="Try matching the first word of <SCIENTIFIC_NAME_TAG> when can't find corresponding taxid for a taxon. "
|
||||
"If there is a match it is added in the 'parent_taxid' tag. (Can be used by 'obi taxonomy' to add the taxon under that taxid).")
|
||||
|
||||
group.add_argument('-a', '--restricting-ancestor',
|
||||
action="store",
|
||||
dest="addtaxids:restricting_ancestor",
|
||||
metavar="<RESTRICTING_ANCESTOR>",
|
||||
default=None,
|
||||
help="Enables to restrict the search of taxids under an ancestor specified by its taxid.")
|
||||
|
||||
group.add_argument('-l', '--log-file',
|
||||
action="store",
|
||||
dest="addtaxids:log_file",
|
||||
metavar="<LOG_FILE>",
|
||||
default='',
|
||||
help="Path to a log file to write informations about not found taxids.")
|
||||
|
||||
|
||||
def run(config):
|
||||
|
||||
DMS.obi_atexit()
|
||||
|
||||
logger("info", "obi addtaxids")
|
||||
|
||||
# Open the input
|
||||
input = open_uri(config['obi']['inputURI'])
|
||||
if input is None:
|
||||
raise Exception("Could not read input view")
|
||||
i_dms = input[0]
|
||||
i_view = input[1]
|
||||
i_view_name = input[1].name
|
||||
|
||||
# Open the output: only the DMS, as the output view is going to be created by cloning the input view
|
||||
# (could eventually be done via an open_uri() argument)
|
||||
output = open_uri(config['obi']['outputURI'],
|
||||
input=False,
|
||||
dms_only=True)
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
o_view_name = output[1]
|
||||
|
||||
# stdout output: create temporary view
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
i=0
|
||||
o_view_name = b"temp"
|
||||
while o_view_name in i_dms: # Making sure view name is unique in output DMS
|
||||
o_view_name = o_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
imported_view_name = o_view_name
|
||||
|
||||
# If the input and output DMS are not the same, import the input view in the output DMS before cloning it to modify it
|
||||
# (could be the other way around: clone and modify in the input DMS then import the new view in the output DMS)
|
||||
if i_dms != o_dms:
|
||||
imported_view_name = i_view_name
|
||||
i=0
|
||||
while imported_view_name in o_dms: # Making sure view name is unique in output DMS
|
||||
imported_view_name = i_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], i_view_name, imported_view_name)
|
||||
i_view = o_dms[imported_view_name]
|
||||
|
||||
# Clone output view from input view
|
||||
o_view = i_view.clone(o_view_name)
|
||||
if o_view is None:
|
||||
raise Exception("Couldn't create output view")
|
||||
i_view.close()
|
||||
|
||||
# Open taxonomy
|
||||
taxo_uri = open_uri(config['obi']['taxoURI'])
|
||||
if taxo_uri is None or taxo_uri[2] == bytes:
|
||||
raise Exception("Couldn't open taxonomy")
|
||||
taxo = taxo_uri[1]
|
||||
|
||||
# Initialize the progress bar
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(o_view), config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
try:
|
||||
if config['addtaxids']['log_file']:
|
||||
logfile = open(config['addtaxids']['log_file'], 'w')
|
||||
else:
|
||||
logfile = None
|
||||
if config['addtaxids']['try_genus_match']:
|
||||
try_genus = True
|
||||
else:
|
||||
try_genus = False
|
||||
if 'restricting_ancestor' in config['addtaxids']:
|
||||
res_anc = int(config['addtaxids']['restricting_ancestor'])
|
||||
else:
|
||||
res_anc = None
|
||||
taxid_column_name = config['addtaxids']['taxid_tag']
|
||||
parent_taxid_column_name = "PARENT_TAXID" # TODO macro
|
||||
taxon_name_column_name = config['addtaxids']['taxon_name_tag']
|
||||
taxid_column = Column.new_column(o_view, taxid_column_name, OBI_INT)
|
||||
parent_taxid_column = Column.new_column(o_view, parent_taxid_column_name, OBI_INT)
|
||||
taxon_name_column = o_view[taxon_name_column_name]
|
||||
|
||||
found_count = 0
|
||||
not_found_count = 0
|
||||
parent_found_count = 0
|
||||
|
||||
for i in range(len(o_view)):
|
||||
PyErr_CheckSignals()
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
taxon_name = taxon_name_column[i]
|
||||
taxon = taxo.get_taxon_by_name(taxon_name, res_anc)
|
||||
if taxon is not None:
|
||||
taxid_column[i] = taxon.taxid
|
||||
found_count+=1
|
||||
elif try_genus: # try finding genus or other parent taxon from the first word
|
||||
taxon_name_sp = taxon_name.split(b" ")
|
||||
taxon = taxo.get_taxon_by_name(taxon_name_sp[0], res_anc)
|
||||
if taxon is not None:
|
||||
parent_taxid_column[i] = taxon.taxid
|
||||
parent_found_count+=1
|
||||
if logfile:
|
||||
print("Found parent taxon for", tostr(taxon_name), file=logfile)
|
||||
else:
|
||||
not_found_count+=1
|
||||
if logfile:
|
||||
print("No taxid found for", tostr(taxon_name), file=logfile)
|
||||
else:
|
||||
not_found_count+=1
|
||||
if logfile:
|
||||
print("No taxid found for", tostr(taxon_name), file=logfile)
|
||||
|
||||
except Exception, e:
|
||||
raise RollbackException("obi addtaxids error, rollbacking view: "+str(e), o_view)
|
||||
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
logger("info", "\nTaxids found: "+str(found_count)+"/"+str(len(o_view))+" ("+str(round(found_count*100.0/len(o_view), 2))+"%)")
|
||||
if config['addtaxids']['try_genus_match']:
|
||||
logger("info", "\nParent taxids found: "+str(parent_found_count)+"/"+str(len(o_view))+" ("+str(round(parent_found_count*100.0/len(o_view), 2))+"%)")
|
||||
logger("info", "\nTaxids not found: "+str(not_found_count)+"/"+str(len(o_view))+" ("+str(round(not_found_count*100.0/len(o_view), 2))+"%)")
|
||||
|
||||
# Save command config in View and DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
input_dms_name=[input[0].name]
|
||||
input_view_name=[i_view_name]
|
||||
if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
|
||||
input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
|
||||
input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
|
||||
o_view.write_config(config, "addtaxids", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(o_dms, imported_view_name)
|
||||
o_dms.close(force=True)
|
||||
i_dms.close(force=True)
|
||||
|
||||
logger("info", "Done.")
|
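The new `obi addtaxids` command above resolves, for every record, the scientific name stored in <SCIENTIFIC_NAME_TAG> to an NCBI taxid and, when `--try-genus-match` is set, retries with the first word of the name (typically the genus), storing that hit in the parent-taxid column instead. A schematic restatement of that lookup in plain Python, with `lookup` standing in for `taxo.get_taxon_by_name(name, restricting_ancestor)` (hypothetical helper, not the obitools3 code):

```python
def resolve_taxid(name, lookup, try_genus=False):
    """Return (taxid, parent_taxid): exact name first, then genus fallback (sketch)."""
    taxon = lookup(name)                  # exact scientific-name match
    if taxon is not None:
        return taxon.taxid, None          # fills the TAXID column
    if try_genus:
        genus = name.split(b" ")[0]       # first word of the scientific name
        taxon = lookup(genus)
        if taxon is not None:
            return None, taxon.taxid      # only a parent taxon (e.g. the genus) was found
    return None, None                     # neither found: logged as "No taxid found"
```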
@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
@ -12,6 +12,9 @@ from obitools3.utils cimport tobytes, str2bytes
|
||||
from obitools3.dms.capi.obilcsalign cimport obi_lcs_align_one_column, \
|
||||
obi_lcs_align_two_columns
|
||||
|
||||
from io import BufferedWriter
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
import time
|
||||
import sys
|
||||
|
||||
@ -23,6 +26,7 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi align specific options')
|
||||
|
||||
@ -201,20 +205,20 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
o_dms_name = o_dms.name
|
||||
final_o_view_name = output[1]
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
# If the input and output DMS are not the same, align creating a temporary view in the input dms that will be exported to
|
||||
# If stdout output or the input and output DMS are not the same, align creating a temporary view in the input dms that will be exported to
|
||||
# the right DMS and deleted in the other afterwards.
|
||||
if i_dms != o_dms:
|
||||
temporary_view_name = final_o_view_name
|
||||
i=0
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = final_o_view_name+b"_"+str2bytes(str(i))
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
o_view_name = b"temp"
|
||||
while o_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
o_view_name = o_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
# Save command config in View comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
@ -263,8 +267,15 @@ def run(config):
|
||||
View.delete_view(i_dms, i_view_name_2)
|
||||
i_dms_2.close()
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view = o_dms[o_view_name]
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary result view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
|
||||
|
@ -6,7 +6,7 @@ from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
|
||||
from obitools3.dms.column.column cimport Column
|
||||
from obitools3.dms.capi.obiview cimport QUALITY_COLUMN
|
||||
from obitools3.dms.capi.obitypes cimport OBI_QUAL
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.libalign._qsassemble import QSolexaReverseAssemble
|
||||
@ -15,7 +15,9 @@ from obitools3.libalign._solexapairend import buildConsensus, buildJoinedSequenc
|
||||
from obitools3.dms.obiseq cimport Nuc_Seq
|
||||
from obitools3.libalign.shifted_ali cimport Kmer_similarity, Ali_shifted
|
||||
from obitools3.dms.capi.obiview cimport REVERSE_SEQUENCE_COLUMN, REVERSE_QUALITY_COLUMN
|
||||
from obitools3.utils cimport str2bytes
|
||||
|
||||
from io import BufferedWriter
|
||||
import sys
|
||||
import os
|
||||
|
||||
@ -29,6 +31,7 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group = parser.add_argument_group('obi alignpairedend specific options')
|
||||
|
||||
@ -39,12 +42,13 @@ def addOptions(parser):
|
||||
type=str,
|
||||
help="URI to the reverse reads if they are in a different view than the forward reads")
|
||||
|
||||
group.add_argument('--score-min',
|
||||
action="store", dest="alignpairedend:smin",
|
||||
metavar="#.###",
|
||||
default=None,
|
||||
type=float,
|
||||
help="Minimum score for keeping alignments")
|
||||
# group.add_argument('--score-min',
|
||||
# action="store", dest="alignpairedend:smin",
|
||||
# metavar="#.###",
|
||||
# default=None,
|
||||
# type=float,
|
||||
# help="Minimum score for keeping alignments. "
|
||||
# "(for kmer alignment) The score is an approximation of the number of nucleotides matching in the overlap of the alignment.")
|
||||
|
||||
# group.add_argument('-A', '--true-ali',
|
||||
# action="store_true", dest="alignpairedend:trueali",
|
||||
@ -170,18 +174,29 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
|
||||
view = output[1]
|
||||
output_0 = output[0]
|
||||
o_dms = output[0]
|
||||
|
||||
Column.new_column(view, QUALITY_COLUMN, OBI_QUAL) #TODO output URI quality option?
|
||||
|
||||
if 'smin' in config['alignpairedend']:
|
||||
smin = config['alignpairedend']['smin']
|
||||
# stdout output: create temporary view
|
||||
if type(output_0)==BufferedWriter:
|
||||
i_dms = forward.dms # using any dms
|
||||
o_dms = i_dms
|
||||
i=0
|
||||
o_view_name = b"temp"
|
||||
while o_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
o_view_name = o_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view = View_NUC_SEQS.new(o_dms, o_view_name, quality=True)
|
||||
else:
|
||||
smin = 0
|
||||
o_view = output[1]
|
||||
Column.new_column(o_view, QUALITY_COLUMN, OBI_QUAL)
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(entries_len, config, seconde=5)
|
||||
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(entries_len, config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
#if config['alignpairedend']['trueali']:
|
||||
# kmer_ali = False
|
||||
# aligner = buildAlignment
|
||||
@ -190,34 +205,41 @@ def run(config):
|
||||
if type(entries) == list:
|
||||
forward = entries[0]
|
||||
reverse = entries[1]
|
||||
aligner = Kmer_similarity(forward, \
|
||||
view2=reverse, \
|
||||
kmer_size=config['alignpairedend']['kmersize'], \
|
||||
reversed_column=None)
|
||||
if len(forward) == 0 or len(reverse) == 0:
|
||||
aligner = None
|
||||
else:
|
||||
aligner = Kmer_similarity(forward, \
|
||||
view2=reverse, \
|
||||
kmer_size=config['alignpairedend']['kmersize'], \
|
||||
reversed_column=None)
|
||||
else:
|
||||
aligner = Kmer_similarity(entries, \
|
||||
column2=entries[REVERSE_SEQUENCE_COLUMN], \
|
||||
qual_column2=entries[REVERSE_QUALITY_COLUMN], \
|
||||
kmer_size=config['alignpairedend']['kmersize'], \
|
||||
reversed_column=entries[b'reversed']) # column created by the ngsfilter tool
|
||||
if len(entries) == 0:
|
||||
aligner = None
|
||||
else:
|
||||
aligner = Kmer_similarity(entries, \
|
||||
column2=entries[REVERSE_SEQUENCE_COLUMN], \
|
||||
qual_column2=entries[REVERSE_QUALITY_COLUMN], \
|
||||
kmer_size=config['alignpairedend']['kmersize'], \
|
||||
reversed_column=entries[b'reversed']) # column created by the ngsfilter tool
|
||||
|
||||
ba = alignmentIterator(entries, aligner)
|
||||
|
||||
|
||||
i = 0
|
||||
for ali in ba:
|
||||
|
||||
pb(i)
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
|
||||
PyErr_CheckSignals()
|
||||
|
||||
consensus = view[i]
|
||||
consensus = o_view[i]
|
||||
|
||||
if not two_views:
|
||||
seqF = entries[i]
|
||||
else:
|
||||
seqF = forward[i]
|
||||
|
||||
if ali.score > smin and ali.consensus_len > 0 :
|
||||
if ali.overlap_len > 0 :
|
||||
buildConsensus(ali, consensus, seqF)
|
||||
else:
|
||||
if not two_views:
|
||||
@ -225,32 +247,43 @@ def run(config):
|
||||
else:
|
||||
seqR = reverse[i]
|
||||
buildJoinedSequence(ali, seqR, consensus, forward=seqF)
|
||||
|
||||
consensus[b"smin"] = smin
|
||||
|
||||
|
||||
if kmer_ali :
|
||||
ali.free()
|
||||
|
||||
i+=1
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
if kmer_ali :
|
||||
if kmer_ali and aligner is not None:
|
||||
aligner.free()
|
||||
|
||||
# Save command config in View and DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
view.write_config(config, "alignpairedend", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
output[0].record_command_line(command_line)
|
||||
o_view.write_config(config, "alignpairedend", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(view), file=sys.stderr)
|
||||
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
# If stdout output, delete the temporary imported view used to create the final file
|
||||
if type(output_0)==BufferedWriter:
|
||||
View_NUC_SEQS.delete_view(o_dms, o_view_name)
|
||||
output_0.close()
|
||||
|
||||
# Close all DMS
|
||||
input[0].close(force=True)
|
||||
if two_views:
|
||||
rinput[0].close(force=True)
|
||||
output[0].close(force=True)
|
||||
o_dms.close(force=True)
|
||||
|
||||
logger("info", "Done.")
|
||||
|
@ -4,16 +4,18 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View, Line_selection
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from functools import reduce
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
from io import BufferedWriter
|
||||
from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
|
||||
ID_COLUMN, \
|
||||
DEFINITION_COLUMN, \
|
||||
QUALITY_COLUMN, \
|
||||
COUNT_COLUMN
|
||||
COUNT_COLUMN, \
|
||||
TAXID_COLUMN
|
||||
|
||||
import time
|
||||
import math
|
||||
@ -33,6 +35,7 @@ def addOptions(parser):
|
||||
addMinimalInputOption(parser)
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi annotate specific options')
|
||||
|
||||
@ -175,8 +178,8 @@ def sequenceTaggerGenerator(config, taxo=None):
|
||||
counter[0]+=1
|
||||
|
||||
for rank in annoteRank:
|
||||
if 'taxid' in seq:
|
||||
taxid = seq['taxid']
|
||||
if TAXID_COLUMN in seq:
|
||||
taxid = seq[TAXID_COLUMN]
|
||||
if taxid is not None:
|
||||
rtaxid = taxo.get_taxon_at_rank(taxid, rank)
|
||||
if rtaxid is not None:
|
||||
@ -190,58 +193,50 @@ def sequenceTaggerGenerator(config, taxo=None):
|
||||
seq['seq_rank']=counter[0]
|
||||
|
||||
for i,v in toSet:
|
||||
#try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(v, environ, seq)
|
||||
#except Exception,e: # TODO discuss usefulness of this
|
||||
# if options.onlyValid:
|
||||
# raise e
|
||||
# val = v
|
||||
try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(v, environ, seq)
|
||||
except Exception: # set string if not a valid expression
|
||||
val = v
|
||||
seq[i]=val
|
||||
|
||||
if length:
|
||||
seq['seq_length']=len(seq)
|
||||
|
||||
if newId is not None:
|
||||
# try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(newId, environ, seq)
|
||||
# except Exception,e:
|
||||
# if options.onlyValid:
|
||||
# raise e
|
||||
# val = newId
|
||||
try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(newId, environ, seq)
|
||||
except Exception: # set string if not a valid expression
|
||||
val = newId
|
||||
seq.id=val
|
||||
|
||||
if newDef is not None:
|
||||
# try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(newDef, environ, seq)
|
||||
# except Exception,e:
|
||||
# if options.onlyValid:
|
||||
# raise e
|
||||
# val = newDef
|
||||
try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(newDef, environ, seq)
|
||||
except Exception: # set string if not a valid expression
|
||||
val = newDef
|
||||
seq.definition=val
|
||||
#
|
||||
|
||||
if newSeq is not None:
|
||||
# try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(newSeq, environ, seq)
|
||||
# except Exception,e:
|
||||
# if options.onlyValid:
|
||||
# raise e
|
||||
# val = newSeq
|
||||
try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
val = eval(newSeq, environ, seq)
|
||||
except Exception: # set string if not a valid expression
|
||||
val = newSeq
|
||||
seq.seq=val
|
||||
if 'seq_length' in seq:
|
||||
seq['seq_length']=len(seq)
|
||||
@ -251,15 +246,14 @@ def sequenceTaggerGenerator(config, taxo=None):
|
||||
seq.view.delete_column(QUALITY_COLUMN)
|
||||
|
||||
if run is not None:
|
||||
# try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
eval(run, environ, seq)
|
||||
# except Exception,e:
|
||||
# if options.onlyValid:
|
||||
# raise e
|
||||
try:
|
||||
if taxo is not None:
|
||||
environ = {'taxonomy' : taxo, 'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
else:
|
||||
environ = {'sequence':seq, 'counter':counter[0], 'math':math}
|
||||
eval(run, environ, seq)
|
||||
except Exception,e:
|
||||
raise e
|
||||
|
||||
return sequenceTagger
|
||||
|
||||
@ -286,8 +280,19 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
o_view_name = output[1]
|
||||
|
||||
|
||||
# stdout output: create temporary view
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
i=0
|
||||
o_view_name = b"temp"
|
||||
while o_view_name in i_dms: # Making sure view name is unique in output DMS
|
||||
o_view_name = o_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
imported_view_name = o_view_name
|
||||
|
||||
# If the input and output DMS are not the same, import the input view in the output DMS before cloning it to modify it
|
||||
# (could be the other way around: clone and modify in the input DMS then import the new view in the output DMS)
|
||||
if i_dms != o_dms:
|
||||
@ -298,7 +303,7 @@ def run(config):
|
||||
i+=1
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], i_view_name, imported_view_name)
|
||||
i_view = o_dms[imported_view_name]
|
||||
|
||||
|
||||
# Clone output view from input view
|
||||
o_view = i_view.clone(o_view_name)
|
||||
if o_view is None:
|
||||
@ -315,7 +320,10 @@ def run(config):
|
||||
taxo = None
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(len(o_view), config, seconde=5)
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(o_view), config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
try:
|
||||
|
||||
@ -354,14 +362,16 @@ def run(config):
|
||||
sequenceTagger = sequenceTaggerGenerator(config, taxo=taxo)
|
||||
for i in range(len(o_view)):
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
sequenceTagger(o_view[i])
|
||||
|
||||
except Exception, e:
|
||||
raise RollbackException("obi annotate error, rollbacking view: "+str(e), o_view)
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Save command config in View and DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
@ -371,13 +381,19 @@ def run(config):
|
||||
input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
|
||||
input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
|
||||
o_view.write_config(config, "annotate", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
output[0].record_command_line(command_line)
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms:
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(o_dms, imported_view_name)
|
||||
o_dms.close(force=True)
|
||||
i_dms.close(force=True)
|
||||
|
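In the `obi annotate` hunks above, the commented-out error handling around the `eval()` calls becomes a real `try`/`except`: each user expression (for `--set-tag` values, the new id, definition or sequence) is evaluated in an environment exposing the sequence record, the counter, `math` and, when available, the taxonomy, and if it is not a valid expression the raw string itself is used as the value. A minimal sketch of that evaluate-or-literal pattern (hypothetical helper, not the obitools3 code):

```python
import math

def eval_or_literal(expr, seq, counter, taxo=None):
    """Evaluate `expr` against a record; fall back to the raw string (sketch)."""
    environ = {'sequence': seq, 'counter': counter, 'math': math}
    if taxo is not None:
        environ['taxonomy'] = taxo
    try:
        # `seq` doubles as the local namespace so tags can be referenced by name
        return eval(expr, environ, seq)
    except Exception:
        return expr  # not a valid expression: keep the string as-is

# usage sketch
record = {'count': 3}
print(eval_or_literal("count * 2", record, 1))   # 6
print(eval_or_literal("pool A", record, 1))      # 'pool A'
```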
@ -4,14 +4,16 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms.dms cimport DMS
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.dms.capi.build_reference_db cimport build_reference_db
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
from obitools3.dms.view.view cimport View
|
||||
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
|
||||
|
||||
from io import BufferedWriter
|
||||
import sys
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
|
||||
__title__="Tag a set of sequences for PCR and sequencing errors identification"
|
||||
@ -22,6 +24,7 @@ def addOptions(parser):
|
||||
addMinimalInputOption(parser)
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group = parser.add_argument_group('obi build_ref_db specific options')
|
||||
|
||||
@ -56,17 +59,20 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If the input and output DMS are not the same, build the database creating a temporary view that will be exported to
|
||||
# If stdout output or the input and output DMS are not the same, build the database creating a temporary view that will be exported to
|
||||
# the right DMS and deleted in the other afterwards.
|
||||
if i_dms != o_dms:
|
||||
temporary_view_name = final_o_view_name
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = final_o_view_name+b"_"+str2bytes(str(i))
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
@ -80,22 +86,29 @@ def run(config):
|
||||
input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
|
||||
input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
|
||||
comments = View.print_config(config, "build_ref_db", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
|
||||
|
||||
if build_reference_db(i_dms.name_with_full_path, tobytes(i_view_name), tobytes(taxonomy_name), tobytes(o_view_name), comments, config['build_ref_db']['threshold']) < 0:
|
||||
raise Exception("Error building a reference database")
|
||||
|
||||
# If the input and output DMS are not the same, export result view to output DMS
|
||||
if i_dms != o_dms:
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, final_o_view_name)
|
||||
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view = o_dms[o_view_name]
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
# Save command config in DMS comments
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_dms[final_o_view_name]), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary result view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
|
||||
|
@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport str2bytes
|
||||
@ -15,6 +15,7 @@ from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, REVERSE_SEQUENCE_CO
|
||||
from obitools3.dms.capi.obitypes cimport OBI_SEQ, OBI_QUAL
|
||||
from obitools3.dms.column.column cimport Column
|
||||
|
||||
from io import BufferedWriter
|
||||
import time
|
||||
import sys
|
||||
|
||||
@ -27,6 +28,7 @@ __title__="Concatenate views."
|
||||
def addOptions(parser):
|
||||
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi cat specific options')
|
||||
|
||||
@ -46,9 +48,9 @@ def run(config):
|
||||
|
||||
logger("info", "obi cat")
|
||||
|
||||
# Open the views to concatenate
|
||||
iview_list = []
|
||||
# Check the views to concatenate
|
||||
idms_list = []
|
||||
iview_list = []
|
||||
total_len = 0
|
||||
remove_qual = False
|
||||
remove_rev_qual = False
|
||||
@ -66,8 +68,9 @@ def run(config):
|
||||
if REVERSE_QUALITY_COLUMN not in i_view: # same as above for reverse quality
|
||||
remove_rev_qual = True
|
||||
total_len += len(i_view)
|
||||
iview_list.append(i_view)
|
||||
idms_list.append(i_dms)
|
||||
iview_list.append(i_view.name)
|
||||
i_view.close()
|
||||
|
||||
# Open the output: only the DMS
|
||||
output = open_uri(config['obi']['outputURI'],
|
||||
@ -76,57 +79,79 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
o_view = output[1]
|
||||
|
||||
# stdout output
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
|
||||
# Initialize quality columns and their associated sequence columns if needed
|
||||
if not remove_qual:
|
||||
if NUC_SEQUENCE_COLUMN not in o_view:
|
||||
Column.new_column(o_view, NUC_SEQUENCE_COLUMN, OBI_SEQ)
|
||||
Column.new_column(o_view, QUALITY_COLUMN, OBI_QUAL, associated_column_name=NUC_SEQUENCE_COLUMN, associated_column_version=o_view[NUC_SEQUENCE_COLUMN].version)
|
||||
if not remove_rev_qual:
|
||||
Column.new_column(o_view, REVERSE_SEQUENCE_COLUMN, OBI_SEQ)
|
||||
Column.new_column(o_view, REVERSE_QUALITY_COLUMN, OBI_QUAL, associated_column_name=REVERSE_SEQUENCE_COLUMN, associated_column_version=o_view[REVERSE_SEQUENCE_COLUMN].version)
|
||||
if type(output_0) != BufferedWriter:
|
||||
if not remove_qual:
|
||||
if NUC_SEQUENCE_COLUMN not in o_view:
|
||||
Column.new_column(o_view, NUC_SEQUENCE_COLUMN, OBI_SEQ)
|
||||
Column.new_column(o_view, QUALITY_COLUMN, OBI_QUAL, associated_column_name=NUC_SEQUENCE_COLUMN, associated_column_version=o_view[NUC_SEQUENCE_COLUMN].version)
|
||||
if not remove_rev_qual:
|
||||
Column.new_column(o_view, REVERSE_SEQUENCE_COLUMN, OBI_SEQ)
|
||||
Column.new_column(o_view, REVERSE_QUALITY_COLUMN, OBI_QUAL, associated_column_name=REVERSE_SEQUENCE_COLUMN, associated_column_version=o_view[REVERSE_SEQUENCE_COLUMN].version)
|
||||
|
||||
# Initialize multiple elements columns
|
||||
dict_cols = {}
|
||||
for v in iview_list:
|
||||
for coln in v.keys():
|
||||
if v[coln].nb_elements_per_line > 1:
|
||||
if coln not in dict_cols:
|
||||
dict_cols[coln] = {}
|
||||
dict_cols[coln]['eltnames'] = set(v[coln].elements_names)
|
||||
dict_cols[coln]['nbelts'] = v[coln].nb_elements_per_line
|
||||
dict_cols[coln]['obitype'] = v[coln].data_type_int
|
||||
else:
|
||||
dict_cols[coln]['eltnames'] = set(v[coln].elements_names + list(dict_cols[coln]['eltnames']))
|
||||
dict_cols[coln]['nbelts'] = len(dict_cols[coln]['eltnames'])
|
||||
for coln in dict_cols:
|
||||
Column.new_column(o_view, coln, dict_cols[coln]['obitype'],
|
||||
nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']))
|
||||
if type(output_0)==BufferedWriter:
|
||||
dict_cols = {}
|
||||
for v_uri in config["cat"]["views_to_cat"]:
|
||||
v = open_uri(v_uri)[1]
|
||||
for coln in v.keys():
|
||||
col = v[coln]
|
||||
if v[coln].nb_elements_per_line > 1:
|
||||
if coln not in dict_cols:
|
||||
dict_cols[coln] = {}
|
||||
dict_cols[coln]['eltnames'] = set(v[coln].elements_names)
|
||||
dict_cols[coln]['nbelts'] = v[coln].nb_elements_per_line
|
||||
dict_cols[coln]['obitype'] = v[coln].data_type_int
|
||||
else:
|
||||
dict_cols[coln]['eltnames'] = set(v[coln].elements_names + list(dict_cols[coln]['eltnames']))
|
||||
dict_cols[coln]['nbelts'] = len(dict_cols[coln]['eltnames'])
|
||||
v.close()
|
||||
for coln in dict_cols:
|
||||
Column.new_column(o_view, coln, dict_cols[coln]['obitype'],
|
||||
nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']), dict_column=True)
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(total_len, config, seconde=5)
|
||||
if not config['obi']['noprogressbar']:
|
||||
pb = ProgressBar(total_len, config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
i = 0
|
||||
for v in iview_list:
|
||||
for l in v:
|
||||
for v_uri in config["cat"]["views_to_cat"]:
|
||||
v = open_uri(v_uri)[1]
|
||||
for entry in v:
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
o_view[i] = l
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
if type(output_0)==BufferedWriter:
|
||||
rep = repr(entry)
|
||||
output_0.write(str2bytes(rep)+b"\n")
|
||||
else:
|
||||
o_view[i] = entry
|
||||
i+=1
|
||||
v.close()
|
||||
|
||||
# Deletes quality columns if needed
|
||||
if QUALITY_COLUMN in o_view and remove_qual :
|
||||
o_view.delete_column(QUALITY_COLUMN)
|
||||
if REVERSE_QUALITY_COLUMN in o_view and remove_rev_qual :
|
||||
o_view.delete_column(REVERSE_QUALITY_COLUMN)
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
if type(output_0)!=BufferedWriter:
|
||||
if QUALITY_COLUMN in o_view and remove_qual :
|
||||
o_view.delete_column(QUALITY_COLUMN)
|
||||
if REVERSE_QUALITY_COLUMN in o_view and remove_rev_qual :
|
||||
o_view.delete_column(REVERSE_QUALITY_COLUMN)
|
||||
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Save command config in DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[v.name for v in iview_list])
|
||||
o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[vname for vname in iview_list])
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
|
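For `obi cat`, output columns that hold several elements per line are created by merging them across all input views: element names are accumulated as a set union and the element count is the size of that union (the `dict_cols` logic above). A small sketch of that merge, assuming each input is a plain mapping from column name to its element names (hypothetical structure, not the obitools3 API):

```python
def merge_element_columns(views):
    """Union the element names of multi-element columns across views (sketch)."""
    merged = {}  # column name -> {'eltnames': set of names, 'nbelts': count}
    for view in views:
        for coln, eltnames in view.items():
            if len(eltnames) <= 1:
                continue                               # single-element columns are skipped
            entry = merged.setdefault(coln, {'eltnames': set()})
            entry['eltnames'] |= set(eltnames)         # union of element names
            entry['nbelts'] = len(entry['eltnames'])   # count follows the union
    return merged

# usage sketch
views = [{'MERGED_sample': ['A', 'B']}, {'MERGED_sample': ['B', 'C']}]
print(merge_element_columns(views)['MERGED_sample']['nbelts'])  # 3
```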
@ -4,13 +4,14 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms.dms cimport DMS
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.dms.capi.obiclean cimport obi_clean
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
from obitools3.dms.view.view cimport View
|
||||
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
|
||||
|
||||
from io import BufferedWriter
|
||||
import sys
|
||||
|
||||
|
||||
@ -21,7 +22,8 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group = parser.add_argument_group('obi clean specific options')
|
||||
|
||||
group.add_argument('--distance', '-d',
|
||||
@ -36,8 +38,7 @@ def addOptions(parser):
|
||||
dest="clean:sample-tag-name",
|
||||
metavar="<SAMPLE TAG NAME>",
|
||||
type=str,
|
||||
default="merged_sample",
|
||||
help="Name of the tag where sample counts are kept.")
|
||||
help="Name of the tag where merged sample count informations are kept (typically generated by obi uniq, usually MERGED_sample, default: None).")
|
||||
|
||||
group.add_argument('--ratio', '-r',
|
||||
action="store", dest="clean:ratio",
|
||||
@ -89,17 +90,20 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If the input and output DMS are not the same, run obiclean creating a temporary view that will be exported to
|
||||
# If stdout output or the input and output DMS are not the same, create a temporary view that will be exported to
|
||||
# the right DMS and deleted in the other afterwards.
|
||||
if i_dms != o_dms:
|
||||
temporary_view_name = final_o_view_name
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = final_o_view_name+b"_"+str2bytes(str(i))
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
@ -107,6 +111,9 @@ def run(config):
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
comments = View.print_config(config, "clean", command_line, input_dms_name=[i_dms_name], input_view_name=[i_view_name])
|
||||
|
||||
if 'sample-tag-name' not in config['clean']:
|
||||
config['clean']['sample-tag-name'] = ""
|
||||
|
||||
if obi_clean(i_dms.name_with_full_path, tobytes(i_view_name), tobytes(config['clean']['sample-tag-name']), tobytes(o_view_name), comments, \
|
||||
config['clean']['distance'], config['clean']['ratio'], config['clean']['heads-only'], config['clean']['thread-count']) < 0:
|
||||
raise Exception("Error running obiclean")
|
||||
@ -114,15 +121,22 @@ def run(config):
|
||||
# If the input and output DMS are not the same, export result view to output DMS
|
||||
if i_dms != o_dms:
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, final_o_view_name)
|
||||
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view = o_dms[o_view_name]
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
# Save command config in DMS comments
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_dms[final_o_view_name]), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary result view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
|
||||
|
@@ -22,7 +22,7 @@ def addOptions(parser):
     group.add_argument('-s','--sequence',
                        action="store_true", dest="count:sequence",
                        default=False,
-                       help="Prints only the number of sequence records.")
+                       help="Prints only the number of sequence records (much faster, default: False).")

     group.add_argument('-a','--all',
                        action="store_true", dest="count:all",
@ -5,10 +5,10 @@ from obitools3.dms.dms cimport DMS
|
||||
from obitools3.dms.capi.obidms cimport OBIDMS_p
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.dms.capi.obiecopcr cimport obi_ecopcr
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addTaxonomyOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addTaxonomyOption, addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
|
||||
from obitools3.dms.view import View
|
||||
|
||||
@ -16,6 +16,7 @@ from libc.stdlib cimport malloc, free
|
||||
from libc.stdint cimport int32_t
|
||||
|
||||
import sys
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
__title__="in silico PCR"
|
||||
@ -27,6 +28,7 @@ def addOptions(parser):
|
||||
addMinimalInputOption(parser)
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
|
||||
group = parser.add_argument_group('obi ecopcr specific options')
|
||||
@ -35,12 +37,14 @@ def addOptions(parser):
|
||||
action="store", dest="ecopcr:primer1",
|
||||
metavar='<PRIMER>',
|
||||
type=str,
|
||||
required=True,
|
||||
help="Forward primer, length must be less than or equal to 32")
|
||||
|
||||
group.add_argument('--primer2', '-R',
|
||||
action="store", dest="ecopcr:primer2",
|
||||
metavar='<PRIMER>',
|
||||
type=str,
|
||||
required=True,
|
||||
help="Reverse primer, length must be less than or equal to 32")
|
||||
|
||||
group.add_argument('--error', '-e',
|
||||
@ -167,11 +171,20 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
o_dms_name = output[0].name
|
||||
o_view_name = output[1]
|
||||
|
||||
# Read taxonomy name
|
||||
taxonomy_name = config['obi']['taxoURI'].split("/")[-1] # Robust in theory
|
||||
|
||||
# If stdout output create a temporary view in the input dms that will be deleted afterwards.
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
o_view_name = b"temp"
|
||||
while o_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
o_view_name = o_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
|
||||
# Save command config in View comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
@ -199,10 +212,21 @@ def run(config):
|
||||
|
||||
free(restrict_to_taxids_p)
|
||||
free(ignore_taxids_p)
|
||||
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view = o_dms[o_view_name]
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_dms[o_view_name]), file=sys.stderr)
|
||||
|
||||
# If stdout output, delete the temporary result view in the input DMS
|
||||
if type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
|
||||
i_dms.close(force=True)
|
||||
o_dms.close(force=True)
|
||||
|
||||
|
@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms.dms cimport DMS
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.dms.capi.obiecotag cimport obi_ecotag
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
@ -12,6 +12,7 @@ from obitools3.dms.view.view cimport View
|
||||
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
|
||||
|
||||
import sys
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
__title__="Taxonomic assignment of sequences"
|
||||
@ -22,6 +23,7 @@ def addOptions(parser):
|
||||
addMinimalInputOption(parser)
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group = parser.add_argument_group('obi ecotag specific options')
|
||||
|
||||
@ -39,6 +41,17 @@ def addOptions(parser):
|
||||
help="Minimum identity to consider for assignment, as a normalized identity, e.g. 0.95 for an identity of 95%%. "
|
||||
"Default: 0.00 (no threshold).")
|
||||
|
||||
group.add_argument('--minimum-circle','-c',
|
||||
action="store", dest="ecotag:bubble_threshold",
|
||||
metavar='<CIRCLE_THRESHOLD>',
|
||||
default=0.99,
|
||||
type=float,
|
||||
help="Minimum identity considered for the assignment circle "
|
||||
"(sequence is assigned to the LCA of all sequences within a similarity circle of the best matches; "
|
||||
"the threshold for this circle is the highest value between <CIRCLE_THRESHOLD> and the best assignment score found for the query sequence). "
|
||||
"Give value as a normalized identity, e.g. 0.95 for an identity of 95%%. "
|
||||
"Default: 0.99.")
|
||||
|
||||
def run(config):
|
||||
|
||||
DMS.obi_atexit()
|
||||
@ -64,9 +77,8 @@ def run(config):
|
||||
ref_view_name = ref[1]
|
||||
|
||||
# Check that the threshold demanded is greater than or equal to the threshold used to build the reference database
|
||||
if config['ecotag']['threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
|
||||
print("Error: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).",
|
||||
config['ecotag']['threshold'], ref_dms[ref_view_name].comments["ref_db_threshold"])
|
||||
if config['ecotag']['bubble_threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
|
||||
raise Exception(f"Error: The threshold demanded ({config['ecotag']['bubble_threshold']}) is lower than the threshold used to build the reference database ({float(ref_dms[ref_view_name].comments['ref_db_threshold'])}).")
|
||||
|
||||
# Open the output: only the DMS
|
||||
output = open_uri(config['obi']['outputURI'],
|
||||
@ -75,17 +87,19 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output")
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If the input and output DMS are not the same, run ecotag creating a temporary view that will be exported to
|
||||
# the right DMS and deleted in the other afterwards.
|
||||
if i_dms != o_dms:
|
||||
temporary_view_name = final_o_view_name
|
||||
# If stdout output or the input and output DMS are not the same, create a temporary view that will be exported and deleted.
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = final_o_view_name+b"_"+str2bytes(str(i))
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
@ -107,10 +121,11 @@ def run(config):
|
||||
comments = View.print_config(config, "ecotag", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
|
||||
if obi_ecotag(i_dms.name_with_full_path, tobytes(i_view_name), \
|
||||
tobytes(ref_dms_name), tobytes(ref_view_name), \
|
||||
tobytes(taxo_dms_name), tobytes(taxonomy_name), \
|
||||
tobytes(o_view_name), comments,
|
||||
config['ecotag']['threshold']) < 0:
|
||||
ref_dms.name_with_full_path, tobytes(ref_view_name), \
|
||||
taxo_dms.name_with_full_path, tobytes(taxonomy_name), \
|
||||
tobytes(o_view_name), comments, \
|
||||
config['ecotag']['threshold'], \
|
||||
config['ecotag']['bubble_threshold']) < 0:
|
||||
raise Exception("Error running ecotag")
|
||||
|
||||
# If the input and output DMS are not the same, export result view to output DMS
|
||||
@ -120,11 +135,18 @@ def run(config):
|
||||
# Save command config in DMS comments
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view = o_dms[o_view_name]
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_dms[final_o_view_name]), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary result view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
|
||||
|
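The block above (and the equivalent blocks in grep, head, sort and tail further down) derives a collision-free temporary view name whenever the result has to be exported to another DMS or printed to stdout. A minimal sketch of that naming loop, with a plain Python set standing in for the DMS membership test (names are illustrative, not the obitools3 API):

# Minimal sketch: pick a temporary view name that does not collide with
# existing views. A set stands in for the "name in dms" membership test.
def unique_temp_name(existing_names, base=b"temp"):
    name = base
    i = 0
    while name in existing_names:          # keep suffixing until the name is free
        name = name + b"_" + str(i).encode()
        i += 1
    return name

existing = {b"temp", b"temp_0"}
print(unique_temp_name(existing))          # b'temp_0_1'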
@ -59,13 +59,23 @@ def run(config):
# Check that the input view has the type NUC_SEQS if needed # TODO discuss, maybe bool property
if (output[2] == Nuc_Seq) and (iview.type != b"NUC_SEQS_VIEW") : # Nuc_Seq_Stored? TODO
raise Exception("Error: the view to export in fasta or fastq format is not a NUC_SEQS view")

if config['obi']['only'] is not None:
withoutskip = min(input[4], config['obi']['only'])
else:
withoutskip = input[4]

if config['obi']['skip'] is not None:
skip = min(input[4], config['obi']['skip'])
else:
skip = 0

# Initialize the progress bar
if config['obi']['noprogressbar']:
pb = None
else:
pb = ProgressBar(len(iview), config, seconde=5)

pb = ProgressBar(withoutskip - skip, config)

i=0
for seq in iview :
PyErr_CheckSignals()
@ -79,7 +89,7 @@ def run(config):

if pb is not None:
pb(i, force=True)
print("", file=sys.stderr)
print("", file=sys.stderr)

# TODO save command in input dms?
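The export hunk above sizes its progress bar on the entries that will actually be written once --skip and --only are applied. A small standalone sketch of that arithmetic (variable names chosen for illustration):

def export_window(total, only=None, skip=None):
    # Entries kept once --only is applied (capped at the view size).
    withoutskip = min(total, only) if only is not None else total
    # Entries skipped at the start (also capped at the view size).
    skipped = min(total, skip) if skip is not None else 0
    # The progress bar is sized on what will really be exported.
    return withoutskip - skipped

print(export_window(1000))                      # 1000
print(export_window(1000, only=200))            # 200
print(export_window(1000, only=200, skip=50))   # 150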
@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View, Line_selection
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
@ -14,6 +14,7 @@ import time
|
||||
import re
|
||||
import sys
|
||||
import ast
|
||||
from io import BufferedWriter
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
|
||||
@ -28,6 +29,7 @@ def addOptions(parser):
|
||||
addMinimalInputOption(parser)
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group("obi grep specific options")
|
||||
|
||||
@ -161,8 +163,7 @@ def obi_eval(compiled_expr, loc_env, line):
|
||||
return obi_eval_result
|
||||
|
||||
|
||||
def Filter_generator(options, tax_filter):
|
||||
#taxfilter = taxonomyFilterGenerator(options)
|
||||
def Filter_generator(options, tax_filter, i_view):
|
||||
|
||||
# Initialize conditions
|
||||
predicates = None
|
||||
@ -171,6 +172,9 @@ def Filter_generator(options, tax_filter):
|
||||
attributes = None
|
||||
if "attributes" in options and len(options["attributes"]) > 0:
|
||||
attributes = options["attributes"]
|
||||
for attribute in attributes:
|
||||
if attribute not in i_view:
|
||||
return None
|
||||
lmax = None
|
||||
if "lmax" in options:
|
||||
lmax = options["lmax"]
|
||||
@ -180,7 +184,7 @@ def Filter_generator(options, tax_filter):
|
||||
invert_selection = options["invert_selection"]
|
||||
id_set = None
|
||||
if "id_list" in options:
|
||||
id_set = set(x.strip() for x in open(options["id_list"]))
|
||||
id_set = set(x.strip() for x in open(options["id_list"], 'rb'))
|
||||
|
||||
# Initialize the regular expression patterns
|
||||
seq_pattern = None
|
||||
@ -196,6 +200,8 @@ def Filter_generator(options, tax_filter):
|
||||
if "attribute_patterns" in options and len(options["attribute_patterns"]) > 0:
|
||||
for p in options["attribute_patterns"]:
|
||||
attribute, pattern = p.split(":", 1)
|
||||
if attribute not in i_view:
|
||||
return None
|
||||
attribute_patterns[tobytes(attribute)] = re.compile(tobytes(pattern))
|
||||
|
||||
def filter(line, loc_env):
|
||||
@ -300,16 +306,21 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
o_view_name_final = output[1]
|
||||
o_view_name = o_view_name_final
|
||||
|
||||
# If the input and output DMS are not the same, create output view in input DMS first, then export it
|
||||
# to output DMS, making sure the temporary view name is unique in the input DMS
|
||||
if i_dms != o_dms:
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If stdout output or the input and output DMS are not the same, create a temporary view that will be exported and deleted afterwards.
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while o_view_name in i_dms:
|
||||
o_view_name = o_view_name_final+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
|
||||
taxo_uri = open_uri(config["obi"]["taxoURI"])
|
||||
@ -320,33 +331,49 @@ def run(config):
|
||||
taxo = None
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(len(i_view), config, seconde=5)
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(i_view), config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
# Apply filter
|
||||
tax_filter = Taxonomy_filter_generator(taxo, config["grep"])
|
||||
filter = Filter_generator(config["grep"], tax_filter)
|
||||
filter = Filter_generator(config["grep"], tax_filter, i_view)
|
||||
selection = Line_selection(i_view)
|
||||
for i in range(len(i_view)):
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
line = i_view[i]
|
||||
|
||||
loc_env = {"sequence": line, "line": line, "taxonomy": taxo, "obi_eval_result": False}
|
||||
|
||||
good = filter(line, loc_env)
|
||||
|
||||
if good :
|
||||
|
||||
if filter is None and config["grep"]["invert_selection"]: # all sequences are selected: filter is None if no line will be selected because some columns don't exist
|
||||
for i in range(len(i_view)):
|
||||
PyErr_CheckSignals()
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
selection.append(i)
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
elif filter is not None : # filter is None if no line will be selected because some columns don't exist
|
||||
for i in range(len(i_view)):
|
||||
PyErr_CheckSignals()
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
line = i_view[i]
|
||||
|
||||
loc_env = {"sequence": line, "line": line, "taxonomy": taxo, "obi_eval_result": False}
|
||||
|
||||
good = filter(line, loc_env)
|
||||
|
||||
if good :
|
||||
selection.append(i)
|
||||
|
||||
if pb is not None:
|
||||
pb(len(i_view), force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Create output view with the line selection
|
||||
try:
|
||||
o_view = selection.materialize(o_view_name)
|
||||
except Exception, e:
|
||||
raise RollbackException("obi grep error, rollbacking view: "+str(e), o_view)
|
||||
|
||||
|
||||
logger("info", "Grepped %d entries" % len(o_view))
|
||||
|
||||
# Save command config in View and DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
input_dms_name=[input[0].name]
|
||||
@ -361,14 +388,20 @@ def run(config):
|
||||
# and delete the temporary view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
o_view.close()
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, o_view_name_final)
|
||||
o_view = o_dms[o_view_name_final]
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, final_o_view_name)
|
||||
o_view = o_dms[final_o_view_name]
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
i_dms.close(force=True)
|
||||
|
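In the grep changes above, Filter_generator returns None as soon as a requested attribute or attribute pattern refers to a column missing from the input view; the caller then selects every line only when --invert-selection is set, and nothing otherwise. A simplified sketch of that contract, with dictionaries standing in for view lines (illustrative names):

def make_filter(required_attributes, existing_columns):
    # Return None when a requested attribute column does not exist at all:
    # no line could ever match, so the caller decides what that means.
    for attr in required_attributes:
        if attr not in existing_columns:
            return None
    def line_filter(line):
        return all(line.get(attr) is not None for attr in required_attributes)
    return line_filter

columns = {"id", "count"}
rows = [{"id": "seq1", "count": 3}, {"id": "seq2", "count": None}]

flt = make_filter(["taxid"], columns)      # "taxid" column missing -> None
invert_selection = True
if flt is None:
    selected = list(range(len(rows))) if invert_selection else []
else:
    selected = [i for i, r in enumerate(rows) if flt(r)]
print(selected)                            # [0, 1] because the selection is inverted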
@ -4,14 +4,15 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View, Line_selection
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport str2bytes
|
||||
|
||||
import time
|
||||
import sys
|
||||
|
||||
from io import BufferedWriter
|
||||
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
|
||||
@ -22,6 +23,7 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi head specific options')
|
||||
|
||||
@ -53,31 +55,41 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
o_view_name_final = output[1]
|
||||
o_view_name = o_view_name_final
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If the input and output DMS are not the same, create output view in input DMS first, then export it
|
||||
# to output DMS, making sure the temporary view name is unique in the input DMS
|
||||
if i_dms != o_dms:
|
||||
# If stdout output or the input and output DMS are not the same, create a temporary view that will be exported and deleted.
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while o_view_name in i_dms:
|
||||
o_view_name = o_view_name_final+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
n = min(config['head']['count'], len(i_view))
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(n, config, seconde=5)
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(n, config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
selection = Line_selection(i_view)
|
||||
|
||||
for i in range(n):
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
selection.append(i)
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Create output view with the line selection
|
||||
try:
|
||||
@ -94,14 +106,20 @@ def run(config):
|
||||
# and delete the temporary view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
o_view.close()
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, o_view_name_final)
|
||||
o_view = o_dms[o_view_name_final]
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, final_o_view_name)
|
||||
o_view = o_dms[final_o_view_name]
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(view), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
i_dms.close(force=True)
|
||||
|
@ -25,13 +25,16 @@ from obitools3.dms.capi.obiview cimport VIEW_TYPE_NUC_SEQS, \
|
||||
DEFINITION_COLUMN, \
|
||||
QUALITY_COLUMN, \
|
||||
COUNT_COLUMN, \
|
||||
TAXID_COLUMN
|
||||
TAXID_COLUMN, \
|
||||
MERGED_PREFIX, \
|
||||
SCIENTIFIC_NAME_COLUMN
|
||||
|
||||
from obitools3.dms.capi.obidms cimport obi_import_view
|
||||
|
||||
from obitools3.dms.capi.obitypes cimport obitype_t, \
|
||||
OBI_VOID, \
|
||||
OBI_QUAL
|
||||
OBI_QUAL, \
|
||||
OBI_STR
|
||||
|
||||
from obitools3.dms.capi.obierrno cimport obi_errno
|
||||
|
||||
@ -46,6 +49,8 @@ from obitools3.apps.config import logger
|
||||
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
__title__="Imports sequences from different formats into a DMS"
|
||||
|
||||
@ -72,8 +77,13 @@ def addOptions(parser):
|
||||
action="store_true", dest="import:preread",
|
||||
default=False,
|
||||
help="Do a first readthrough of the dataset if it contains huge dictionaries (more than 100 keys) for "
|
||||
"a much faster import.")
|
||||
"a much faster import. This option is not recommended and will slow down the import in any other case.")
|
||||
|
||||
group.add_argument('--space-priority',
|
||||
action="store_true", dest="import:space_priority",
|
||||
default=False,
|
||||
help="If importing a view into another DMS, do it by importing each line, saving disk space if the original view "
|
||||
"has a line selection associated.")
|
||||
|
||||
def run(config):
|
||||
|
||||
@ -86,6 +96,7 @@ def run(config):
|
||||
cdef obitype_t new_type
|
||||
cdef bint get_quality
|
||||
cdef bint NUC_SEQS_view
|
||||
cdef bint silva
|
||||
cdef int nb_elts
|
||||
cdef object d
|
||||
cdef View view
|
||||
@ -96,6 +107,8 @@ def run(config):
|
||||
cdef Column seq_col
|
||||
cdef Column qual_col
|
||||
cdef Column old_column
|
||||
cdef Column sci_name_col
|
||||
cdef bytes sci_name
|
||||
cdef bint rewrite
|
||||
cdef dict dcols
|
||||
cdef int skipping
|
||||
@ -129,7 +142,7 @@ def run(config):
|
||||
if entry_count > 0:
|
||||
logger("info", "Importing %d entries", entry_count)
|
||||
else:
|
||||
logger("info", "Importing an unknow number of entries")
|
||||
logger("info", "Importing an unknown number of entries")
|
||||
|
||||
# TODO a bit dirty?
|
||||
if input[2]==Nuc_Seq or input[2]==View_NUC_SEQS:
|
||||
@ -139,7 +152,7 @@ def run(config):
|
||||
else:
|
||||
v = None
|
||||
|
||||
if config['obi']['taxdump'] or isinstance(input[1], View):
|
||||
if config['obi']['taxdump'] or (isinstance(input[1], View) and not config['import']['space_priority']):
|
||||
dms_only=True
|
||||
else:
|
||||
dms_only=False
|
||||
@ -163,21 +176,24 @@ def run(config):
|
||||
taxo.write(taxo_name)
|
||||
taxo.close()
|
||||
o_dms.record_command_line(" ".join(sys.argv[1:]))
|
||||
o_dms.close()
|
||||
o_dms.close(force=True)
|
||||
logger("info", "Done.")
|
||||
return
|
||||
|
||||
# If importing a view between two DMS, use C API
|
||||
if isinstance(input[1], View):
|
||||
# If importing a view between two DMS and not wanting to save space if line selection in original view, use C API
|
||||
if isinstance(input[1], View) and not config['import']['space_priority']:
|
||||
if obi_import_view(input[0].name_with_full_path, o_dms.name_with_full_path, input[1].name, tobytes((config['obi']['outputURI'].split('/'))[-1])) < 0 :
|
||||
input[0].close(force=True)
|
||||
output[0].close(force=True)
|
||||
raise Exception("Error importing a view in a DMS")
|
||||
o_dms.record_command_line(" ".join(sys.argv[1:]))
|
||||
o_dms.close()
|
||||
input[0].close(force=True)
|
||||
output[0].close(force=True)
|
||||
logger("info", "Done.")
|
||||
return
|
||||
|
||||
if entry_count >= 0:
|
||||
pb = ProgressBar(entry_count, config, seconde=5)
|
||||
pb = ProgressBar(entry_count, config)
|
||||
|
||||
NUC_SEQS_view = False
|
||||
if isinstance(output[1], View) :
|
||||
@ -192,9 +208,16 @@ def run(config):
|
||||
id_col = view[ID_COLUMN]
|
||||
def_col = view[DEFINITION_COLUMN]
|
||||
seq_col = view[NUC_SEQUENCE_COLUMN]
|
||||
|
||||
|
||||
# Prepare taxon scientific name if SILVA file
|
||||
if 'inputformat' in config['obi'] and config['obi']['inputformat'] == b"silva":
|
||||
silva = True
|
||||
sci_name_col = Column.new_column(view, SCIENTIFIC_NAME_COLUMN, OBI_STR)
|
||||
else:
|
||||
silva = False
|
||||
|
||||
dcols = {}
|
||||
|
||||
|
||||
# First read through the entries to prepare columns with dictionaries as they are very time-expensive to rewrite
|
||||
if config['import']['preread']:
|
||||
logger("info", "First readthrough...")
|
||||
@ -217,11 +240,14 @@ def run(config):
|
||||
logger("info", "Read %d entries", i)
|
||||
|
||||
for tag in entry :
|
||||
newtag = tag
|
||||
if tag[:7] == b"merged_":
|
||||
newtag = MERGED_PREFIX+tag[7:]
|
||||
if type(entry[tag]) == dict :
|
||||
if tag in dict_dict:
|
||||
dict_dict[tag][0].update(entry[tag].keys())
|
||||
dict_dict[newtag][0].update(entry[tag].keys())
|
||||
else:
|
||||
dict_dict[tag] = [set(entry[tag].keys()), get_obitype(entry[tag])]
|
||||
dict_dict[newtag] = [set(entry[tag].keys()), get_obitype(entry[tag])]
|
||||
i+=1
|
||||
|
||||
if pb is not None:
|
||||
@ -231,15 +257,16 @@ def run(config):
|
||||
for tag in dict_dict:
|
||||
dcols[tag] = (Column.new_column(view, tag, dict_dict[tag][1], \
|
||||
nb_elements_per_line=len(dict_dict[tag][0]), \
|
||||
elements_names=list(dict_dict[tag][0])), \
|
||||
value_obitype)
|
||||
elements_names=list(dict_dict[tag][0]), \
|
||||
dict_column=True), \
|
||||
dict_dict[tag][1])
|
||||
|
||||
|
||||
# Reinitialize the input
|
||||
if isinstance(input[0], CompressedFile):
|
||||
input_is_file = True
|
||||
if entry_count >= 0:
|
||||
pb = ProgressBar(entry_count, config, seconde=5)
|
||||
pb = ProgressBar(entry_count, config)
|
||||
try:
|
||||
input[0].close()
|
||||
except AttributeError:
|
||||
@ -256,7 +283,6 @@ def run(config):
|
||||
|
||||
if entry is None: # error or exception handled at lower level, not raised because Python generators can't resume after any exception is raised
|
||||
if config['obi']['skiperror']:
|
||||
i-=1
|
||||
continue
|
||||
else:
|
||||
raise RollbackException("obi import error, rollbacking view", view)
|
||||
@ -265,123 +291,140 @@ def run(config):
|
||||
pb(i)
|
||||
elif not i%50000:
|
||||
logger("info", "Imported %d entries", i)
|
||||
|
||||
if NUC_SEQS_view:
|
||||
id_col[i] = entry.id
|
||||
def_col[i] = entry.definition
|
||||
seq_col[i] = entry.seq
|
||||
# Check if there is a sequencing quality associated by checking the first entry # TODO haven't found a more robust solution yet
|
||||
if i == 0:
|
||||
get_quality = QUALITY_COLUMN in entry
|
||||
|
||||
try:
|
||||
|
||||
if NUC_SEQS_view:
|
||||
id_col[i] = entry.id
|
||||
def_col[i] = entry.definition
|
||||
seq_col[i] = entry.seq
|
||||
# Check if there is a sequencing quality associated by checking the first entry # TODO haven't found a more robust solution yet
|
||||
if i == 0:
|
||||
get_quality = QUALITY_COLUMN in entry
|
||||
if get_quality:
|
||||
Column.new_column(view, QUALITY_COLUMN, OBI_QUAL)
|
||||
qual_col = view[QUALITY_COLUMN]
|
||||
if get_quality:
|
||||
Column.new_column(view, QUALITY_COLUMN, OBI_QUAL)
|
||||
qual_col = view[QUALITY_COLUMN]
|
||||
if get_quality:
|
||||
qual_col[i] = entry.quality
|
||||
|
||||
for tag in entry :
|
||||
|
||||
if tag != ID_COLUMN and tag != DEFINITION_COLUMN and tag != NUC_SEQUENCE_COLUMN and tag != QUALITY_COLUMN : # TODO dirty
|
||||
|
||||
value = entry[tag]
|
||||
if tag == b"taxid":
|
||||
tag = TAXID_COLUMN
|
||||
if tag == b"count":
|
||||
tag = COUNT_COLUMN
|
||||
qual_col[i] = entry.quality
|
||||
|
||||
if tag not in dcols :
|
||||
|
||||
value_type = type(value)
|
||||
nb_elts = 1
|
||||
value_obitype = OBI_VOID
|
||||
|
||||
if value_type == dict or value_type == list :
|
||||
nb_elts = len(value)
|
||||
elt_names = list(value)
|
||||
else :
|
||||
nb_elts = 1
|
||||
elt_names = None
|
||||
|
||||
value_obitype = get_obitype(value)
|
||||
|
||||
if value_obitype != OBI_VOID :
|
||||
dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names), value_obitype)
|
||||
|
||||
# Fill value
|
||||
if value_type == dict and nb_elts == 1: # special case that makes the OBI3 create a 1 elt/line column which won't read a dict value
|
||||
value = value[list(value.keys())[0]] # The solution is to transform the value in a simple atomic one acceptable by the column
|
||||
dcols[tag][0][i] = value
|
||||
|
||||
# TODO else log error?
|
||||
|
||||
else :
|
||||
|
||||
rewrite = False
|
||||
|
||||
# Check type adequation
|
||||
old_type = dcols[tag][1]
|
||||
new_type = OBI_VOID
|
||||
new_type = update_obitype(old_type, value)
|
||||
if old_type != new_type :
|
||||
rewrite = True
|
||||
|
||||
try:
|
||||
# Check that it's not the case where the first entry contained a dict of length 1 and now there is a new key
|
||||
if type(value) == dict and \
|
||||
dcols[tag][0].nb_elements_per_line == 1 and len(value.keys()) == 1 \
|
||||
and dcols[tag][0].elements_names[0] != list(value.keys())[0] :
|
||||
raise IndexError # trigger column rewrite
|
||||
# Parse taxon scientific name if SILVA file
|
||||
if silva:
|
||||
sci_name = entry.definition.split(b";")[-1]
|
||||
sci_name_col[i] = sci_name
|
||||
|
||||
for tag in entry :
|
||||
|
||||
if tag != ID_COLUMN and tag != DEFINITION_COLUMN and tag != NUC_SEQUENCE_COLUMN and tag != QUALITY_COLUMN : # TODO dirty
|
||||
|
||||
value = entry[tag]
|
||||
if tag == b"taxid":
|
||||
tag = TAXID_COLUMN
|
||||
if tag == b"count":
|
||||
tag = COUNT_COLUMN
|
||||
if tag[:7] == b"merged_":
|
||||
tag = MERGED_PREFIX+tag[7:]
|
||||
|
||||
# Fill value
|
||||
dcols[tag][0][i] = value
|
||||
|
||||
except IndexError :
|
||||
|
||||
if tag not in dcols :
|
||||
|
||||
value_type = type(value)
|
||||
old_column = dcols[tag][0]
|
||||
old_nb_elements_per_line = old_column.nb_elements_per_line
|
||||
new_nb_elements_per_line = 0
|
||||
old_elements_names = old_column.elements_names
|
||||
new_elements_names = None
|
||||
|
||||
#####################################################################
|
||||
nb_elts = 1
|
||||
value_obitype = OBI_VOID
|
||||
dict_col = False
|
||||
|
||||
# Check the length and keys of column lines if needed
|
||||
if value_type == dict : # Check dictionary keys
|
||||
for k in value :
|
||||
if k not in old_elements_names :
|
||||
new_elements_names = list(set(old_elements_names+[tobytes(k) for k in value]))
|
||||
rewrite = True
|
||||
break
|
||||
if value_type == dict or value_type == list :
|
||||
nb_elts = len(value)
|
||||
elt_names = list(value)
|
||||
if value_type == dict :
|
||||
dict_col = True
|
||||
else :
|
||||
nb_elts = 1
|
||||
elt_names = None
|
||||
|
||||
elif value_type == list or value_type == tuple : # Check vector length
|
||||
if old_nb_elements_per_line < len(value) :
|
||||
new_nb_elements_per_line = len(value)
|
||||
rewrite = True
|
||||
value_obitype = get_obitype(value)
|
||||
|
||||
#####################################################################
|
||||
|
||||
if rewrite :
|
||||
if new_nb_elements_per_line == 0 and new_elements_names is not None :
|
||||
new_nb_elements_per_line = len(new_elements_names)
|
||||
|
||||
# Reset obierrno
|
||||
obi_errno = 0
|
||||
|
||||
dcols[tag] = (view.rewrite_column_with_diff_attributes(old_column.name,
|
||||
new_data_type=new_type,
|
||||
new_nb_elements_per_line=new_nb_elements_per_line,
|
||||
new_elements_names=new_elements_names,
|
||||
rewrite_last_line=False),
|
||||
new_type)
|
||||
|
||||
# Update the dictionary:
|
||||
for t in dcols :
|
||||
dcols[t] = (view[t], dcols[t][1])
|
||||
|
||||
if value_obitype != OBI_VOID :
|
||||
dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names, dict_column=dict_col), value_obitype)
|
||||
|
||||
# Fill value
|
||||
dcols[tag][0][i] = value
|
||||
|
||||
|
||||
# TODO else log error?
|
||||
|
||||
else :
|
||||
|
||||
rewrite = False
|
||||
|
||||
# Check type adequation
|
||||
old_type = dcols[tag][1]
|
||||
new_type = OBI_VOID
|
||||
new_type = update_obitype(old_type, value)
|
||||
if old_type != new_type :
|
||||
rewrite = True
|
||||
|
||||
try:
|
||||
# Check that it's not the case where the first entry contained a dict of length 1 and now there is a new key
|
||||
if type(value) == dict and \
|
||||
dcols[tag][0].nb_elements_per_line == 1 \
|
||||
and set(dcols[tag][0].elements_names) != set(value.keys()) :
|
||||
raise IndexError # trigger column rewrite
|
||||
|
||||
# Fill value
|
||||
dcols[tag][0][i] = value
|
||||
|
||||
except IndexError :
|
||||
|
||||
value_type = type(value)
|
||||
old_column = dcols[tag][0]
|
||||
old_nb_elements_per_line = old_column.nb_elements_per_line
|
||||
new_nb_elements_per_line = 0
|
||||
old_elements_names = old_column.elements_names
|
||||
new_elements_names = None
|
||||
|
||||
#####################################################################
|
||||
|
||||
# Check the length and keys of column lines if needed
|
||||
if value_type == dict : # Check dictionary keys
|
||||
for k in value :
|
||||
if k not in old_elements_names :
|
||||
new_elements_names = list(set(old_elements_names+[tobytes(k) for k in value]))
|
||||
rewrite = True
|
||||
break
|
||||
|
||||
elif value_type == list or value_type == tuple : # Check vector length
|
||||
if old_nb_elements_per_line < len(value) :
|
||||
new_nb_elements_per_line = len(value)
|
||||
rewrite = True
|
||||
|
||||
#####################################################################
|
||||
|
||||
if rewrite :
|
||||
if new_nb_elements_per_line == 0 and new_elements_names is not None :
|
||||
new_nb_elements_per_line = len(new_elements_names)
|
||||
|
||||
# Reset obierrno
|
||||
obi_errno = 0
|
||||
|
||||
dcols[tag] = (view.rewrite_column_with_diff_attributes(old_column.name,
|
||||
new_data_type=new_type,
|
||||
new_nb_elements_per_line=new_nb_elements_per_line,
|
||||
new_elements_names=new_elements_names,
|
||||
rewrite_last_line=False),
|
||||
new_type)
|
||||
|
||||
# Update the dictionary:
|
||||
for t in dcols :
|
||||
dcols[t] = (view[t], dcols[t][1])
|
||||
|
||||
# Fill value
|
||||
dcols[tag][0][i] = value
|
||||
|
||||
except Exception as e:
|
||||
print("\nCould not import sequence id:", entry.id, "(error raised:", e, ")")
|
||||
if 'skiperror' in config['obi'] and not config['obi']['skiperror']:
|
||||
raise e
|
||||
else:
|
||||
pass
|
||||
|
||||
i+=1
|
||||
|
||||
if pb is not None:
|
||||
@ -402,7 +445,7 @@ def run(config):
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
output[0].close()
|
||||
output[0].close(force=True)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
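The new --silva-input handling stores a scientific name taken from the FASTA definition line, whose last semicolon-separated field holds the organism name in SILVA headers. A rough standalone sketch of that extraction (the header shown is only an example):

# SILVA-style definition: a semicolon-separated taxonomic path whose
# last field is the organism/scientific name.
definition = b"Bacteria;Proteobacteria;Gammaproteobacteria;Escherichia coli"

# Same extraction as the importer: keep the last semicolon-separated field.
sci_name = definition.split(b";")[-1]
print(sci_name)   # b'Escherichia coli'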
@ -31,26 +31,11 @@ def run(config):
input = open_uri(config['obi']['inputURI'])
if input is None:
raise Exception("Could not read input")
if input[2] == DMS and not config['ls']['longformat']:
dms = input[0]
l = []
for view in input[0]:
l.append(tostr(view) + "\t(Date created: " + str(bytes2str_object(dms[view].comments["Date created"]))+")")
dms[view].close()
l.sort()
for v in l:
print(v)

# Print representation
if config['ls']['longformat']:
print(input[1].repr_longformat())
else:
print(repr(input[1]))
if input[2] == DMS:
taxolist = ["\n### Taxonomies:"]
for t in Taxonomy.list_taxos(input[0]):
taxolist.append("\t"+tostr(t))
if len(taxolist) > 1:
for t in taxolist:
print(t)
if config['ls']['longformat'] and len(input[1].comments) > 0:
print("\n### Comments:")
print(str(input[1].comments))

input[0].close(force=True)
@ -2,10 +2,10 @@
|
||||
|
||||
from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.dms.view import RollbackException, View
|
||||
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
|
||||
from obitools3.dms.column.column cimport Column, Column_line
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.libalign._freeendgapfm import FreeEndGapFullMatch
|
||||
@ -14,17 +14,14 @@ from obitools3.dms.obiseq cimport Nuc_Seq
|
||||
from obitools3.dms.capi.obitypes cimport OBI_SEQ, OBI_QUAL
|
||||
from obitools3.dms.capi.apat cimport MAX_PATTERN
|
||||
from obitools3.dms.capi.obiview cimport REVERSE_SEQUENCE_COLUMN, REVERSE_QUALITY_COLUMN
|
||||
from obitools3.utils cimport tobytes
|
||||
from obitools3.utils cimport tobytes, str2bytes
|
||||
|
||||
from libc.stdint cimport INT32_MAX
|
||||
from functools import reduce
|
||||
import math
|
||||
import sys
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
|
||||
#REVERSE_SEQ_COLUMN_NAME = b"REVERSE_SEQUENCE" # used by alignpairedend tool
|
||||
#REVERSE_QUALITY_COLUMN_NAME = b"REVERSE_QUALITY" # used by alignpairedend tool
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
__title__="Assigns sequence records to the corresponding experiment/sample based on DNA tags and primers"
|
||||
@ -34,7 +31,8 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group = parser.add_argument_group('obi ngsfilter specific options')
|
||||
|
||||
group.add_argument('-t','--info-view',
|
||||
@ -42,8 +40,9 @@ def addOptions(parser):
|
||||
metavar="<URI>",
|
||||
type=str,
|
||||
default=None,
|
||||
help="URI to the view containing the samples definition (with tags, primers, sample names,...)"
|
||||
"Warning: primer lengths must be less than or equal to 32")
|
||||
required=True,
|
||||
help="URI to the view containing the samples definition (with tags, primers, sample names,...).\n"
|
||||
"\nWarning: primer lengths must be less than or equal to 32")
|
||||
|
||||
group.add_argument('-R', '--reverse-reads',
|
||||
action="store", dest="ngsfilter:reverse",
|
||||
@ -57,7 +56,7 @@ def addOptions(parser):
|
||||
metavar="<URI>",
|
||||
type=str,
|
||||
default=None,
|
||||
help="URI to the view used to store the sequences unassigned to any sample")
|
||||
help="URI to the view used to store the sequences unassigned to any sample. Those sequences are untrimmed.")
|
||||
|
||||
group.add_argument('--no-tags',
|
||||
action="store_true", dest="ngsfilter:notags",
|
||||
@ -478,6 +477,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
|
||||
if not directmatch[0].forward:
|
||||
sequences[0] = sequences[0].reverse_complement
|
||||
sequences[0][b'reversed'] = True # used by the alignpairedend tool (in kmer_similarity.c)
|
||||
else:
|
||||
sequences[0][b'reversed'] = False # used by the alignpairedend tool (in kmer_similarity.c)
|
||||
|
||||
sample=None
|
||||
if not no_tags:
|
||||
@ -505,7 +506,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
|
||||
sample=None
|
||||
|
||||
if sample is None:
|
||||
sequences[0][b'error']=b"No tags found"
|
||||
sequences[0][b'error']=b"No sample with that tag combination"
|
||||
return False, sequences[0]
|
||||
|
||||
sequences[0].update(sample)
|
||||
@ -536,7 +537,8 @@ def run(config):
|
||||
raise Exception("Could not open input reads")
|
||||
if input[2] != View_NUC_SEQS:
|
||||
raise NotImplementedError('obi ngsfilter only works on NUC_SEQS views')
|
||||
|
||||
i_dms = input[0]
|
||||
|
||||
if "reverse" in config["ngsfilter"]:
|
||||
|
||||
forward = input[1]
|
||||
@ -577,8 +579,19 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
o_view = output[1]
|
||||
|
||||
# If stdout output, create a temporary view in the input dms that will be deleted afterwards.
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
o_view_name = b"temp"
|
||||
while o_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
o_view_name = o_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view = View_NUC_SEQS.new(i_dms, o_view_name)
|
||||
|
||||
# Open the view containing the informations about the tags and the primers
|
||||
info_input = open_uri(config['ngsfilter']['info_view'])
|
||||
if info_input is None:
|
||||
@ -599,7 +612,10 @@ def run(config):
|
||||
unidentified = None
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(entries_len, config, seconde=5)
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(entries_len, config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
# Check and store primers and tags
|
||||
try:
|
||||
@ -629,11 +645,13 @@ def run(config):
|
||||
|
||||
g = 0
|
||||
u = 0
|
||||
i = 0
|
||||
no_tags = config['ngsfilter']['notags']
|
||||
try:
|
||||
for i in range(entries_len):
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
if not_aligned:
|
||||
modseq = [Nuc_Seq.new_from_stored(forward[i]), Nuc_Seq.new_from_stored(reverse[i])]
|
||||
else:
|
||||
@ -643,7 +661,13 @@ def run(config):
|
||||
o_view[g].set(oseq.id, oseq.seq, definition=oseq.definition, quality=oseq.quality, tags=oseq)
|
||||
g+=1
|
||||
elif unidentified is not None:
|
||||
unidentified[u].set(oseq.id, oseq.seq, definition=oseq.definition, quality=oseq.quality, tags=oseq)
|
||||
# Untrim sequences (put original back)
|
||||
if len(modseq) > 1:
|
||||
oseq[REVERSE_SEQUENCE_COLUMN] = reverse[i].seq
|
||||
oseq[REVERSE_QUALITY_COLUMN] = reverse[i].quality
|
||||
unidentified[u].set(oseq.id, forward[i].seq, definition=oseq.definition, quality=forward[i].quality, tags=oseq)
|
||||
else:
|
||||
unidentified[u].set(oseq.id, entries[i].seq, definition=oseq.definition, quality=entries[i].quality, tags=oseq)
|
||||
u+=1
|
||||
except Exception, e:
|
||||
if unidentified is not None:
|
||||
@ -651,8 +675,9 @@ def run(config):
|
||||
else:
|
||||
raise RollbackException("obi ngsfilter error, rollbacking view: "+str(e), o_view)
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Save command config in View and DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
@ -661,13 +686,23 @@ def run(config):
|
||||
unidentified.write_config(config, "ngsfilter", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
# Add comment about unidentified seqs
|
||||
unidentified.comments["info"] = "View containing sequences categorized as unidentified by the ngsfilter command"
|
||||
output[0].record_command_line(command_line)
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
input[0].close(force=True)
|
||||
output[0].close(force=True)
|
||||
# If stdout output, delete the temporary result view in the input DMS
|
||||
if type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
|
||||
i_dms.close(force=True)
|
||||
o_dms.close(force=True)
|
||||
info_input[0].close(force=True)
|
||||
if unidentified is not None:
|
||||
unidentified_input[0].close(force=True)
|
||||
|
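The ngsfilter hunks above keep unassigned reads untrimmed: before writing to the unidentified view, the original forward and reverse sequences and qualities are restored so no bases are lost. A schematic of that bookkeeping with plain dictionaries standing in for sequence records (names are illustrative):

def untrimmed_record(annotated, forward, reverse=None):
    # Keep the annotations gathered so far, but put the original reads back.
    record = dict(annotated)
    record["seq"] = forward["seq"]
    record["quality"] = forward["quality"]
    if reverse is not None:                 # paired input: restore the mate too
        record["reverse_sequence"] = reverse["seq"]
        record["reverse_quality"] = reverse["quality"]
    return record

fwd = {"seq": "ACGTACGTACGT", "quality": [30] * 12}
rev = {"seq": "TTGGCCAATTGG", "quality": [28] * 12}
annot = {"id": "read1", "error": "No sample with that tag combination",
         "seq": "ACGT", "quality": [30] * 4}   # trimmed during tag/primer matching
print(untrimmed_record(annot, fwd, rev)["seq"])   # ACGTACGTACGT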
@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View, Line_selection
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport str2bytes
|
||||
@ -24,6 +24,7 @@ from obitools3.dms.capi.obitypes cimport OBI_BOOL, \
|
||||
import time
|
||||
import sys
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
NULL_VALUE = {OBI_BOOL: OBIBool_NA,
|
||||
@ -42,6 +43,7 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi sort specific options')
|
||||
|
||||
@ -59,7 +61,7 @@ def addOptions(parser):
|
||||
|
||||
|
||||
def line_cmp(line, key, pb):
|
||||
pb
|
||||
pb
|
||||
if line[key] is None:
|
||||
return NULL_VALUE[line.view[key].data_type_int]
|
||||
else:
|
||||
@ -86,20 +88,28 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
o_view_name_final = output[1]
|
||||
o_view_name = o_view_name_final
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If the input and output DMS are not the same, create output view in input DMS first, then export it
|
||||
# to output DMS, making sure the temporary view name is unique in the input DMS
|
||||
if i_dms != o_dms:
|
||||
# If stdout output or the input and output DMS are not the same, create a temporary view that will be exported and deleted.
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while o_view_name in i_dms:
|
||||
o_view_name = o_view_name_final+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(len(i_view), config, seconde=5)
|
||||
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(i_view), config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
keys = config['sort']['keys']
|
||||
|
||||
selection = Line_selection(i_view)
|
||||
@ -110,10 +120,14 @@ def run(config):
|
||||
|
||||
for k in keys: # TODO order?
|
||||
PyErr_CheckSignals()
|
||||
selection.sort(key=lambda line_idx: line_cmp(i_view[line_idx], k, pb(line_idx)), reverse=config['sort']['reverse'])
|
||||
|
||||
pb(len(i_view), force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
selection.sort(key=lambda line_idx: line_cmp(i_view[line_idx], k, pb(line_idx)), reverse=config['sort']['reverse'])
|
||||
else:
|
||||
selection.sort(key=lambda line_idx: line_cmp(i_view[line_idx], k, None), reverse=config['sort']['reverse'])
|
||||
|
||||
if pb is not None:
|
||||
pb(len(i_view), force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Create output view with the sorted line selection
|
||||
try:
|
||||
@ -132,16 +146,23 @@ def run(config):
|
||||
# and delete the temporary view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
o_view.close()
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, o_view_name_final)
|
||||
o_view = o_dms[o_view_name_final]
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, final_o_view_name)
|
||||
o_view = o_dms[final_o_view_name]
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
|
||||
i_dms.close(force=True)
|
||||
|
||||
logger("info", "Done.")
|
||||
|
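obi sort keeps lines with missing values sortable by substituting a type-specific NA sentinel, the NULL_VALUE table imported at the top of the command. A reduced sketch of the comparison key in plain Python, with made-up sentinels rather than the real OBIType NA values:

# Illustrative sentinels; the real command maps OBITypes to their NA values.
NULL_VALUE = {int: 0, float: 0.0, str: "", bytes: b""}

def sort_key(line, key, column_type):
    value = line.get(key)
    # Missing values fall back to the column's NA sentinel so they sort together.
    return NULL_VALUE[column_type] if value is None else value

lines = [{"count": 5}, {"count": None}, {"count": 2}]
lines.sort(key=lambda l: sort_key(l, "count", int), reverse=True)
print([l["count"] for l in lines])   # [5, 2, None]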
@ -162,7 +162,7 @@ def run(config):
lcat=0

# Initialize the progress bar
pb = ProgressBar(len(i_view), config, seconde=5)
pb = ProgressBar(len(i_view), config)

for i in range(len(i_view)):
PyErr_CheckSignals()
@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.dms import DMS
|
||||
from obitools3.dms.view.view cimport View, Line_selection
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputOption, addNoProgressBarOption
|
||||
from obitools3.dms.view import RollbackException
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport str2bytes
|
||||
@ -12,6 +12,7 @@ from obitools3.utils cimport str2bytes
|
||||
import time
|
||||
import sys
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
__title__="Keep the N last lines of a view."
|
||||
@ -21,6 +22,7 @@ def addOptions(parser):
|
||||
|
||||
addMinimalInputOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group=parser.add_argument_group('obi tail specific options')
|
||||
|
||||
@ -52,31 +54,41 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
o_dms = output[0]
|
||||
o_view_name_final = output[1]
|
||||
o_view_name = o_view_name_final
|
||||
output_0 = output[0]
|
||||
final_o_view_name = output[1]
|
||||
|
||||
# If the input and output DMS are not the same, create output view in input DMS first, then export it
|
||||
# to output DMS, making sure the temporary view name is unique in the input DMS
|
||||
if i_dms != o_dms:
|
||||
# If stdout output or the input and output DMS are not the same, create a temporary view that will be exported and deleted.
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while o_view_name in i_dms:
|
||||
o_view_name = o_view_name_final+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
if type(output_0)==BufferedWriter:
|
||||
o_dms = i_dms
|
||||
else:
|
||||
o_view_name = final_o_view_name
|
||||
|
||||
start = max(len(i_view) - config['tail']['count'], 0)
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(len(i_view) - start, config, seconde=5)
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(i_view) - start, config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
selection = Line_selection(i_view)
|
||||
|
||||
for i in range(start, len(i_view)):
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
selection.append(i)
|
||||
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
pb(i, force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Save command config in View comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
@ -97,14 +109,20 @@ def run(config):
|
||||
# and delete the temporary view in the input DMS
|
||||
if i_dms != o_dms:
|
||||
o_view.close()
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, o_view_name_final)
|
||||
o_view = o_dms[o_view_name_final]
|
||||
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], o_view_name, final_o_view_name)
|
||||
o_view = o_dms[final_o_view_name]
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms:
|
||||
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
|
||||
if i_dms != o_dms or type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
o_dms.close(force=True)
|
||||
i_dms.close(force=True)
|
||||
|
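obi tail selects the last N lines by clamping the window start at zero, so requesting more lines than the view holds simply selects everything. The index arithmetic in isolation:

def tail_indices(view_length, count):
    start = max(view_length - count, 0)   # clamp so count > view_length selects all
    return list(range(start, view_length))

print(tail_indices(10, 3))    # [7, 8, 9]
print(tail_indices(5, 50))    # [0, 1, 2, 3, 4]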
@ -23,6 +23,7 @@ from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
|
||||
import shutil
|
||||
import string
|
||||
import random
|
||||
import sys
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
|
||||
@ -300,8 +301,11 @@ def fill_column(config, infos, col) :
|
||||
def create_random_column(config, infos) :
|
||||
alias = random.choice([b'', random_unique_name(infos)])
|
||||
tuples = random.choice([True, False])
|
||||
dict_column = False
|
||||
if not tuples :
|
||||
nb_elements_per_line=random.randint(1, config['test']['maxelts'])
|
||||
if nb_elements_per_line > 1:
|
||||
dict_column = True
|
||||
elements_names = []
|
||||
for i in range(nb_elements_per_line) :
|
||||
elements_names.append(random_unique_element_name(config, infos))
|
||||
@ -317,6 +321,7 @@ def create_random_column(config, infos) :
|
||||
data_type,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=random_comments(config),
|
||||
alias=alias
|
||||
@ -366,7 +371,7 @@ def random_new_view(config, infos, first=False):
|
||||
infos['view'] = View_NUC_SEQS.new(infos['dms'], random_unique_name(infos), comments=random_comments(config)) # TODO quality column
|
||||
else :
|
||||
infos['view'] = View.new(infos['dms'], random_unique_name(infos), comments=random_comments(config)) # TODO quality column
|
||||
|
||||
infos['view'].write_config(config, "test", infos["command_line"], input_dms_name=[infos['dms'].name], input_view_name=["random"])
|
||||
print_test(config, repr(infos['view']))
|
||||
if v_to_clone is not None :
|
||||
if line_selection is None:
|
||||
@ -441,7 +446,7 @@ def addOptions(parser):
|
||||
default=20,
|
||||
type=int,
|
||||
help="Maximum length of tuples. "
|
||||
"Default: 200")
|
||||
"Default: 20")
|
||||
|
||||
group.add_argument('--max_ini_col_count','-o',
|
||||
action="store", dest="test:maxinicolcount",
|
||||
@ -454,10 +459,10 @@ def addOptions(parser):
|
||||
group.add_argument('--max_line_nb','-l',
|
||||
action="store", dest="test:maxlinenb",
|
||||
metavar='<MAX_LINE_NB>',
|
||||
default=10000,
|
||||
default=1000,
|
||||
type=int,
|
||||
help="Maximum number of lines in a column. "
|
||||
"Default: 10000")
|
||||
"Default: 1000")
|
||||
|
||||
group.add_argument('--max_elts_per_line','-e',
|
||||
action="store", dest="test:maxelts",
|
||||
@ -497,7 +502,8 @@ def run(config):
|
||||
(b"OBI_SEQ", False): random_seq, (b"OBI_SEQ", True): random_seq_tuples,
|
||||
(b"OBI_STR", False): random_bytes, (b"OBI_STR", True): random_bytes_tuples
|
||||
},
|
||||
'tests': [test_set_and_get, test_add_col, test_delete_col, test_col_alias, test_new_view]
|
||||
'tests': [test_set_and_get, test_add_col, test_delete_col, test_col_alias, test_new_view],
|
||||
'command_line': " ".join(sys.argv[1:])
|
||||
}
|
||||
|
||||
# TODO ???
|
||||
|
@ -5,5 +5,5 @@ from obitools3.dms.taxo.taxo cimport Taxonomy
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS

cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy)
cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, list mergedKeys_list=*, Taxonomy taxonomy=*, bint mergeIds=*, list categories=*, int max_elts=*)
cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy, dict config)
cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, dict config, list mergedKeys_list=*, Taxonomy taxonomy=*, bint mergeIds=*, list categories=*, int max_elts=*)
@ -14,13 +14,15 @@ from obitools3.dms.capi.obitypes cimport OBI_INT, OBI_STR, index_t
|
||||
from obitools3.apps.optiongroups import addMinimalInputOption, \
|
||||
addMinimalOutputOption, \
|
||||
addTaxonomyOption, \
|
||||
addEltLimitOption
|
||||
addEltLimitOption, \
|
||||
addNoProgressBarOption
|
||||
from obitools3.uri.decode import open_uri
|
||||
from obitools3.apps.config import logger
|
||||
from obitools3.utils cimport tobytes, tostr
|
||||
from obitools3.utils cimport tobytes, tostr, str2bytes
|
||||
|
||||
import sys
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
from io import BufferedWriter
|
||||
|
||||
|
||||
__title__="Group sequence records together"
|
||||
@ -32,6 +34,7 @@ def addOptions(parser):
|
||||
addTaxonomyOption(parser)
|
||||
addMinimalOutputOption(parser)
|
||||
addEltLimitOption(parser)
|
||||
addNoProgressBarOption(parser)
|
||||
|
||||
group = parser.add_argument_group('obi uniq specific options')
|
||||
|
||||
@ -56,7 +59,7 @@ def addOptions(parser):
|
||||
"(option can be used several times).")
|
||||
|
||||
|
||||
cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy, dict config) :
|
||||
|
||||
cdef int taxid
|
||||
cdef Nuc_Seq_Stored seq
|
||||
@ -69,7 +72,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
cdef object gn_sn
|
||||
cdef object fa_sn
|
||||
|
||||
# Create columns
|
||||
# Create columns and save them for efficiency
|
||||
if b"species" in o_view and o_view[b"species"].data_type_int != OBI_INT :
|
||||
o_view.delete_column(b"species")
|
||||
if b"species" not in o_view:
|
||||
@ -77,6 +80,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"species",
|
||||
OBI_INT
|
||||
)
|
||||
species_column = o_view[b"species"]
|
||||
|
||||
if b"genus" in o_view and o_view[b"genus"].data_type_int != OBI_INT :
|
||||
o_view.delete_column(b"genus")
|
||||
@ -85,6 +89,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"genus",
|
||||
OBI_INT
|
||||
)
|
||||
genus_column = o_view[b"genus"]
|
||||
|
||||
if b"family" in o_view and o_view[b"family"].data_type_int != OBI_INT :
|
||||
o_view.delete_column(b"family")
|
||||
@ -93,6 +98,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"family",
|
||||
OBI_INT
|
||||
)
|
||||
family_column = o_view[b"family"]
|
||||
|
||||
if b"species_name" in o_view and o_view[b"species_name"].data_type_int != OBI_STR :
|
||||
o_view.delete_column(b"species_name")
|
||||
@ -101,6 +107,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"species_name",
|
||||
OBI_STR
|
||||
)
|
||||
species_name_column = o_view[b"species_name"]
|
||||
|
||||
if b"genus_name" in o_view and o_view[b"genus_name"].data_type_int != OBI_STR :
|
||||
o_view.delete_column(b"genus_name")
|
||||
@ -109,6 +116,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"genus_name",
|
||||
OBI_STR
|
||||
)
|
||||
genus_name_column = o_view[b"genus_name"]
|
||||
|
||||
if b"family_name" in o_view and o_view[b"family_name"].data_type_int != OBI_STR :
|
||||
o_view.delete_column(b"family_name")
|
||||
@ -117,6 +125,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"family_name",
|
||||
OBI_STR
|
||||
)
|
||||
family_name_column = o_view[b"family_name"]
|
||||
|
||||
if b"rank" in o_view and o_view[b"rank"].data_type_int != OBI_STR :
|
||||
o_view.delete_column(b"rank")
|
||||
@ -125,6 +134,7 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"rank",
|
||||
OBI_STR
|
||||
)
|
||||
rank_column = o_view[b"rank"]
|
||||
|
||||
if b"scientific_name" in o_view and o_view[b"scientific_name"].data_type_int != OBI_STR :
|
||||
o_view.delete_column(b"scientific_name")
|
||||
@ -133,9 +143,19 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
b"scientific_name",
|
||||
OBI_STR
|
||||
)
|
||||
|
||||
for seq in o_view:
|
||||
PyErr_CheckSignals()
|
||||
scientific_name_column = o_view[b"scientific_name"]
|
||||
|
||||
# Initialize the progress bar
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(o_view), config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
i=0
|
||||
for seq in o_view:
|
||||
PyErr_CheckSignals()
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
if MERGED_TAXID_COLUMN in seq :
|
||||
m_taxids = []
|
||||
m_taxids_dict = seq[MERGED_TAXID_COLUMN]
|
||||
@ -165,20 +185,24 @@ cdef merge_taxonomy_classification(View_NUC_SEQS o_view, Taxonomy taxonomy) :
|
||||
else:
|
||||
fa_sn = None
|
||||
tfa = None
|
||||
|
||||
seq[b"species"] = tsp
|
||||
seq[b"genus"] = tgn
|
||||
seq[b"family"] = tfa
|
||||
|
||||
seq[b"species_name"] = sp_sn
|
||||
seq[b"genus_name"] = gn_sn
|
||||
seq[b"family_name"] = fa_sn
|
||||
|
||||
seq[b"rank"] = taxonomy.get_rank(taxid)
|
||||
seq[b"scientific_name"] = taxonomy.get_scientific_name(taxid)
|
||||
species_column[i] = tsp
|
||||
genus_column[i] = tgn
|
||||
family_column[i] = tfa
|
||||
|
||||
species_name_column[i] = sp_sn
|
||||
genus_name_column[i] = gn_sn
|
||||
family_name_column[i] = fa_sn
|
||||
|
||||
rank_column[i] = taxonomy.get_rank(taxid)
|
||||
scientific_name_column[i] = taxonomy.get_scientific_name(taxid)
|
||||
i+=1
|
||||
|
||||
if pb is not None:
|
||||
pb(len(o_view), force=True)
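The merge_taxonomy_classification changes fetch each output column once (species_column = o_view[b"species"] and so on) and then write by line index, instead of resolving seq[b"..."] through the view on every row. A toy illustration of the same caching idea, with a dictionary-backed stand-in for a view (not the obitools3 API):

class ToyView:
    # Column lookup is assumed to be the costly step worth doing only once.
    def __init__(self, n):
        self._columns = {b"species": [None] * n, b"scientific_name": [None] * n}
    def __getitem__(self, name):
        return self._columns[name]

view = ToyView(3)
taxids = [9606, 562, 4932]

species_col = view[b"species"]            # fetched once, outside the loop
for i, taxid in enumerate(taxids):
    species_col[i] = taxid                # plain indexed write per row
print(view[b"species"])                   # [9606, 562, 4932]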
|
||||
|
||||
|
||||
|
||||
cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, list mergedKeys_list=None, Taxonomy taxonomy=None, bint mergeIds=False, list categories=None, int max_elts=1000000) :
|
||||
cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, dict config, list mergedKeys_list=None, Taxonomy taxonomy=None, bint mergeIds=False, list categories=None, int max_elts=1000000) :
|
||||
|
||||
cdef int i
|
||||
cdef int k
|
||||
@ -187,6 +211,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
cdef int u_idx
|
||||
cdef int i_idx
|
||||
cdef int i_count
|
||||
cdef int o_count
|
||||
cdef str key_str
|
||||
cdef bytes key
|
||||
cdef bytes mkey
|
||||
@ -209,7 +234,6 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
cdef Nuc_Seq_Stored i_seq
|
||||
cdef Nuc_Seq_Stored o_seq
|
||||
cdef Nuc_Seq_Stored u_seq
|
||||
cdef Column i_col
|
||||
cdef Column i_seq_col
|
||||
cdef Column i_id_col
|
||||
cdef Column i_taxid_col
|
||||
@ -217,6 +241,8 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
cdef Column o_id_col
|
||||
cdef Column o_taxid_dist_col
|
||||
cdef Column o_merged_col
|
||||
cdef Column o_count_col
|
||||
cdef Column i_count_col
|
||||
cdef Column_line i_mcol
|
||||
cdef object taxid_dist_dict
|
||||
cdef object iter_view
|
||||
@ -252,7 +278,12 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
mergedKeys_m = []
|
||||
for k in range(k_count):
|
||||
mergedKeys_m.append(MERGED_PREFIX + mergedKeys[k])
|
||||
|
||||
|
||||
# Check that not trying to remerge without total count information
|
||||
for key in mergedKeys_m:
|
||||
if key in view and COUNT_COLUMN not in view:
|
||||
raise Exception("\n>>>>\nError: trying to re-merge tags without total count tag. Run obi annotate to add the count tag from the relevant merged tag, i.e.: \nobi annotate --set-tag COUNT:'sum([value for key,value in sequence['MERGED_sample'].items()])' dms/input dms/output\n")
|
||||
|
||||
if categories is None:
|
||||
categories = []
|
||||
|
||||
@ -274,7 +305,8 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
iter_view = iter(view)
|
||||
for i_seq in iter_view :
|
||||
PyErr_CheckSignals()
|
||||
pb(i)
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
|
||||
# This can't be done in the same line as the unique_id tuple creation because it generates a bug
|
||||
# where Cython (version 0.25.2) does not detect the reference to the categs_list variable and deallocates
|
||||
@ -284,6 +316,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
for x in categories :
|
||||
catl.append(i_seq[x])
|
||||
|
||||
#unique_id = tuple(catl) + (i_seq_col[i],)
|
||||
unique_id = tuple(catl) + (i_seq_col.get_line_idx(i),)
|
||||
#unique_id = tuple(i_seq[x] for x in categories) + (seq_col.get_line_idx(i),) # The line that cython can't read properly
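For context, the first pass groups input line indices under a key built from the category values plus the index of the sequence in the sequence column's indexer (the commented line is the one-liner Cython 0.25.2 mishandles). A schematic version of that grouping, with uniques as an ordinary dict and get_line_idx standing in for the Cython call above:

    # Sketch: group line indices by (categories..., sequence index) so that
    # identical sequences sharing the same category values end up together.
    uniques = {}
    for i, seq in enumerate(view):
        catl = [seq[x] for x in categories]
        unique_id = tuple(catl) + (seq_col.get_line_idx(i),)
        uniques.setdefault(unique_id, []).append(i)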
|
||||
|
||||
@ -320,7 +353,14 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
for k in range(k_count):
|
||||
key = mergedKeys[k]
|
||||
merged_col_name = mergedKeys_m[k]
|
||||
i_col = view[key]
|
||||
|
||||
# if merged_infos[merged_col_name]['nb_elts'] == 1:
|
||||
# raise Exception("Can't merge information from a tag with only one element (e.g. one sample ; don't use -m option)")
|
||||
|
||||
if merged_col_name in view:
|
||||
i_col = view[merged_col_name]
|
||||
else:
|
||||
i_col = view[key]
|
||||
|
||||
if merged_infos[merged_col_name]['nb_elts'] > max_elts:
|
||||
str_merged_cols.append(merged_col_name)
|
||||
@ -338,6 +378,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
OBI_INT,
|
||||
nb_elements_per_line=merged_infos[merged_col_name]['nb_elts'],
|
||||
elements_names=list(merged_infos[merged_col_name]['elt_names']),
|
||||
dict_column=True,
|
||||
comments=i_col.comments,
|
||||
alias=merged_col_name
|
||||
)
|
||||
@ -360,6 +401,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
OBI_INT,
|
||||
nb_elements_per_line=len(view),
|
||||
elements_names=[id for id in i_id_col],
|
||||
dict_column=True,
|
||||
alias=TAXID_DIST_COLUMN
|
||||
)
|
||||
|
||||
@ -374,23 +416,35 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
alias=MERGED_COLUMN
|
||||
)
|
||||
|
||||
# Keep columns that are going to be used a lot in variables
|
||||
# Keep columns in variables for efficiency
|
||||
o_id_col = o_view[ID_COLUMN]
|
||||
if TAXID_DIST_COLUMN in o_view:
|
||||
o_taxid_dist_col = o_view[TAXID_DIST_COLUMN]
|
||||
if MERGED_COLUMN in o_view:
|
||||
o_merged_col = o_view[MERGED_COLUMN]
|
||||
if COUNT_COLUMN not in o_view:
|
||||
Column.new_column(o_view,
|
||||
COUNT_COLUMN,
|
||||
OBI_INT)
|
||||
o_count_col = o_view[COUNT_COLUMN]
|
||||
if COUNT_COLUMN in view:
|
||||
i_count_col = view[COUNT_COLUMN]
|
||||
|
||||
if pb is not None:
|
||||
pb(len(view), force=True)
|
||||
print("")
|
||||
|
||||
pb(len(view), force=True)
|
||||
print("")
|
||||
logger("info", "Second browsing through the input")
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(len(uniques), seconde=5)
|
||||
if pb is not None:
|
||||
pb = ProgressBar(len(view))
|
||||
|
||||
o_idx = 0
|
||||
total_treated = 0
|
||||
|
||||
for unique_id in uniques :
|
||||
PyErr_CheckSignals()
|
||||
pb(o_idx)
|
||||
|
||||
merged_sequences = uniques[unique_id]
|
||||
|
||||
@ -407,7 +461,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
merged_list = list(set(merged_list)) # deduplicate the list
|
||||
o_merged_col[o_idx] = merged_list
|
||||
|
||||
o_seq[COUNT_COLUMN] = 0
|
||||
o_count = 0
|
||||
|
||||
if TAXID_DIST_COLUMN in u_seq and i_taxid_dist_col[u_idx] is not None:
|
||||
taxid_dist_dict = i_taxid_dist_col[u_idx]
|
||||
@ -419,16 +473,20 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
merged_dict[mkey] = {}
|
||||
|
||||
for i_idx in merged_sequences:
|
||||
|
||||
PyErr_CheckSignals()
|
||||
|
||||
if pb is not None:
|
||||
pb(total_treated)
|
||||
|
||||
i_id = i_id_col[i_idx]
|
||||
i_seq = view[i_idx]
|
||||
|
||||
if COUNT_COLUMN not in i_seq or i_seq[COUNT_COLUMN] is None:
|
||||
if COUNT_COLUMN not in i_seq or i_count_col[i_idx] is None:
|
||||
i_count = 1
|
||||
else:
|
||||
i_count = i_seq[COUNT_COLUMN]
|
||||
i_count = i_count_col[i_idx]
|
||||
|
||||
o_seq[COUNT_COLUMN] += i_count
|
||||
o_count += i_count
|
||||
|
||||
for k in range(k_count):
|
||||
|
||||
@ -463,44 +521,55 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
|
||||
mcol[key2] = i_mcol[key2]
|
||||
else:
|
||||
mcol[key2] = mcol[key2] + i_mcol[key2]
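The accumulation above sums per-key counts across the sequences being merged into one output record. The same idea with plain dictionaries, stripped of the MERGED_ prefix and column handling:

    # Sketch: accumulate per-sample counts while merging duplicate sequences.
    def merge_counts(merged, incoming):
        for key, count in incoming.items():
            merged[key] = merged.get(key, 0) + count
        return merged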
|
||||
|
||||
# Write taxid_dist
|
||||
if mergeIds and TAXID_COLUMN in mergedKeys:
|
||||
if TAXID_DIST_COLUMN in str_merged_cols:
|
||||
o_taxid_dist_col[o_idx] = str(taxid_dist_dict)
|
||||
else:
|
||||
o_taxid_dist_col[o_idx] = taxid_dist_dict
|
||||
|
||||
# Write merged dicts
|
||||
for mkey in merged_dict:
|
||||
if mkey in str_merged_cols:
|
||||
mkey_cols[mkey][o_idx] = str(merged_dict[mkey])
|
||||
else:
|
||||
mkey_cols[mkey][o_idx] = merged_dict[mkey]
|
||||
# Sets NA values to 0 # TODO discuss, for now keep as None and test for None instead of testing for 0 in tools
|
||||
#for key in mkey_cols[mkey][o_idx]:
|
||||
# if mkey_cols[mkey][o_idx][key] is None:
|
||||
# mkey_cols[mkey][o_idx][key] = 0
|
||||
|
||||
|
||||
for key in i_seq.keys():
|
||||
# Delete informations that differ between the merged sequences
|
||||
# TODO make special columns list?
|
||||
# TODO make special columns list? // could be more efficient
|
||||
if key != COUNT_COLUMN and key != ID_COLUMN and key != NUC_SEQUENCE_COLUMN and key in o_seq and o_seq[key] != i_seq[key] \
|
||||
and key not in merged_dict :
|
||||
o_seq[key] = None
|
||||
|
||||
total_treated += 1
|
||||
|
||||
# Write merged dicts
|
||||
for mkey in merged_dict:
|
||||
if mkey in str_merged_cols:
|
||||
mkey_cols[mkey][o_idx] = str(merged_dict[mkey])
|
||||
else:
|
||||
mkey_cols[mkey][o_idx] = merged_dict[mkey]
|
||||
# Sets NA values to 0 # TODO discuss, for now keep as None and test for None instead of testing for 0 in tools
|
||||
#for key in mkey_cols[mkey][o_idx]:
|
||||
# if mkey_cols[mkey][o_idx][key] is None:
|
||||
# mkey_cols[mkey][o_idx][key] = 0
|
||||
|
||||
# Write taxid_dist
|
||||
if mergeIds and TAXID_COLUMN in mergedKeys:
|
||||
if TAXID_DIST_COLUMN in str_merged_cols:
|
||||
o_taxid_dist_col[o_idx] = str(taxid_dist_dict)
|
||||
else:
|
||||
o_taxid_dist_col[o_idx] = taxid_dist_dict
|
||||
|
||||
o_count_col[o_idx] = o_count
|
||||
o_idx += 1
|
||||
|
||||
if pb is not None:
|
||||
pb(len(view), force=True)
|
||||
|
||||
# Deletes quality columns if there is one because the matching between sequence and quality will be broken (quality set to NA when sequence not)
|
||||
if QUALITY_COLUMN in view:
|
||||
o_view.delete_column(QUALITY_COLUMN)
|
||||
if REVERSE_QUALITY_COLUMN in view:
|
||||
o_view.delete_column(REVERSE_QUALITY_COLUMN)
|
||||
|
||||
# Delete old columns that are now merged
|
||||
for k in range(k_count):
|
||||
if mergedKeys[k] in o_view:
|
||||
o_view.delete_column(mergedKeys[k])
|
||||
|
||||
if taxonomy is not None:
|
||||
print("") # TODO because in the middle of progress bar. Better solution?
|
||||
logger("info", "Merging taxonomy classification")
|
||||
merge_taxonomy_classification(o_view, taxonomy)
|
||||
merge_taxonomy_classification(o_view, taxonomy, config)
|
||||
|
||||
|
||||
|
||||
@ -532,8 +601,23 @@ def run(config):
|
||||
if output is None:
|
||||
raise Exception("Could not create output view")
|
||||
|
||||
i_dms = input[0]
|
||||
entries = input[1]
|
||||
o_view = output[1]
|
||||
o_dms = output[0]
|
||||
output_0 = output[0]
|
||||
|
||||
# If stdout output create a temporary view that will be exported and deleted.
|
||||
if type(output_0)==BufferedWriter:
|
||||
temporary_view_name = b"temp"
|
||||
i=0
|
||||
while temporary_view_name in i_dms: # Making sure view name is unique in input DMS
|
||||
temporary_view_name = temporary_view_name+b"_"+str2bytes(str(i))
|
||||
i+=1
|
||||
o_view_name = temporary_view_name
|
||||
o_dms = i_dms
|
||||
o_view = View_NUC_SEQS.new(i_dms, o_view_name)
|
||||
else:
|
||||
o_view = output[1]
|
||||
|
||||
if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
|
||||
taxo_uri = open_uri(config['obi']['taxoURI'])
|
||||
@ -544,15 +628,19 @@ def run(config):
|
||||
taxo = None
|
||||
|
||||
# Initialize the progress bar
|
||||
pb = ProgressBar(len(entries), config, seconde=5)
|
||||
if config['obi']['noprogressbar'] == False:
|
||||
pb = ProgressBar(len(entries), config)
|
||||
else:
|
||||
pb = None
|
||||
|
||||
try:
|
||||
uniq_sequences(entries, o_view, pb, mergedKeys_list=config['uniq']['merge'], taxonomy=taxo, mergeIds=config['uniq']['mergeids'], categories=config['uniq']['categories'], max_elts=config['obi']['maxelts'])
|
||||
except Exception, e:
|
||||
raise RollbackException("obi uniq error, rollbacking view: "+str(e), o_view)
|
||||
if len(entries) > 0:
|
||||
try:
|
||||
uniq_sequences(entries, o_view, pb, config, mergedKeys_list=config['uniq']['merge'], taxonomy=taxo, mergeIds=config['uniq']['mergeids'], categories=config['uniq']['categories'], max_elts=config['obi']['maxelts'])
|
||||
except Exception, e:
|
||||
raise RollbackException("obi uniq error, rollbacking view: "+str(e), o_view)
|
||||
|
||||
pb(len(entries), force=True)
|
||||
print("", file=sys.stderr)
|
||||
if pb is not None:
|
||||
print("", file=sys.stderr)
|
||||
|
||||
# Save command config in View and DMS comments
|
||||
command_line = " ".join(sys.argv[1:])
|
||||
@ -562,13 +650,23 @@ def run(config):
|
||||
input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
|
||||
input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
|
||||
o_view.write_config(config, "uniq", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
|
||||
output[0].record_command_line(command_line)
|
||||
o_dms.record_command_line(command_line)
|
||||
|
||||
# stdout output: write to buffer
|
||||
if type(output_0)==BufferedWriter:
|
||||
logger("info", "Printing to output...")
|
||||
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
|
||||
o_view.close()
|
||||
|
||||
#print("\n\nOutput view:\n````````````", file=sys.stderr)
|
||||
#print(repr(o_view), file=sys.stderr)
|
||||
|
||||
input[0].close()
|
||||
output[0].close()
|
||||
|
||||
# If stdout output, delete the temporary result view in the input DMS
|
||||
if type(output_0)==BufferedWriter:
|
||||
View.delete_view(i_dms, o_view_name)
|
||||
|
||||
i_dms.close(force=True)
|
||||
o_dms.close(force=True)
|
||||
|
||||
logger("info", "Done.")
|
||||
|
||||
|
@ -34,6 +34,7 @@ cdef extern from "obidms.h" nogil:
|
||||
int obi_close_dms(OBIDMS_p dms, bint force)
|
||||
char* obi_dms_get_dms_path(OBIDMS_p dms)
|
||||
char* obi_dms_get_full_path(OBIDMS_p dms, const_char_p path_name)
|
||||
char* obi_dms_formatted_infos(OBIDMS_p dms, bint detailed)
|
||||
void obi_close_atexit()
|
||||
|
||||
obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number)
|
||||
|
@ -31,6 +31,7 @@ cdef extern from "obidmscolumn.h" nogil:
|
||||
const_char_p elements_names
|
||||
OBIType_t returned_data_type
|
||||
OBIType_t stored_data_type
|
||||
bint dict_column
|
||||
bint tuples
|
||||
bint to_eval
|
||||
time_t creation_date
|
||||
@ -68,3 +69,6 @@ cdef extern from "obidmscolumn.h" nogil:
|
||||
int obi_column_write_comments(OBIDMS_column_p column, const char* comments)
|
||||
|
||||
int obi_column_add_comment(OBIDMS_column_p column, const char* key, const char* value)
|
||||
|
||||
char* obi_column_formatted_infos(OBIDMS_column_p column, bint detailed)
|
||||
|
@ -11,4 +11,5 @@ cdef extern from "obi_ecotag.h" nogil:
|
||||
const char* taxonomy_name,
|
||||
const char* output_view_name,
|
||||
const char* output_view_comments,
|
||||
double ecotag_threshold)
|
||||
double ecotag_threshold,
|
||||
double bubble_threshold)
|
||||
|
@ -7,6 +7,8 @@ from libc.stdint cimport int32_t
|
||||
|
||||
cdef extern from "obidms_taxonomy.h" nogil:
|
||||
|
||||
extern int MIN_LOCAL_TAXID
|
||||
|
||||
struct ecotxnode :
|
||||
int32_t taxid
|
||||
int32_t rank
|
||||
@ -18,6 +20,13 @@ cdef extern from "obidms_taxonomy.h" nogil:
|
||||
ctypedef ecotxnode ecotx_t
|
||||
|
||||
|
||||
struct econame_t : # can't get this struct to be accepted by Cython ('unknown size')
|
||||
char* name
|
||||
char* class_name
|
||||
int32_t is_scientific_name
|
||||
ecotxnode* taxon
|
||||
|
||||
|
||||
struct ecotxidx_t :
|
||||
int32_t count
|
||||
int32_t max_taxid
|
||||
@ -30,9 +39,14 @@ cdef extern from "obidms_taxonomy.h" nogil:
|
||||
char** label
|
||||
|
||||
|
||||
struct econameidx_t :
|
||||
int32_t count
|
||||
econame_t* names
|
||||
|
||||
|
||||
struct OBIDMS_taxonomy_t :
|
||||
ecorankidx_t* ranks
|
||||
# econameidx_t* names
|
||||
econameidx_t* names
|
||||
ecotxidx_t* taxa
|
||||
|
||||
ctypedef OBIDMS_taxonomy_t* OBIDMS_taxonomy_p
|
||||
@ -51,7 +65,11 @@ cdef extern from "obidms_taxonomy.h" nogil:
|
||||
ecotx_t* obi_taxo_get_parent_at_rank(ecotx_t* taxon, int32_t rankidx)
|
||||
|
||||
ecotx_t* obi_taxo_get_taxon_with_taxid(OBIDMS_taxonomy_p taxonomy, int32_t taxid)
|
||||
|
||||
char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
|
||||
|
||||
ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
|
||||
|
||||
bint obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid)
|
||||
|
||||
ecotx_t* obi_taxo_get_species(ecotx_t* taxon, OBIDMS_taxonomy_p taxonomy)
|
||||
@ -71,4 +89,4 @@ cdef extern from "obidms_taxonomy.h" nogil:
|
||||
int obi_taxo_add_preferred_name_with_taxon(OBIDMS_taxonomy_p tax, ecotx_t* taxon, const char* preferred_name)
|
||||
|
||||
const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks)
|
||||
|
||||
|
||||
|
@ -27,6 +27,7 @@ cdef extern from "obiview.h" nogil:
|
||||
extern const_char_p REVERSE_QUALITY_COLUMN
|
||||
extern const_char_p REVERSE_SEQUENCE_COLUMN
|
||||
extern const_char_p COUNT_COLUMN
|
||||
extern const_char_p SCIENTIFIC_NAME_COLUMN
|
||||
extern const_char_p TAXID_COLUMN
|
||||
extern const_char_p MERGED_TAXID_COLUMN
|
||||
extern const_char_p MERGED_PREFIX
|
||||
@ -94,6 +95,7 @@ cdef extern from "obiview.h" nogil:
|
||||
index_t nb_elements_per_line,
|
||||
char* elements_names,
|
||||
bint elt_names_formatted,
|
||||
bint dict_column,
|
||||
bint tuples,
|
||||
bint to_eval,
|
||||
const_char_p indexer_name,
|
||||
@ -103,13 +105,17 @@ cdef extern from "obiview.h" nogil:
|
||||
bint create)
|
||||
|
||||
int obi_view_delete_column(Obiview_p view, const_char_p column_name, bint delete_file)
|
||||
|
||||
|
||||
OBIDMS_column_p obi_view_get_column(Obiview_p view, const_char_p column_name)
|
||||
|
||||
OBIDMS_column_p* obi_view_get_pointer_on_column_in_view(Obiview_p view, const_char_p column_name)
|
||||
|
||||
int obi_view_create_column_alias(Obiview_p view, const_char_p current_name, const_char_p alias)
|
||||
|
||||
|
||||
char* obi_view_formatted_infos(Obiview_p view, bint detailed)
|
||||
|
||||
char* obi_view_formatted_infos_one_line(Obiview_p view)
|
||||
|
||||
int obi_view_write_comments(Obiview_p view, const_char_p comments)
|
||||
|
||||
int obi_view_add_comment(Obiview_p view, const_char_p key, const_char_p value)
|
||||
|
@ -22,6 +22,7 @@ cdef class Column(OBIWrapper) :
|
||||
|
||||
cdef inline OBIDMS_column_p pointer(self)
|
||||
cdef read_elements_names(self)
|
||||
cpdef list keys(self)
|
||||
|
||||
@staticmethod
|
||||
cdef type get_column_class(obitype_t obitype, bint multi_elts, bint tuples)
|
||||
|
@ -14,6 +14,7 @@ from ..capi.obidms cimport obi_import_column
|
||||
from ..capi.obidmscolumn cimport OBIDMS_column_header_p, \
|
||||
obi_close_column, \
|
||||
obi_get_elements_names, \
|
||||
obi_column_formatted_infos, \
|
||||
obi_column_write_comments
|
||||
|
||||
from ..capi.obiutils cimport obi_format_date
|
||||
@ -38,8 +39,9 @@ from obitools3.utils cimport tobytes, \
|
||||
|
||||
from obitools3.dms.column import typed_column
|
||||
|
||||
from libc.stdlib cimport free
|
||||
|
||||
from libc.stdlib cimport free
|
||||
from libc.string cimport strcpy
|
||||
|
||||
import importlib
|
||||
import inspect
|
||||
import pkgutil
|
||||
@ -88,6 +90,7 @@ cdef class Column(OBIWrapper) :
|
||||
obitype_t data_type,
|
||||
index_t nb_elements_per_line=1,
|
||||
list elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
bint to_eval=False,
|
||||
object associated_column_name=b"",
|
||||
@ -96,6 +99,7 @@ cdef class Column(OBIWrapper) :
|
||||
object alias=b""):
|
||||
# TODO indexer_name?
|
||||
|
||||
cdef Column column
|
||||
cdef bytes column_name_b = tobytes(column_name)
|
||||
cdef bytes alias_b = tobytes(alias)
|
||||
cdef bytes comments_b = str2bytes(json.dumps(bytes2str_object(comments)))
|
||||
@ -131,13 +135,14 @@ cdef class Column(OBIWrapper) :
|
||||
raise RuntimeError("Cannot create column %s in view %s: trying to create quality column but no NUC_SEQ column to associate it with in the view" % (bytes2str(column_name_b),
|
||||
bytes2str(view.name)))
|
||||
associated_column_name_b = NUC_SEQUENCE_COLUMN
|
||||
associated_column_version = view[NUC_SEQUENCE_COLUMN].version
|
||||
associated_column_version = view[NUC_SEQUENCE_COLUMN].version
|
||||
elif column_name == REVERSE_QUALITY_COLUMN:
|
||||
if REVERSE_SEQUENCE_COLUMN not in view:
|
||||
raise RuntimeError("Cannot create column %s in view %s: trying to create reverse quality column but no REVERSE_SEQUENCE column to associate it with in the view" % (bytes2str(column_name_b),
|
||||
bytes2str(view.name)))
|
||||
associated_column_name_b = REVERSE_SEQUENCE_COLUMN
|
||||
associated_column_version = view[REVERSE_SEQUENCE_COLUMN].version
|
||||
|
||||
|
||||
if (obi_view_add_column(view = view.pointer(),
|
||||
column_name = column_name_b,
|
||||
@ -148,6 +153,7 @@ cdef class Column(OBIWrapper) :
|
||||
nb_elements_per_line = nb_elements_per_line,
|
||||
elements_names = elements_names_p,
|
||||
elt_names_formatted = False,
|
||||
dict_column = dict_column,
|
||||
tuples = tuples,
|
||||
to_eval = to_eval,
|
||||
indexer_name = NULL,
|
||||
@ -157,8 +163,19 @@ cdef class Column(OBIWrapper) :
|
||||
create = True)<0):
|
||||
raise RuntimeError("Cannot create column %s in view %s" % (bytes2str(column_name_b),
|
||||
bytes2str(view.name)))
|
||||
|
||||
return Column.open(view, alias_b)
|
||||
|
||||
column = Column.open(view, alias_b)
|
||||
|
||||
# Automatically associate nuc sequence column to quality column if necessary
|
||||
if data_type == OBI_QUAL:
|
||||
if column_name == QUALITY_COLUMN:
|
||||
view[NUC_SEQUENCE_COLUMN].associated_column_name = column.name
|
||||
view[NUC_SEQUENCE_COLUMN].associated_column_version = column.version
|
||||
elif column_name == REVERSE_QUALITY_COLUMN:
|
||||
view[REVERSE_SEQUENCE_COLUMN].associated_column_name = column.name
|
||||
view[REVERSE_SEQUENCE_COLUMN].associated_column_version = column.version
|
||||
|
||||
return column
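With this change, creating a quality column also records the association on the corresponding sequence column, so the pairing is kept in both directions. A possible usage sketch, assuming a view that already holds a NUC_SEQ column (constant names as in the code above):

    # Sketch: after creating a QUALITY column, the sequence column of the view
    # points back to it through its associated_column_name/version fields.
    qual = Column.new_column(view, QUALITY_COLUMN, OBI_QUAL)
    seq_col = view[NUC_SEQUENCE_COLUMN]
    assert seq_col.associated_column_name == qual.name
    assert seq_col.associated_column_version == qual.version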
|
||||
|
||||
|
||||
@staticmethod
|
||||
@ -185,7 +202,7 @@ cdef class Column(OBIWrapper) :
|
||||
|
||||
column_p = column_pp[0]
|
||||
column_type = column_p.header.returned_data_type
|
||||
column_class = Column.get_column_class(column_type, (column_p.header.nb_elements_per_line > 1), column_p.header.tuples)
|
||||
column_class = Column.get_column_class(column_type, (column_p.header.nb_elements_per_line > 1 or column_p.header.dict_column == True), column_p.header.tuples)
|
||||
column = OBIWrapper.new_wrapper(column_class, column_pp)
|
||||
|
||||
column._view = view
|
||||
@ -221,6 +238,7 @@ cdef class Column(OBIWrapper) :
|
||||
nb_elements_per_line = -1,
|
||||
elements_names = NULL,
|
||||
elt_names_formatted = False,
|
||||
dict_column = False,
|
||||
tuples = False,
|
||||
to_eval = False,
|
||||
indexer_name = NULL,
|
||||
@ -287,10 +305,25 @@ cdef class Column(OBIWrapper) :
|
||||
|
||||
@OBIWrapper.checkIsActive
|
||||
def __repr__(self) :
|
||||
cdef bytes s
|
||||
s = self._alias + b", data type: " + self.data_type
|
||||
return bytes2str(s)
|
||||
cdef str s
|
||||
cdef char* sc
|
||||
cdef OBIDMS_column_p pointer = self.pointer()
|
||||
sc = obi_column_formatted_infos(pointer, False)
|
||||
s = bytes2str(sc)
|
||||
free(sc)
|
||||
return s
|
||||
|
||||
|
||||
@OBIWrapper.checkIsActive
|
||||
def repr_longformat(self) :
|
||||
cdef str s
|
||||
cdef char* sc
|
||||
cdef OBIDMS_column_p pointer = self.pointer()
|
||||
sc = obi_column_formatted_infos(pointer, True)
|
||||
s = bytes2str(sc)
|
||||
free(sc)
|
||||
return s
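Both __repr__ and the new repr_longformat follow the same ownership pattern: the C layer returns a malloc'ed char*, which is copied into a Python str and then freed. Condensed into one helper, purely as a sketch of the pattern (same names as above):

    # Sketch (Cython): copy a C-allocated info string into Python, then free it.
    cdef str formatted(OBIDMS_column_p pointer, bint detailed):
        cdef char* sc = obi_column_formatted_infos(pointer, detailed)
        cdef str s = bytes2str(sc)
        free(sc)            # the buffer was allocated on the C side
        return s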
|
||||
|
||||
|
||||
def close(self): # TODO discuss, can't be called bc then bug when closing view that tries to close it in C
|
||||
|
||||
@ -316,7 +349,10 @@ cdef class Column(OBIWrapper) :
|
||||
free(elts_names_b)
|
||||
return elts_names_list
|
||||
|
||||
|
||||
cpdef list keys(self):
|
||||
return self._elements_names
|
||||
|
||||
|
||||
# Column alias property getter and setter
|
||||
@property
|
||||
def name(self):
|
||||
@ -333,7 +369,7 @@ cdef class Column(OBIWrapper) :
|
||||
@property
|
||||
def elements_names(self):
|
||||
return self._elements_names
|
||||
|
||||
|
||||
# nb_elements_per_line property getter
|
||||
@property
|
||||
def nb_elements_per_line(self):
|
||||
@ -341,6 +377,13 @@ cdef class Column(OBIWrapper) :
|
||||
raise OBIDeactivatedInstanceError()
|
||||
return self.pointer().header.nb_elements_per_line
|
||||
|
||||
# dict_column property getter
|
||||
@property
|
||||
def dict_column(self):
|
||||
if not self.active() :
|
||||
raise OBIDeactivatedInstanceError()
|
||||
return self.pointer().header.dict_column
|
||||
|
||||
# data_type property getter
|
||||
@property
|
||||
def data_type(self):
|
||||
@ -397,6 +440,31 @@ cdef class Column(OBIWrapper) :
|
||||
raise OBIDeactivatedInstanceError()
|
||||
return obi_format_date(self.pointer().header.creation_date)
|
||||
|
||||
|
||||
# associated_column name property getter and setter
|
||||
@property
|
||||
def associated_column_name(self):
|
||||
if not self.active() :
|
||||
raise OBIDeactivatedInstanceError()
|
||||
return self.pointer().header.associated_column.column_name
|
||||
|
||||
@associated_column_name.setter
|
||||
def associated_column_name(self, object new_name):
|
||||
strcpy(self.pointer().header.associated_column.column_name, tobytes(new_name))
|
||||
|
||||
|
||||
# associated_column version property getter and setter
|
||||
@property
|
||||
def associated_column_version(self):
|
||||
if not self.active() :
|
||||
raise OBIDeactivatedInstanceError()
|
||||
return self.pointer().header.associated_column.version
|
||||
|
||||
@associated_column_version.setter
|
||||
def associated_column_version(self, int new_version):
|
||||
self.pointer().header.associated_column.version = new_version
|
||||
|
||||
|
||||
# comments property getter
|
||||
@property
|
||||
def comments(self):
|
||||
|
@ -38,11 +38,13 @@ cdef class Column_bool(Column):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
object comments={}):
|
||||
return Column.new_column(view, column_name, OBI_BOOL,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=comments)
|
||||
|
||||
|
@ -36,12 +36,14 @@ cdef class Column_char(Column):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
object comments={}):
|
||||
|
||||
return Column.new_column(view, column_name, OBI_CHAR,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=comments)
|
||||
|
||||
|
@ -36,12 +36,14 @@ cdef class Column_float(Column):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
object comments={}):
|
||||
|
||||
return Column.new_column(view, column_name, OBI_FLOAT,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=comments)
|
||||
|
||||
|
@ -38,12 +38,14 @@ cdef class Column_int(Column):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
object comments={}):
|
||||
|
||||
return Column.new_column(view, column_name, OBI_INT,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=comments)
|
||||
|
||||
|
@ -38,6 +38,7 @@ cdef class Column_qual(Column_idx):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
object associated_column_name=b"",
|
||||
int associated_column_version=-1,
|
||||
object comments={}):
|
||||
@ -45,6 +46,7 @@ cdef class Column_qual(Column_idx):
|
||||
return Column.new_column(view, column_name, OBI_QUAL,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=False,
|
||||
associated_column_name=associated_column_name,
|
||||
associated_column_version=associated_column_name,
|
||||
|
@ -39,12 +39,14 @@ cdef class Column_seq(Column_idx):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
object comments={}):
|
||||
|
||||
return Column.new_column(view, column_name, OBI_SEQ,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=comments)
|
||||
|
||||
|
@ -38,12 +38,14 @@ cdef class Column_str(Column_idx):
|
||||
object column_name,
|
||||
index_t nb_elements_per_line=1,
|
||||
object elements_names=None,
|
||||
bint dict_column=False,
|
||||
bint tuples=False,
|
||||
object comments={}):
|
||||
|
||||
return Column.new_column(view, column_name, OBI_STR,
|
||||
nb_elements_per_line=nb_elements_per_line,
|
||||
elements_names=elements_names,
|
||||
dict_column=dict_column,
|
||||
tuples=tuples,
|
||||
comments=comments)
|
||||
|
||||
|
@ -10,7 +10,8 @@ from .capi.obidms cimport obi_open_dms, \
|
||||
obi_dms_exists, \
|
||||
obi_dms_get_full_path, \
|
||||
obi_close_atexit, \
|
||||
obi_dms_write_comments
|
||||
obi_dms_write_comments, \
|
||||
obi_dms_formatted_infos
|
||||
|
||||
from .capi.obitypes cimport const_char_p
|
||||
|
||||
@ -32,6 +33,8 @@ from .object import OBIWrapper
|
||||
import json
|
||||
import time
|
||||
|
||||
from libc.stdlib cimport free
|
||||
|
||||
|
||||
cdef class DMS(OBIWrapper):
|
||||
|
||||
@ -223,11 +226,24 @@ cdef class DMS(OBIWrapper):
|
||||
|
||||
|
||||
@OBIWrapper.checkIsActive
|
||||
def __repr__(self):
|
||||
cdef str s
|
||||
s=""
|
||||
for view_name in self.keys():
|
||||
s = s + repr(self.get_view(view_name)) + "\n"
|
||||
def __repr__(self) :
|
||||
cdef str s
|
||||
cdef char* sc
|
||||
cdef OBIDMS_p pointer = self.pointer()
|
||||
sc = obi_dms_formatted_infos(pointer, False)
|
||||
s = bytes2str(sc)
|
||||
free(sc)
|
||||
return s
|
||||
|
||||
|
||||
@OBIWrapper.checkIsActive
|
||||
def repr_longformat(self) :
|
||||
cdef str s
|
||||
cdef char* sc
|
||||
cdef OBIDMS_p pointer = self.pointer()
|
||||
sc = obi_dms_formatted_infos(pointer, True)
|
||||
s = bytes2str(sc)
|
||||
free(sc)
|
||||
return s
|
||||
|
||||
|
||||
|
@ -39,4 +39,6 @@ cdef class Nuc_Seq_Stored(Seq_Stored) :
|
||||
cpdef set_quality_char(self, object new_qual, int offset=*)
|
||||
cpdef object build_quality_array(self, list quality)
|
||||
cpdef bytes build_reverse_complement(self)
|
||||
cpdef str get_str(self)
|
||||
cpdef str get_str(self)
|
||||
cpdef repr_bytes(self)
|
||||
|
@ -431,9 +431,12 @@ cdef class Nuc_Seq_Stored(Seq_Stored) :
|
||||
return len(self._view.get_column(NUC_SEQUENCE_COLUMN).get_line(self.index))
|
||||
|
||||
def __repr__(self):
|
||||
return bytes2str(self.repr_bytes())
|
||||
|
||||
cpdef repr_bytes(self):
|
||||
if self.quality is None:
|
||||
formatter = FastaFormat()
|
||||
else:
|
||||
formatter = FastqFormat()
|
||||
return bytes2str(formatter(self))
|
||||
return formatter(self)
|
||||
|
||||
|
@ -11,11 +11,14 @@ cdef class Taxonomy(OBIWrapper) :
|
||||
cdef bytes _name
|
||||
cdef DMS _dms
|
||||
cdef list _ranks
|
||||
cdef dict _name_dict
|
||||
|
||||
cdef inline OBIDMS_taxonomy_p pointer(self)
|
||||
cdef fill_name_dict(self)
|
||||
|
||||
cpdef Taxon get_taxon_by_idx(self, int idx)
|
||||
cpdef Taxon get_taxon_by_taxid(self, int taxid)
|
||||
cpdef Taxon get_taxon_by_name(self, object taxon_name, object restricting_taxid=*)
|
||||
cpdef write(self, object prefix)
|
||||
cpdef int add_taxon(self, str name, str rank_name, int parent_taxid, int min_taxid=*)
|
||||
cpdef object get_species(self, int taxid)
|
||||
|
@ -15,7 +15,11 @@ from ..capi.obitaxonomy cimport obi_taxonomy_exists, \
|
||||
obi_taxo_get_species, \
|
||||
obi_taxo_get_genus, \
|
||||
obi_taxo_get_family, \
|
||||
ecotx_t
|
||||
ecotx_t, \
|
||||
econame_t, \
|
||||
obi_taxo_get_name_from_name_idx, \
|
||||
obi_taxo_get_taxon_from_name_idx
|
||||
|
||||
|
||||
from cpython.pycapsule cimport PyCapsule_New, PyCapsule_GetPointer
|
||||
import tarfile
|
||||
@ -24,11 +28,29 @@ from libc.stdlib cimport free
|
||||
|
||||
|
||||
cdef class Taxonomy(OBIWrapper) :
|
||||
# TODO function to import taxonomy?
|
||||
|
||||
# TODO function to import taxonomy?
|
||||
|
||||
cdef inline OBIDMS_taxonomy_p pointer(self) :
|
||||
return <OBIDMS_taxonomy_p>(self._pointer)
|
||||
|
||||
cdef fill_name_dict(self):
|
||||
print("Indexing taxon names...")
|
||||
|
||||
cdef OBIDMS_taxonomy_p pointer = self.pointer()
|
||||
cdef ecotx_t* taxon_p
|
||||
cdef object taxon_capsule
|
||||
cdef bytes name
|
||||
cdef int count
|
||||
cdef int n
|
||||
|
||||
count = (<OBIDMS_taxonomy_p>pointer).names.count
|
||||
|
||||
for n in range(count) :
|
||||
name = obi_taxo_get_name_from_name_idx(pointer, n)
|
||||
taxon_p = obi_taxo_get_taxon_from_name_idx(pointer, n)
|
||||
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
|
||||
self._name_dict[name] = Taxon(taxon_capsule, self)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def exists(DMS dms, object name) :
|
||||
@ -75,7 +97,8 @@ cdef class Taxonomy(OBIWrapper) :
|
||||
|
||||
taxo._dms = dms
|
||||
taxo._name = tobytes(name)
|
||||
|
||||
taxo._name_dict = {}
|
||||
taxo.fill_name_dict()
|
||||
taxo._ranks = []
|
||||
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
|
||||
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
|
||||
@ -118,7 +141,8 @@ cdef class Taxonomy(OBIWrapper) :
|
||||
|
||||
taxo._dms = dms
|
||||
taxo._name = folder_path
|
||||
|
||||
taxo._name_dict = {}
|
||||
taxo.fill_name_dict()
|
||||
taxo._ranks = []
|
||||
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
|
||||
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
|
||||
@ -129,8 +153,8 @@ cdef class Taxonomy(OBIWrapper) :
|
||||
def __getitem__(self, object ref):
|
||||
if type(ref) == int :
|
||||
return self.get_taxon_by_taxid(ref)
|
||||
else :
|
||||
raise NotImplementedError()
|
||||
elif type(ref) == str or type(ref) == bytes :
|
||||
return self.get_taxon_by_name(ref)
|
||||
|
||||
|
||||
cpdef Taxon get_taxon_by_taxid(self, int taxid):
|
||||
@ -143,6 +167,19 @@ cdef class Taxonomy(OBIWrapper) :
|
||||
return Taxon(taxon_capsule, self)
|
||||
|
||||
|
||||
cpdef Taxon get_taxon_by_name(self, object taxon_name, object restricting_taxid=None):
|
||||
taxon = self._name_dict.get(tobytes(taxon_name), None)
|
||||
if not taxon:
|
||||
return None
|
||||
elif restricting_taxid:
|
||||
if self.is_ancestor(restricting_taxid, taxon.taxid):
|
||||
return taxon
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
return taxon
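Since fill_name_dict indexes every taxon name once at open time, get_taxon_by_name reduces to a dictionary lookup, optionally restricted to descendants of a given taxid. A schematic equivalent (is_ancestor is the Taxonomy method used above):

    # Sketch: name lookup backed by a pre-built dict, with an optional
    # restriction to a clade.
    def get_taxon_by_name(name_dict, taxonomy, name, restricting_taxid=None):
        taxon = name_dict.get(name)
        if taxon is None:
            return None
        if restricting_taxid is not None and \
           not taxonomy.is_ancestor(restricting_taxid, taxon.taxid):
            return None
        return taxon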
|
||||
|
||||
|
||||
cpdef Taxon get_taxon_by_idx(self, int idx):
|
||||
cdef ecotx_t* taxa
|
||||
cdef ecotx_t* taxon_p
|
||||
@ -232,7 +269,7 @@ cdef class Taxonomy(OBIWrapper) :
|
||||
|
||||
taxa = self.pointer().taxa.taxon
|
||||
|
||||
# Yield each taxid
|
||||
# Yield each taxon
|
||||
for t in range(self.pointer().taxa.count):
|
||||
taxon_p = <ecotx_t*> (taxa+t)
|
||||
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
|
||||
|
@ -20,6 +20,10 @@ cdef class View(OBIWrapper):
|
||||
cdef DMS _dms
|
||||
|
||||
cdef inline Obiview_p pointer(self)
|
||||
|
||||
cpdef print_to_output(self,
|
||||
object output,
|
||||
bint noprogressbar=*)
|
||||
|
||||
cpdef delete_column(self,
|
||||
object column_name,
|
||||
@ -61,6 +65,8 @@ cdef class Line :
|
||||
cdef index_t _index
|
||||
cdef View _view
|
||||
|
||||
cpdef repr_bytes(self)
|
||||
|
||||
|
||||
cdef register_view_class(bytes view_type_name,
|
||||
type view_class)
|
||||
|
@ -6,6 +6,9 @@ cdef dict __VIEW_CLASS__= {}
|
||||
|
||||
from libc.stdlib cimport malloc
|
||||
|
||||
from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
|
||||
from obitools3.version import version
|
||||
|
||||
from ..capi.obiview cimport Alias_column_pair_p, \
|
||||
obi_new_view, \
|
||||
obi_open_view, \
|
||||
@ -16,7 +19,9 @@ from ..capi.obiview cimport Alias_column_pair_p, \
|
||||
obi_view_delete_column, \
|
||||
obi_view_create_column_alias, \
|
||||
obi_view_write_comments, \
|
||||
obi_delete_view
|
||||
obi_delete_view, \
|
||||
obi_view_formatted_infos, \
|
||||
obi_view_formatted_infos_one_line
|
||||
|
||||
from ..capi.obidmscolumn cimport OBIDMS_column_p
|
||||
from ..capi.obidms cimport OBIDMS_p
|
||||
@ -48,10 +53,15 @@ from ..capi.obidms cimport obi_import_view
|
||||
|
||||
from obitools3.format.tab import TabFormat
|
||||
|
||||
from cpython.exc cimport PyErr_CheckSignals
|
||||
|
||||
import importlib
|
||||
import inspect
|
||||
import pkgutil
|
||||
import json
|
||||
import sys
|
||||
|
||||
from libc.stdlib cimport free
|
||||
|
||||
|
||||
cdef class View(OBIWrapper) :
|
||||
@ -178,13 +188,50 @@ cdef class View(OBIWrapper) :
|
||||
|
||||
|
||||
@OBIWrapper.checkIsActive
|
||||
def __repr__(self) :
|
||||
cdef str s = "#View name:\n{name:s}\n#Line count:\n{line_count:d}\n#Columns:\n".format(name = bytes2str(self.name),
|
||||
line_count = self.line_count)
|
||||
for column_name in self.keys() :
|
||||
s = s + repr(self[column_name]) + '\n'
|
||||
def __repr__(self) :
|
||||
cdef str s
|
||||
cdef char* sc
|
||||
cdef Obiview_p pointer = self.pointer()
|
||||
sc = obi_view_formatted_infos(pointer, False)
|
||||
s = bytes2str(sc)
|
||||
free(sc)
|
||||
return s
|
||||
|
||||
|
||||
|
||||
@OBIWrapper.checkIsActive
|
||||
def repr_longformat(self) :
|
||||
cdef str s
|
||||
cdef char* sc
|
||||
cdef Obiview_p pointer = self.pointer()
|
||||
sc = obi_view_formatted_infos(pointer, True)
|
||||
s = bytes2str(sc)
|
||||
free(sc)
|
||||
return s
|
||||
|
||||
|
||||
cpdef print_to_output(self, object output, bint noprogressbar=False):
|
||||
|
||||
cdef int i
|
||||
cdef Line entry
|
||||
|
||||
self.checkIsActive(self)
|
||||
|
||||
# Initialize the progress bar
|
||||
if noprogressbar == False:
|
||||
pb = ProgressBar(len(self))
|
||||
else:
|
||||
pb = None
|
||||
i=0
|
||||
for entry in self:
|
||||
PyErr_CheckSignals()
|
||||
if pb is not None:
|
||||
pb(i)
|
||||
output.write(entry.repr_bytes()+b"\n")
|
||||
i+=1
|
||||
if pb is not None:
|
||||
pb(len(self), force=True)
|
||||
print("", file=sys.stderr)
|
||||
|
||||
|
||||
def keys(self):
|
||||
|
||||
@ -296,7 +343,7 @@ cdef class View(OBIWrapper) :
|
||||
|
||||
new_column = Column.new_column(self, old_column.pointer().header.name, new_data_type,
|
||||
nb_elements_per_line=new_nb_elements_per_line, elements_names=new_elements_names,
|
||||
comments=old_column.comments, alias=column_name_b+tobytes('___new___'))
|
||||
dict_column=(new_nb_elements_per_line>1), comments=old_column.comments, alias=column_name_b+tobytes('___new___'))
|
||||
|
||||
switch_to_dict = old_column.nb_elements_per_line == 1 and new_nb_elements_per_line > 1
|
||||
ori_key = old_column._elements_names[0]
|
||||
@ -357,6 +404,7 @@ cdef class View(OBIWrapper) :
|
||||
col.data_type_int,
|
||||
nb_elements_per_line = col.nb_elements_per_line,
|
||||
elements_names = col._elements_names,
|
||||
dict_column = col.dict_column,
|
||||
tuples = col.tuples,
|
||||
to_eval = col.to_eval,
|
||||
comments = col.comments,
|
||||
@ -405,6 +453,7 @@ cdef class View(OBIWrapper) :
|
||||
for i in range(len(input_view_name)):
|
||||
input_str.append(tostr(input_dms_name[i])+"/"+tostr(input_view_name[i]))
|
||||
comments["input_str"] = input_str
|
||||
comments["version"] = version
|
||||
return bytes2str_object(comments)
|
||||
|
||||
|
||||
@ -531,6 +580,7 @@ cdef class View(OBIWrapper) :
|
||||
for level in self.view_history:
|
||||
command_list = [level[input][b"command_line"] for input in level.keys()]
|
||||
for command in command_list:
|
||||
s+=b"obi "
|
||||
s+=command
|
||||
s+=b"\n"
|
||||
return s
|
||||
@ -756,8 +806,12 @@ cdef class Line :
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
return bytes2str(self.repr_bytes())
|
||||
|
||||
|
||||
cpdef repr_bytes(self):
|
||||
formatter = TabFormat(header=False)
|
||||
return bytes2str(formatter(self))
|
||||
return formatter(self)
|
||||
|
||||
|
||||
# View property getter
|
||||
|
@ -3,8 +3,9 @@
|
||||
cimport cython
|
||||
from obitools3.dms.view.view cimport Line
|
||||
from obitools3.utils cimport bytes2str_object, str2bytes, tobytes
|
||||
from obitools3.dms.column.column cimport Column_line
|
||||
from obitools3.dms.column.column cimport Column_line, Column_multi_elts
|
||||
|
||||
import sys
|
||||
|
||||
cdef class TabFormat:
|
||||
|
||||
@ -22,23 +23,45 @@ cdef class TabFormat:
|
||||
if self.first_line:
|
||||
self.tags = [k for k in data.keys()]
|
||||
|
||||
if self.header and self.first_line:
|
||||
for k in self.tags:
|
||||
if isinstance(data.view[k], Column_multi_elts):
|
||||
keys = data.view[k].keys()
|
||||
keys.sort()
|
||||
for k2 in keys:
|
||||
line.append(tobytes(k)+b':'+tobytes(k2))
|
||||
else:
|
||||
line.append(tobytes(k))
|
||||
r = self.sep.join(value for value in line)
|
||||
r += b'\n'
|
||||
line = []
|
||||
|
||||
for k in self.tags:
|
||||
|
||||
if self.header and self.first_line:
|
||||
value = tobytes(k)
|
||||
value = data[k]
|
||||
if isinstance(data.view[k], Column_multi_elts):
|
||||
keys = data.view[k].keys()
|
||||
keys.sort()
|
||||
if value is None: # all keys at None
|
||||
for k2 in keys: # TODO could be much more efficient
|
||||
line.append(self.NAString)
|
||||
else:
|
||||
for k2 in keys: # TODO could be much more efficient
|
||||
if value[k2] is not None:
|
||||
line.append(str2bytes(str(bytes2str_object(value[k2])))) # genius programming
|
||||
else:
|
||||
line.append(self.NAString)
|
||||
else:
|
||||
value = data[k]
|
||||
if value is not None:
|
||||
if type(value) == Column_line:
|
||||
value = value.bytes()
|
||||
else:
|
||||
value = str2bytes(str(bytes2str_object(value))) # genius programming
|
||||
if value is None:
|
||||
value = self.NAString
|
||||
|
||||
line.append(value)
|
||||
|
||||
line.append(str2bytes(str(bytes2str_object(value))))
|
||||
else:
|
||||
line.append(self.NAString)
|
||||
|
||||
if self.header and self.first_line:
|
||||
r += self.sep.join(value for value in line)
|
||||
else:
|
||||
r = self.sep.join(value for value in line)
|
||||
|
||||
if self.first_line:
|
||||
self.first_line = False
|
||||
|
||||
return self.sep.join(value for value in line)
|
||||
|
||||
return r
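The reworked TabFormat expands multi-element columns into one output column per key, both in the header (tag:key) and in the data line, writing the NA string whenever a key is missing. A plain-Python sketch of that expansion for a single record; the Column_multi_elts detection is replaced here by a keys_per_tag mapping, and all names are illustrative:

    # Sketch: flatten a record whose values may be dicts into header and row cells.
    NA = b"NA"

    def _cell(v):
        if v is None:
            return NA
        return v if isinstance(v, bytes) else str(v).encode()

    def flatten(record, keys_per_tag):
        header, row = [], []
        for tag, value in record.items():
            if tag in keys_per_tag:                      # multi-element column
                for k in sorted(keys_per_tag[tag]):
                    header.append(tag + b":" + k)
                    row.append(_cell(None if value is None else value.get(k)))
            else:
                header.append(tag)
                row.append(_cell(value))
        return b"\t".join(header), b"\t".join(row)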
|
||||
|
@ -183,12 +183,13 @@ def buildConsensus(ali, seq, ref_tags=None):
|
||||
# doesn't work because uint8_t* are forced into bytes by cython (nothing read/kept beyond 0 values)
|
||||
#obi_set_qual_int_with_elt_idx_and_col_p_in_view(view_p, col_p, seq.index, 0, ali.consensus_qual, ali.consensus_len)
|
||||
seq.set(ref_tags.id+b"_CONS", ali.consensus_seq, quality=ali.consensus_qual)
|
||||
seq[b'ali_length'] = ali.consensus_len
|
||||
seq[b'score_norm']=float(ali.score)/ali.consensus_len
|
||||
seq[b"seq_length"] = ali.consensus_len
|
||||
seq[b"overlap_length"] = ali.overlap_len
|
||||
seq[b'score_norm']=round(float(ali.score)/ali.overlap_len, 3)
|
||||
seq[b'shift']=ali.shift
|
||||
else:
|
||||
if len(ali[0])>999: # TODO why?
|
||||
raise AssertionError,"Too long alignemnt"
|
||||
raise AssertionError,"Too long alignment"
|
||||
|
||||
ic=IterOnConsensus(ali)
|
||||
|
||||
@ -250,11 +251,22 @@ def buildJoinedSequence(ali, reverse, seq, forward=None):
|
||||
quality.extend(reverse.quality)
|
||||
seq.set(forward.id +b"_PairedEnd", s, definition=forward.definition, quality=quality)
|
||||
seq[b"score"]=ali.score
|
||||
seq[b"ali_direction"]=ali.direction
|
||||
if len(ali.direction) > 0:
|
||||
seq[b"ali_direction"]=ali.direction
|
||||
else:
|
||||
seq[b"ali_direction"]=None
|
||||
seq[b"mode"]=b"joined"
|
||||
seq[b"pairedend_limit"]=len(forward)
|
||||
seq[b"pairedend_limit"]=len(forward)
|
||||
seq[b"seq_length"] = ali.consensus_len
|
||||
seq[b"overlap_length"] = ali.overlap_len
|
||||
if ali.overlap_len > 0:
|
||||
seq[b'score_norm']=round(float(ali.score)/ali.overlap_len, 3)
|
||||
else:
|
||||
seq[b"score_norm"]=0.0
|
||||
|
||||
for tag in forward:
|
||||
if tag != REVERSE_SEQUENCE_COLUMN and tag != REVERSE_QUALITY_COLUMN:
|
||||
if tag != REVERSE_SEQUENCE_COLUMN and tag != REVERSE_QUALITY_COLUMN and \
|
||||
tag != NUC_SEQUENCE_COLUMN and tag != QUALITY_COLUMN:
|
||||
seq[tag] = forward[tag]
|
||||
return seq
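The joined-sequence branch now also reports seq_length and overlap_length, and only normalizes the score when there is an overlap, avoiding a division by zero. In short:

    # Sketch: normalized alignment score, guarded against empty overlaps.
    def score_norm(score, overlap_len):
        return round(float(score) / overlap_len, 3) if overlap_len > 0 else 0.0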
|
||||
|
||||
|
@ -177,7 +177,7 @@ def emblIterator_dir(dir_path,
|
||||
for filename in files:
|
||||
if read==only:
|
||||
return
|
||||
print("Parsing file %s (%d/%d)" % (tostr(filename), read_files, len(files)))
|
||||
print("Parsing file %s (%d/%d)" % (tostr(filename), read_files+1, len(files)))
|
||||
f = uopen(filename)
|
||||
if only is not None:
|
||||
only_f = only-read
|
||||
|
@ -104,6 +104,7 @@ def fastaNucIterator(lineiterator,
|
||||
cdef bytes sequence
|
||||
cdef int skipped, ionly, read
|
||||
cdef Nuc_Seq seq
|
||||
cdef bint stop
|
||||
|
||||
if only is None:
|
||||
ionly = -1
|
||||
@ -130,7 +131,8 @@ def fastaNucIterator(lineiterator,
|
||||
else:
|
||||
line = firstline
|
||||
|
||||
while True:
|
||||
stop=False
|
||||
while not stop:
|
||||
|
||||
if ionly >= 0 and read >= ionly:
|
||||
break
|
||||
@ -153,7 +155,7 @@ def fastaNucIterator(lineiterator,
|
||||
s.append(line[0:-1])
|
||||
line = next(iterator)
|
||||
except StopIteration:
|
||||
pass
|
||||
stop=True
|
||||
|
||||
sequence = b"".join(s)
|
||||
|
||||
|
@ -22,11 +22,12 @@ from libc.stdlib cimport free, malloc, realloc
|
||||
from libc.string cimport strcpy, strlen
|
||||
|
||||
|
||||
_featureMatcher = re.compile(b'^FEATURES.+\n(?=ORIGIN)',re.DOTALL + re.M)
|
||||
_featureMatcher = re.compile(b'^FEATURES.+\n(?=ORIGIN )',re.DOTALL + re.M)
|
||||
|
||||
_headerMatcher = re.compile(b'^LOCUS.+(?=\nFEATURES)', re.DOTALL + re.M)
|
||||
_seqMatcher = re.compile(b'(?<=ORIGIN).+(?=//\n)', re.DOTALL + re.M)
|
||||
_cleanSeq = re.compile(b'[ \n0-9]+')
|
||||
_seqMatcher = re.compile(b'ORIGIN .+(?=//\n)', re.DOTALL + re.M)
|
||||
_cleanSeq1 = re.compile(b'ORIGIN.+\n')
|
||||
_cleanSeq2 = re.compile(b'[ \n0-9]+')
|
||||
_acMatcher = re.compile(b'(?<=^ACCESSION ).+',re.M)
|
||||
_deMatcher = re.compile(b'(?<=^DEFINITION ).+\n( .+\n)*',re.M)
|
||||
_cleanDe = re.compile(b'\n *')
|
||||
@ -42,7 +43,8 @@ def genbankParser(bytes text):
|
||||
ft = _featureMatcher.search(text).group()
|
||||
|
||||
s = _seqMatcher.search(text).group()
|
||||
s = _cleanSeq.sub(b'', s).upper()
|
||||
s = _cleanSeq1.sub(b'', s)
|
||||
s = _cleanSeq2.sub(b'', s)
|
||||
|
||||
acs = _acMatcher.search(text).group()
|
||||
acs = acs.split()
|
||||
@ -51,23 +53,23 @@ def genbankParser(bytes text):
|
||||
|
||||
de = _deMatcher.search(header).group()
|
||||
de = _cleanDe.sub(b' ',de).strip().strip(b'.')
|
||||
|
||||
|
||||
tags = {}
|
||||
extractTaxon(ft, tags)
|
||||
|
||||
seq = Nuc_Seq(ac,
|
||||
s,
|
||||
definition=de,
|
||||
quality=None,
|
||||
offset=-1,
|
||||
tags=tags)
|
||||
|
||||
except Exception as e:
|
||||
print("\nCould not import sequence id:", text.split()[1], "(error raised:", e, ")")
|
||||
# Do not raise any Exception if you need the possibility to resume the generator
|
||||
# (Python generators can't resume after any exception is raised)
|
||||
return None
|
||||
|
||||
tags = {}
|
||||
extractTaxon(ft, tags)
|
||||
|
||||
seq = Nuc_Seq(ac,
|
||||
s,
|
||||
definition=de,
|
||||
quality=None,
|
||||
offset=-1,
|
||||
tags=tags)
|
||||
|
||||
|
||||
return seq
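Moving the tag extraction and Nuc_Seq construction inside the try block and returning None on failure keeps the surrounding generator alive, since a Python generator cannot resume after an exception has propagated out of it. A sketch of how a caller can rely on that contract (genbankParser is the function above; records is any iterator of raw entries):

    # Sketch: skip unparsable entries instead of aborting the whole import.
    def iter_sequences(records):
        for text in records:
            seq = genbankParser(text)   # returns None when an entry cannot be parsed
            if seq is not None:
                yield seq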
|
||||
|
||||
|
||||
@ -171,10 +173,12 @@ def genbankIterator_dir(dir_path,
|
||||
read = 0
|
||||
read_files = 0
|
||||
files = [filename for filename in glob.glob(os.path.join(path, b'*.gbff*'))]
|
||||
files.extend([filename for filename in glob.glob(os.path.join(path, b'*.seq*'))]) # new genbank extension
|
||||
files = list(set(files))
|
||||
for filename in files:
|
||||
if read==only:
|
||||
return
|
||||
print("Parsing file %s (%d/%d)" % (tostr(filename), read_files, len(files)))
|
||||
print("Parsing file %s (%d/%d)" % (tostr(filename), read_files+1, len(files)))
|
||||
f = uopen(filename)
|
||||
if only is not None:
|
||||
only_f = only-read
|
||||
|
@ -53,7 +53,11 @@ def entryIteratorFactory(lineiterator,
|
||||
|
||||
i = iterator
|
||||
|
||||
first=next(i)
|
||||
try:
|
||||
first=next(i)
|
||||
except StopIteration:
|
||||
first=""
|
||||
pass
|
||||
|
||||
format=b"tabular"
|
||||
|
||||
|
@ -210,10 +210,11 @@ def open_uri(uri,
|
||||
|
||||
error = None
|
||||
|
||||
if scheme==b"dms" or \
|
||||
(scheme==b"" and \
|
||||
(((not input) and "outputformat" not in config["obi"]) or \
|
||||
(input and "inputformat" not in config["obi"]))): # TODO maybe not best way
|
||||
if urib != b"-" and \
|
||||
(scheme==b"dms" or \
|
||||
(scheme==b"" and \
|
||||
(((not input) and "outputformat" not in config["obi"]) or \
|
||||
(input and "inputformat" not in config["obi"])))): # TODO maybe not best way
|
||||
|
||||
if default_dms is not None and b"/" not in urip.path: # assuming view to open in default DMS (TODO discuss)
|
||||
dms=(default_dms, urip.path)
|
||||
@ -275,11 +276,11 @@ def open_uri(uri,
|
||||
iseq = urib
|
||||
objclass = bytes
|
||||
else: # TODO update uopen to be able to write?
|
||||
if urip.path:
|
||||
file = open(urip.path, 'wb')
|
||||
else:
|
||||
if not urip.path or urip.path == b'-':
|
||||
file = sys.stdout.buffer
|
||||
|
||||
else:
|
||||
file = open(urip.path, 'wb')
|
||||
|
||||
if file is not None:
|
||||
qualifiers=parse_qs(urip.query)
|
||||
|
||||
@ -463,7 +464,7 @@ def open_uri(uri,
|
||||
if format is not None:
|
||||
if seqtype==b"nuc":
|
||||
objclass = Nuc_Seq # Nuc_Seq_Stored? TODO
|
||||
if format==b"fasta":
|
||||
if format==b"fasta" or format==b"silva":
|
||||
if input:
|
||||
iseq = fastaNucIterator(file,
|
||||
skip=skip,
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
from obitools3.dms.capi.obitypes cimport obitype_t, index_t
|
||||
|
||||
cpdef bytes format_separator(bytes format)
|
||||
cpdef bytes format_uniq_pattern(bytes format)
|
||||
cpdef int count_entries(file, bytes format)
|
||||
|
||||
cdef obi_errno_to_exception(index_t line_nb=*, object elt_id=*, str error_message=*)
|
||||
|
@ -24,11 +24,11 @@ import glob
|
||||
import gzip
|
||||
|
||||
|
||||
cpdef bytes format_separator(bytes format):
|
||||
cpdef bytes format_uniq_pattern(bytes format):
|
||||
if format == b"fasta":
|
||||
return b"\n>"
|
||||
elif format == b"fastq":
|
||||
return b"\n@"
|
||||
return b"\n\+\n"
|
||||
elif format == b"ngsfilter" or format == b"tabular":
|
||||
return b"\n"
|
||||
elif format == b"genbank" or format == b"embl":
|
||||
@ -42,7 +42,7 @@ cpdef bytes format_separator(bytes format):
|
||||
cpdef int count_entries(file, bytes format):
|
||||
|
||||
try:
|
||||
sep = format_separator(format)
|
||||
sep = format_uniq_pattern(format)
|
||||
if sep is None:
|
||||
return -1
|
||||
sep = re.compile(sep)
|
||||
@ -72,7 +72,7 @@ cpdef int count_entries(file, bytes format):
|
||||
return -1
|
||||
mmapped_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
|
||||
total_count += len(re.findall(sep, mmapped_file))
|
||||
if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank":
|
||||
if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank" and format != b"fastq":
|
||||
total_count += 1 # adding +1 for 1st entry because separators include \n (ngsfilter and tabular already count one more because of last \n)
|
||||
|
||||
except:
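count_entries estimates the number of records by counting occurrences of a per-format start pattern over a memory-mapped file. Formats whose pattern begins with a newline (fasta) need one extra count for the first record, whereas fastq is now counted by its quality separator line and no longer gets the correction. A self-contained sketch restricted to those two formats (gzip handling and error fallbacks omitted; patterns follow format_uniq_pattern above):

    # Sketch: count records with a regex over an mmap'ed file.
    import mmap, re

    PATTERNS = {b"fasta": rb"\n>", b"fastq": rb"\n\+\n"}

    def count_entries(path, fmt):
        pattern = re.compile(PATTERNS[fmt])
        with open(path, "rb") as f:
            data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            n = len(pattern.findall(data))
        return n + 1 if fmt == b"fasta" else n   # first record has no leading '\n>'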
|
||||
@ -166,7 +166,9 @@ cdef object bytes2str_object(object value): # Only works if complex types are d
|
||||
value[k] = bytes2str(v)
|
||||
if type(k) == bytes:
|
||||
value[bytes2str(k)] = value.pop(k)
|
||||
elif isinstance(value, list):
|
||||
elif isinstance(value, list) or isinstance(value, tuple):
|
||||
if isinstance(value, tuple):
|
||||
value = list(value)
|
||||
for i in range(len(value)):
|
||||
if isinstance(value[i], list) or isinstance(value[i], dict):
|
||||
value[i] = bytes2str_object(value[i])
|
||||
|
@ -1,5 +1,5 @@
major = 3
minor = 0
serial= '0-beta14'
serial= '1b3'

version ="%d.%02d.%s" % (major,minor,serial)
version ="%d.%d.%s" % (major,minor,serial)
|
||||
|
5	requirements.txt	Executable file
@ -0,0 +1,5 @@
--extra-index-url https://pypi.python.org/simple/
Cython>=0.24
Sphinx>=1.2.0
ipython>=3.0.0
breathe>=4.0.0
|
26	setup.py
@ -5,8 +5,9 @@ import re
|
||||
import subprocess
|
||||
|
||||
from distutils import log
|
||||
from distutils.core import setup
|
||||
|
||||
#from distutils.core import setup
|
||||
from setuptools import setup # to work with pip
|
||||
|
||||
from distutils.core import Extension
|
||||
from distutils.sysconfig import get_python_lib
|
||||
|
||||
@ -26,10 +27,11 @@ class Distribution(ori_Distribution):
|
||||
|
||||
ori_Distribution.__init__(self, attrs)
|
||||
|
||||
self.global_options.insert(0,('cobitools3', None, "intall location of the C library"
|
||||
self.global_options.insert(0,('cobitools3', None, "install location of the C library"
|
||||
))
|
||||
|
||||
from distutils.command.build import build as build_ori
|
||||
from setuptools.command.bdist_egg import bdist_egg as bdist_egg_ori
|
||||
from distutils.core import Command
|
||||
|
||||
|
||||
@ -70,6 +72,12 @@ class build(build_ori):
|
||||
build_ori.run(self)
|
||||
|
||||
|
||||
class bdist_egg(bdist_egg_ori):
|
||||
def run(self):
|
||||
self.run_command('build_clib')
|
||||
bdist_egg_ori.run(self)
|
||||
|
||||
|
||||
sys.path.append(os.path.abspath("python"))
|
||||
|
||||
|
||||
@ -88,9 +96,10 @@ PACKAGE = "OBITools3"
|
||||
VERSION = version
|
||||
AUTHOR = 'Celine Mercier'
|
||||
EMAIL = 'celine.mercier@metabarcoding.org'
|
||||
URL = "http://metabarcoding.org/obitools3"
|
||||
URL = "https://metabarcoding.org/obitools3"
|
||||
PLATFORMS = "posix"
|
||||
LICENSE = "CeCILL-V2"
|
||||
DESCRIPTION = "Tools and library for DNA metabarcoding",
|
||||
DESCRIPTION = "A package for the management of analyses and data in DNA metabarcoding."
|
||||
PYTHONMIN = '3.5'
|
||||
|
||||
SRC = 'python'
|
||||
@ -147,17 +156,24 @@ classifiers=['Development Status :: 4 - Beta',
|
||||
'Topic :: Utilities',
|
||||
]
|
||||
|
||||
with open("README.md", "r") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
setup(name=PACKAGE,
|
||||
description=DESCRIPTION,
|
||||
long_description=long_description,
|
||||
long_description_content_type="text/markdown",
|
||||
classifiers=classifiers,
|
||||
version=VERSION,
|
||||
author=AUTHOR,
|
||||
author_email=EMAIL,
|
||||
platforms=PLATFORMS,
|
||||
license=LICENSE,
|
||||
url=URL,
|
||||
ext_modules=xx,
|
||||
distclass=Distribution,
|
||||
cmdclass={'build': build,
|
||||
'bdist_egg': bdist_egg,
|
||||
'build_clib': build_clib},
|
||||
cobitools3=get_python_lib(),
|
||||
packages = findPackage('python'),
|
||||
|
@ -157,7 +157,7 @@ int build_reference_db(const char* dms_name,
|
||||
ecotx_t* lca_2 = NULL;
|
||||
ecotx_t* lca = NULL;
|
||||
index_t idx1, idx2;
|
||||
index_t i, j, k;
|
||||
index_t i, j, k, count;
|
||||
int32_t taxid_array_length;
|
||||
int32_t score_array_length;
|
||||
int32_t taxid_array_writable_length;
|
||||
@ -185,6 +185,7 @@ int build_reference_db(const char* dms_name,
|
||||
matrix_view_name = strcpy(matrix_view_name, o_view_name);
|
||||
strcat(matrix_view_name, "_matrix");
|
||||
|
||||
fprintf(stderr, "Aligning queries with reference database...\n");
|
||||
if (obi_lcs_align_one_column(dms_name,
|
||||
refs_view_name,
|
||||
"",
|
||||
@ -242,6 +243,7 @@ int build_reference_db(const char* dms_name,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
-1,
|
||||
@ -320,13 +322,19 @@ int build_reference_db(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
count = (matrix_with_lca_view->infos)->line_count;
|
||||
fprintf(stderr, "Computing LCAs...\n");
|
||||
|
||||
// Compute all the LCAs
|
||||
// For each pair
|
||||
for (i=0; i<(matrix_with_lca_view->infos)->line_count; i++)
|
||||
for (i=0; i<count; i++)
|
||||
{
|
||||
if (! keep_running)
|
||||
return -1;
|
||||
|
||||
if (i%1000 == 0)
|
||||
fprintf(stderr,"\rDone : %f %% ", (i / (float) count)*100);
|
||||
|
||||
// Read all taxids associated with the first sequence and compute their LCA
|
||||
// Read line index
|
||||
idx1 = obi_get_int_with_elt_idx_and_col_p_in_view(matrix_with_lca_view, matrix_idx1_column, i, 0);
|
||||
@ -363,6 +371,7 @@ int build_reference_db(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
fprintf(stderr,"\rDone : 100 %% \n");
|
||||
|
||||
// Clone refs view, add 2 arrays columns for lca and score, compute and write them
|
||||
|
||||
@ -384,6 +393,7 @@ int build_reference_db(const char* dms_name,
|
||||
1,
|
||||
"",
|
||||
false,
|
||||
false,
|
||||
true,
|
||||
false,
|
||||
"",
|
||||
@ -407,6 +417,7 @@ int build_reference_db(const char* dms_name,
|
||||
1,
|
||||
"",
|
||||
false,
|
||||
false,
|
||||
true,
|
||||
false,
|
||||
"",
|
||||
@ -442,13 +453,18 @@ int build_reference_db(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
fprintf(stderr, "Building LCA arrays...\n");
|
||||
|
||||
// For each sequence, look for all its alignments in the matrix, and for each different LCA taxid/score, order them and write them
|
||||
// Going through matrix once, filling refs arrays on the go for efficiency
|
||||
for (i=0; i<(matrix_with_lca_view->infos)->line_count; i++)
|
||||
for (i=0; i<count; i++)
|
||||
{
|
||||
if (! keep_running)
|
||||
return -1;
|
||||
|
||||
if (i%1000 == 0)
|
||||
fprintf(stderr,"\rDone : %f %% ", (i / (float) count)*100);
|
||||
|
||||
// Read ref line indexes
|
||||
idx1 = obi_get_int_with_elt_idx_and_col_p_in_view(matrix_with_lca_view, matrix_idx1_column, i, 0);
|
||||
idx2 = obi_get_int_with_elt_idx_and_col_p_in_view(matrix_with_lca_view, matrix_idx2_column, i, 0);
|
||||
@ -464,6 +480,8 @@ int build_reference_db(const char* dms_name,
|
||||
// Read alignment score
|
||||
score = obi_get_float_with_elt_idx_and_col_p_in_view(matrix_with_lca_view, matrix_score_column, i, 0);
|
||||
|
||||
//fprintf(stderr, "\n\ntaxid_lca=%d, score=%f, idx1=%d, idx2=%d", taxid_lca, score, idx1, idx2);
|
||||
|
||||
///////////////// Compute for first sequence \\\\\\\\\\\\\\\\\\\\\\\ (TODO function)
|
||||
|
||||
// Read arrays
|
||||
@ -480,9 +498,11 @@ int build_reference_db(const char* dms_name,
|
||||
// return -1;
|
||||
// }
|
||||
|
||||
//fprintf(stderr, "\n1st sequence");
|
||||
// If empty, add values
|
||||
if (taxid_array_length == 0)
|
||||
{
|
||||
//fprintf(stderr, "\nEmpty, add value");
|
||||
if (obi_set_array_with_col_p_in_view(o_view, final_lca_taxid_a_column, idx1, &taxid_lca, (uint8_t) (obi_sizeof(OBI_INT) * 8), 1) < 0)
|
||||
{
|
||||
obidebug(1, "\nError setting a LCA taxid array in a column when building a reference database");
|
||||
@ -496,6 +516,8 @@ int build_reference_db(const char* dms_name,
|
||||
}
|
||||
else
|
||||
{
|
||||
//fprintf(stderr, "\nNot empty");
|
||||
|
||||
j = 0;
|
||||
modified = false;
|
||||
while (j < taxid_array_length)
|
||||
@ -509,6 +531,9 @@ int build_reference_db(const char* dms_name,
|
||||
memcpy(score_array_writable, score_array, score_array_length*sizeof(obifloat_t));
|
||||
modified = true;
|
||||
|
||||
//fprintf(stderr, "\nSame LCA, replace %d and %f with %d and %f", lca_taxid_array_writable[j],
|
||||
// score_array_writable[j], taxid_lca, score);
|
||||
|
||||
// Better score for the same LCA, replace this LCA/score pair
|
||||
lca_taxid_array_writable[j] = taxid_lca;
|
||||
score_array_writable[j] = score;
|
||||
@ -535,6 +560,8 @@ int build_reference_db(const char* dms_name,
|
||||
{
|
||||
if (score > score_array[j])
|
||||
{
|
||||
//fprintf(stderr, "\nInsert new");
|
||||
|
||||
memcpy(lca_taxid_array_writable, lca_taxid_array, taxid_array_length*sizeof(obiint_t));
|
||||
memcpy(score_array_writable, score_array, score_array_length*sizeof(obifloat_t));
|
||||
modified = true;
|
||||
@ -579,10 +606,15 @@ int build_reference_db(const char* dms_name,
|
||||
memcpy(score_array_writable, score_array, score_array_length*sizeof(obifloat_t));
|
||||
modified = true;
|
||||
|
||||
//fprintf(stderr, "\nAppend at the end");
|
||||
|
||||
// Append LCA
|
||||
lca_taxid_array_writable[taxid_array_writable_length] = taxid_lca;
|
||||
score_array_writable[score_array_writable_length] = score;
|
||||
|
||||
taxid_array_writable_length++;
|
||||
score_array_writable_length++;
|
||||
|
||||
// Remove the previous (children) LCAs from the array if their score is equal or lower
|
||||
while ((j>0) && (score_array_writable[j-1] <= score))
|
||||
{
|
||||
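The hunks above append a new (LCA taxid, score) pair to the per-reference arrays and then drop the preceding (children) LCAs whose score is lower or equal. Below is a minimal, self-contained sketch of that maintenance step; the struct, the fixed capacity and the function name are hypothetical rather than the OBITools3 API, and the removal body (not visible in this diff) is assumed to shift the new pair over the discarded entries.

/* Simplified sketch (hypothetical names, not the OBITools3 API): maintain
 * parallel arrays of (LCA taxid, score) pairs ordered by decreasing score.
 * When a new pair arrives, the entries just before it whose score is lower
 * or equal are dropped, mirroring the "remove previous (children) LCAs" step. */
#include <stdint.h>
#include <stddef.h>

typedef struct {
    int32_t taxids[32];   /* assumed fixed capacity, for the sketch only */
    double  scores[32];
    size_t  length;
} lca_list_t;

static void lca_list_append(lca_list_t *list, int32_t taxid, double score)
{
    size_t j = list->length;

    /* Append the new pair at the end */
    list->taxids[j] = taxid;
    list->scores[j] = score;
    list->length = j + 1;

    /* Remove predecessors (children LCAs) made redundant by the new score */
    while (j > 0 && list->scores[j - 1] <= score) {
        list->taxids[j - 1] = list->taxids[j];
        list->scores[j - 1] = list->scores[j];
        list->length--;
        j--;
    }
}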
@ -603,6 +635,13 @@ int build_reference_db(const char* dms_name,
|
||||
// Write new arrays
|
||||
if (modified)
|
||||
{
|
||||
// fprintf(stderr, "\n\nnew array:");
|
||||
// for (k=0;k<taxid_array_writable_length;k++)
|
||||
// {
|
||||
// lca = obi_taxo_get_taxon_with_taxid(tax, lca_taxid_array_writable[k]);
|
||||
// fprintf(stderr, "\nLCA=%d, %s, score=%f", lca_taxid_array_writable[k], lca->name, score_array_writable[k]);
|
||||
// }
|
||||
|
||||
if (obi_set_array_with_col_p_in_view(o_view, final_lca_taxid_a_column, idx1, lca_taxid_array_writable, (uint8_t) (obi_sizeof(OBI_INT) * 8), taxid_array_writable_length) < 0)
|
||||
{
|
||||
obidebug(1, "\nError setting a LCA taxid array in a column when building a reference database");
|
||||
@ -632,9 +671,13 @@ int build_reference_db(const char* dms_name,
|
||||
// return -1;
|
||||
// }
|
||||
|
||||
//fprintf(stderr, "\n2nd sequence");
|
||||
|
||||
// If empty, add values
|
||||
if (taxid_array_length == 0)
|
||||
{
|
||||
//fprintf(stderr, "\nEmpty, add value");
|
||||
|
||||
if (obi_set_array_with_col_p_in_view(o_view, final_lca_taxid_a_column, idx2, &taxid_lca, (uint8_t) (obi_sizeof(OBI_INT) * 8), 1) < 0)
|
||||
{
|
||||
obidebug(1, "\nError setting a LCA taxid array in a column when building a reference database");
|
||||
@ -648,6 +691,8 @@ int build_reference_db(const char* dms_name,
|
||||
}
|
||||
else
|
||||
{
|
||||
//fprintf(stderr, "\nNot empty");
|
||||
|
||||
j = 0;
|
||||
modified = false;
|
||||
while (j < taxid_array_length)
|
||||
@ -661,6 +706,9 @@ int build_reference_db(const char* dms_name,
|
||||
memcpy(score_array_writable, score_array, score_array_length*sizeof(obifloat_t));
|
||||
modified = true;
|
||||
|
||||
//fprintf(stderr, "\nSame LCA, replace %d and %f with %d and %f", lca_taxid_array_writable[j],
|
||||
// score_array_writable[j], taxid_lca, score);
|
||||
|
||||
// Better score for the same LCA, replace this LCA/score pair
|
||||
lca_taxid_array_writable[j] = taxid_lca;
|
||||
score_array_writable[j] = score;
|
||||
@ -687,6 +735,8 @@ int build_reference_db(const char* dms_name,
|
||||
{
|
||||
if (score > score_array[j])
|
||||
{
|
||||
//fprintf(stderr, "\nInsert new");
|
||||
|
||||
memcpy(lca_taxid_array_writable, lca_taxid_array, taxid_array_length*sizeof(obiint_t));
|
||||
memcpy(score_array_writable, score_array, score_array_length*sizeof(obifloat_t));
|
||||
modified = true;
|
||||
@ -727,6 +777,8 @@ int build_reference_db(const char* dms_name,
|
||||
|
||||
if (j == taxid_array_length) // same or parent LCA not found, need to be appended at the end
|
||||
{
|
||||
//fprintf(stderr, "\nAppend at the end");
|
||||
|
||||
memcpy(lca_taxid_array_writable, lca_taxid_array, taxid_array_length*sizeof(obiint_t));
|
||||
memcpy(score_array_writable, score_array, score_array_length*sizeof(obifloat_t));
|
||||
modified = true;
|
||||
@ -735,6 +787,9 @@ int build_reference_db(const char* dms_name,
|
||||
lca_taxid_array_writable[taxid_array_writable_length] = taxid_lca;
|
||||
score_array_writable[score_array_writable_length] = score;
|
||||
|
||||
taxid_array_writable_length++;
|
||||
score_array_writable_length++;
|
||||
|
||||
// Remove the previous (children) LCAs from the array if their score is equal or lower
|
||||
while ((j>0) && (score_array_writable[j-1] <= score))
|
||||
{
|
||||
@ -769,11 +824,17 @@ int build_reference_db(const char* dms_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
fprintf(stderr,"\rDone : 100 %% \n");
|
||||
|
||||
fprintf(stderr, "Writing results...\n");
|
||||
count = (o_view->infos)->line_count;
|
||||
// Fill empty LCA information (the alignment matrix is potentially sparse) with the sequence's own taxid
|
||||
score=1.0; // technically getting LCA of identical sequences
|
||||
for (i=0; i<(o_view->infos)->line_count; i++)
|
||||
for (i=0; i<count; i++)
|
||||
{
|
||||
if (i%1000 == 0)
|
||||
fprintf(stderr,"\rDone : %f %% ", (i / (float) count)*100);
|
||||
|
||||
obi_get_array_with_col_p_in_view(o_view, final_lca_taxid_a_column, i, &taxid_array_length);
|
||||
if (taxid_array_length == 0) // no LCA set
|
||||
{
|
||||
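The loop above patches references that never appeared in the sparse alignment matrix: they receive a one-element LCA array holding their own taxid with a score of 1.0, since a sequence is trivially 100 % identical to itself. A hedged sketch of that rule, with hypothetical names rather than the real column accessors:

/* Sketch only (hypothetical helper, not the OBITools3 API): a reference with
 * no LCA recorded gets a one-element array containing its own taxid, scored 1.0. */
#include <stdint.h>

static void fill_missing_lca(int32_t *lca_taxids, double *lca_scores,
                             int32_t *lca_length, int32_t own_taxid)
{
    if (*lca_length == 0) {   /* no LCA set for this reference */
        lca_taxids[0] = own_taxid;
        lca_scores[0] = 1.0;
        *lca_length  = 1;
    }
}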
@ -799,6 +860,7 @@ int build_reference_db(const char* dms_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
fprintf(stderr,"\rDone : 100 %% \n");
|
||||
|
||||
// Add information about the threshold used to build the DB
|
||||
snprintf(threshold_str, 5, "%f", threshold);
|
||||
@ -858,7 +920,6 @@ int build_reference_db(const char* dms_name,
|
||||
free(matrix_view_name);
|
||||
free(matrix_with_lca_view_name);
|
||||
|
||||
fprintf(stderr,"\rDone : 100 %% \n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -36,10 +36,12 @@ bool only_ATGC(const char* seq)
|
||||
{
|
||||
if (!((*c == 'A') || \
|
||||
(*c == 'T') || \
|
||||
(*c == 'U') || \
|
||||
(*c == 'G') || \
|
||||
(*c == 'C') || \
|
||||
(*c == 'a') || \
|
||||
(*c == 't') || \
|
||||
(*c == 'u') || \
|
||||
(*c == 'g') || \
|
||||
(*c == 'c')))
|
||||
{
|
||||
@ -182,6 +184,8 @@ byte_t* encode_seq_on_2_bits(const char* seq, int32_t length)
|
||||
break;
|
||||
case 't':
|
||||
case 'T':
|
||||
case 'u':
|
||||
case 'U':
|
||||
seq_b[i/4] |= NUC_T_2b;
|
||||
break;
|
||||
default:
|
||||
@ -288,6 +292,8 @@ byte_t* encode_seq_on_4_bits(const char* seq, int32_t length)
|
||||
break;
|
||||
case 't':
|
||||
case 'T':
|
||||
case 'u': // discussable
|
||||
case 'U':
|
||||
seq_b[i/2] |= NUC_T_4b;
|
||||
break;
|
||||
case 'r':
|
||||
|
44 src/encode.h
@ -64,7 +64,7 @@ enum
|
||||
|
||||
|
||||
/**
|
||||
* @brief Checks if there are only 'atgcATGC' characters in a
|
||||
* @brief Checks if there are only 'atgcuATGCU' characters in a
|
||||
* character string.
|
||||
*
|
||||
* @param seq The sequence to check.
|
||||
@ -129,12 +129,13 @@ byte_t get_nucleotide_from_encoded_seq(byte_t* seq, int32_t idx, uint8_t encodin
|
||||
/**
|
||||
* @brief Encodes a DNA sequence with each nucleotide coded on 2 bits.
|
||||
*
|
||||
* A or a : 00
|
||||
* C or c : 01
|
||||
* T or t : 10
|
||||
* G or g : 11
|
||||
* A or a : 00
|
||||
* C or c : 01
|
||||
* T or t or U or u : 10
|
||||
* G or g : 11
|
||||
*
|
||||
* @warning The DNA sequence must contain only 'atgcATGC' characters.
|
||||
* @warning The DNA sequence must contain only 'atgcuATGCU' characters.
|
||||
* @warning Uracil ('U') bases are encoded as Thymine ('T') bases.
|
||||
*
|
||||
* @param seq The sequence to encode.
|
||||
* @param length The length of the sequence to encode.
|
||||
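As a reading aid for the 2-bit table documented above, here is a minimal standalone sketch of the per-nucleotide mapping, with U/u folded onto the T/t code; the function name is illustrative and is not part of encode.h.

/* Illustrative sketch (not the library's encoder): map one nucleotide to its
 * 2-bit code as documented above. Returns 0xFF for a character outside 'acgtuACGTU'. */
#include <stdint.h>

static uint8_t nuc_to_2bits(char c)
{
    switch (c) {
        case 'a': case 'A':             return 0x0;  /* 00 */
        case 'c': case 'C':             return 0x1;  /* 01 */
        case 't': case 'T':
        case 'u': case 'U':             return 0x2;  /* 10, U encoded as T */
        case 'g': case 'G':             return 0x3;  /* 11 */
        default:                        return 0xFF; /* invalid character */
    }
}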
@ -169,23 +170,24 @@ char* decode_seq_on_2_bits(byte_t* seq_b, int32_t length_seq);
|
||||
/**
|
||||
* @brief Encodes a DNA sequence with each nucleotide coded on 4 bits.
|
||||
*
|
||||
* A or a : 0001
|
||||
* C or c : 0010
|
||||
* G or g : 0011
|
||||
* T or t : 0100
|
||||
* R or r : 0101
|
||||
* Y or y : 0110
|
||||
* S or s : 0111
|
||||
* W or w : 1000
|
||||
* K or k : 1001
|
||||
* M or m : 1010
|
||||
* B or b : 1011
|
||||
* D or d : 1100
|
||||
* H or h : 1101
|
||||
* V or v : 1110
|
||||
* N or n : 1111
|
||||
* A or a : 0001
|
||||
* C or c : 0010
|
||||
* G or g : 0011
|
||||
* T or t or U or u : 0100
|
||||
* R or r : 0101
|
||||
* Y or y : 0110
|
||||
* S or s : 0111
|
||||
* W or w : 1000
|
||||
* K or k : 1001
|
||||
* M or m : 1010
|
||||
* B or b : 1011
|
||||
* D or d : 1100
|
||||
* H or h : 1101
|
||||
* V or v : 1110
|
||||
* N or n : 1111
|
||||
*
|
||||
* @warning The DNA sequence must contain only IUPAC characters.
|
||||
* @warning Uracil ('U') bases are encoded as Thymine ('T') bases.
|
||||
*
|
||||
* @param seq The sequence to encode.
|
||||
* @param length The length of the sequence to encode.
|
||||
|
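Likewise, the 4-bit IUPAC table documented above can be summarized by a small illustrative mapping (again a hypothetical helper, not the library's encoder), with U/u sharing the T/t code:

/* Illustrative sketch: 4-bit IUPAC codes as listed above, U/u mapped to T/t. */
#include <stdint.h>
#include <ctype.h>

static uint8_t nuc_to_4bits(char c)
{
    switch (tolower((unsigned char) c)) {
        case 'a': return 0x1;
        case 'c': return 0x2;
        case 'g': return 0x3;
        case 't': case 'u': return 0x4;
        case 'r': return 0x5;
        case 'y': return 0x6;
        case 's': return 0x7;
        case 'w': return 0x8;
        case 'k': return 0x9;
        case 'm': return 0xA;
        case 'b': return 0xB;
        case 'd': return 0xC;
        case 'h': return 0xD;
        case 'v': return 0xE;
        case 'n': return 0xF;
        default:  return 0x0; /* 0000 is unused in the table above */
    }
}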
@ -413,7 +413,13 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
|
||||
return NULL;
|
||||
}
|
||||
|
||||
score = max_common_kmers + kmer_size - 1; // aka the number of nucleotides in the longest stretch of kmers perfectly matching
|
||||
if (max_common_kmers > 0)
|
||||
score = max_common_kmers + kmer_size - 1; // aka an approximation of the number of nucleotides matching in the overlap of the alignment.
|
||||
// It's an approximation because one mismatch produces kmer_size kmer mismatches when it falls in the middle of the overlap,
// and fewer kmer mismatches when it falls towards the ends of the overlap. The worst case (the most kmer mismatches) is assumed,
// meaning that the score is often underestimated and never overestimated.
|
||||
else
|
||||
score = 0;
|
||||
abs_shift = abs(best_shift);
|
||||
|
||||
// Save result in Obi_ali structure
|
||||
@ -423,10 +429,15 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
|
||||
ali->shift = abs_shift;
|
||||
ali->consensus_seq = NULL;
|
||||
ali->consensus_qual = NULL;
|
||||
if (((best_shift <= 0) && (!switched_seqs)) || ((best_shift > 0) && switched_seqs))
|
||||
strcpy(ali->direction, "left");
|
||||
if (score == 0)
|
||||
ali->direction[0] = '\0';
|
||||
else
|
||||
strcpy(ali->direction, "right");
|
||||
{
|
||||
if (((best_shift <= 0) && (!switched_seqs)) || ((best_shift > 0) && switched_seqs))
|
||||
strcpy(ali->direction, "left");
|
||||
else
|
||||
strcpy(ali->direction, "right");
|
||||
}
|
||||
|
||||
// Build the consensus sequence if asked
|
||||
if (build_consensus)
|
||||
|
@ -27,7 +27,11 @@
|
||||
* @brief Alignment structure, with information about the similarity and about how to rebuild the alignment.
|
||||
*/
|
||||
typedef struct Obi_ali {
|
||||
int score; /**< Alignment score, corresponding to the number of nucleotides in the longest stretch of kmers perfectly matching.
|
||||
int score; /**< Alignment score, corresponding to an approximation of the number of
|
||||
* nucleotides matching in the overlap of the alignment.
|
||||
* It's an approximation because one mismatch produces kmer_size kmer mismatches when it falls in the middle of the overlap,
* and fewer kmer mismatches when it falls towards the ends of the overlap. The worst case (the most kmer mismatches) is assumed,
* meaning that the score is often underestimated and never overestimated.
|
||||
*/
|
||||
int consensus_length; /**< Length of the final consensus sequence.
|
||||
*/
|
||||
|
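A compact restatement of the score rule introduced by this change, as a hypothetical standalone helper rather than kmer_similarity() itself: n shared kmers of size k at the best shift imply at least n + k - 1 matching nucleotides in the overlap, and a pair with no shared kmer now scores 0.

/* Sketch of the score formula described above (hypothetical helper). */
static int kmer_overlap_score(int max_common_kmers, int kmer_size)
{
    if (max_common_kmers > 0)
        return max_common_kmers + kmer_size - 1;
    return 0;   /* no shared kmer: no estimated overlap */
}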
162 src/obi_clean.c
@ -88,42 +88,42 @@ static int create_output_columns(Obiview_p o_view,
|
||||
int sample_count)
|
||||
{
|
||||
// Status column
|
||||
if (obi_view_add_column(o_view, CLEAN_STATUS_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, sample_count, (sample_column->header)->elements_names, true, false, false, NULL, NULL, -1, CLEAN_STATUS_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, CLEAN_STATUS_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, sample_count, (sample_column->header)->elements_names, true, true, false, false, NULL, NULL, -1, CLEAN_STATUS_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", CLEAN_STATUS_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Head column
|
||||
if (obi_view_add_column(o_view, CLEAN_HEAD_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_HEAD_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, CLEAN_HEAD_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_HEAD_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", CLEAN_HEAD_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Sample count column
|
||||
if (obi_view_add_column(o_view, CLEAN_SAMPLECOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_SAMPLECOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, CLEAN_SAMPLECOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_SAMPLECOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", CLEAN_SAMPLECOUNT_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Head count column
|
||||
if (obi_view_add_column(o_view, CLEAN_HEADCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_HEADCOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, CLEAN_HEADCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_HEADCOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", CLEAN_HEADCOUNT_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Internal count column
|
||||
if (obi_view_add_column(o_view, CLEAN_INTERNALCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_INTERNALCOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, CLEAN_INTERNALCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_INTERNALCOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", CLEAN_INTERNALCOUNT_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Singleton count column
|
||||
if (obi_view_add_column(o_view, CLEAN_SINGLETONCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_SINGLETONCOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, CLEAN_SINGLETONCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_SINGLETONCOUNT_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", CLEAN_SINGLETONCOUNT_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -229,6 +229,8 @@ int obi_clean(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
seq_count = (i_view->infos)->line_count;
|
||||
|
||||
// Open the sequence column
|
||||
if (strcmp((i_view->infos)->view_type, VIEW_TYPE_NUC_SEQS) == 0)
|
||||
iseq_column = obi_view_get_column(i_view, NUC_SEQUENCE_COLUMN);
|
||||
@ -245,8 +247,17 @@ int obi_clean(const char* dms_name,
|
||||
}
|
||||
|
||||
// Open the sample column if there is one
|
||||
if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL))
|
||||
sample_column = NULL;
|
||||
if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL) || (seq_count == 0))
|
||||
{
|
||||
fprintf(stderr, "Info: No sample information provided, assuming one sample.\n");
|
||||
sample_column = obi_view_get_column(i_view, COUNT_COLUMN);
|
||||
if (sample_column == NULL)
|
||||
{
|
||||
obidebug(1, "\nError getting the COUNT column");
|
||||
return -1;
|
||||
}
|
||||
sample_count = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
sample_column = obi_view_get_column(i_view, sample_column_name);
|
||||
@ -255,6 +266,13 @@ int obi_clean(const char* dms_name,
|
||||
obidebug(1, "\nError getting the sample column");
|
||||
return -1;
|
||||
}
|
||||
sample_count = (sample_column->header)->nb_elements_per_line;
|
||||
// Check that the sample column is a merged column with all sample information
|
||||
if (sample_count == 1)
|
||||
{
|
||||
obidebug(1, "\n\nError: If a sample column is provided, it must contain 'merged' sample counts as built by obi uniq with the -m option\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Create the output view, or a temporary one if heads only
|
||||
@ -279,8 +297,6 @@ int obi_clean(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
sample_count = (sample_column->header)->nb_elements_per_line;
|
||||
|
||||
// Create the output columns
|
||||
if (create_output_columns(o_view, sample_column, sample_count) < 0)
|
||||
{
|
||||
@ -326,66 +342,67 @@ int obi_clean(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Build kmer tables
|
||||
ktable = hash_seq_column(i_view, iseq_column, 0);
|
||||
if (ktable == NULL)
|
||||
if (seq_count > 0)
|
||||
{
|
||||
obi_set_errno(OBI_CLEAN_ERROR);
|
||||
obidebug(1, "\nError building kmer tables before aligning");
|
||||
return -1;
|
||||
}
|
||||
// Build kmer tables
|
||||
ktable = hash_seq_column(i_view, iseq_column, 0);
|
||||
if (ktable == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_CLEAN_ERROR);
|
||||
obidebug(1, "\nError building kmer tables before aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
seq_count = (i_view->infos)->line_count;
|
||||
|
||||
// Allocate arrays for sample counts otherwise reading in mapped files takes longer
|
||||
complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
|
||||
if (complete_sample_count_array == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
|
||||
return -1;
|
||||
}
|
||||
for (samp=0; samp < sample_count; samp++)
|
||||
{
|
||||
for (k=0; k<seq_count; k++)
|
||||
complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
|
||||
}
|
||||
|
||||
// Allocate arrays for blobs otherwise reading in mapped files takes longer
|
||||
blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
|
||||
if (blob_array == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for the array of blobs");
|
||||
return -1;
|
||||
}
|
||||
for (k=0; k<seq_count; k++)
|
||||
{
|
||||
blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
|
||||
}
|
||||
|
||||
// Allocate alignment result array (byte at 0 if not aligned yet,
|
||||
// 1 if sequence at index has a similarity above the threshold with the current sequence,
|
||||
// 2 if sequence at index has a similarity below the threshold with the current sequence)
|
||||
alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
|
||||
if (alignment_result_array == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for alignment result array");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Initialize all sequences to singletons or NA if no sequences in that sample
|
||||
for (k=0; k<seq_count; k++)
|
||||
{
|
||||
// Allocate arrays for sample counts otherwise reading in mapped files takes longer
|
||||
complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
|
||||
if (complete_sample_count_array == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
|
||||
return -1;
|
||||
}
|
||||
for (samp=0; samp < sample_count; samp++)
|
||||
{
|
||||
if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA) // Only initialize samples where there are some sequences
|
||||
for (k=0; k<seq_count; k++)
|
||||
complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
|
||||
}
|
||||
|
||||
// Allocate arrays for blobs otherwise reading in mapped files takes longer
|
||||
blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
|
||||
if (blob_array == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for the array of blobs");
|
||||
return -1;
|
||||
}
|
||||
for (k=0; k<seq_count; k++)
|
||||
{
|
||||
blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
|
||||
}
|
||||
|
||||
// Allocate alignment result array (byte at 0 if not aligned yet,
|
||||
// 1 if sequence at index has a similarity above the threshold with the current sequence,
|
||||
// 2 if sequence at index has a similarity below the threshold with the current sequence)
|
||||
alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
|
||||
if (alignment_result_array == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for alignment result array");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Initialize all sequences to singletons or NA if no sequences in that sample
|
||||
for (k=0; k<seq_count; k++)
|
||||
{
|
||||
for (samp=0; samp < sample_count; samp++)
|
||||
{
|
||||
if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
|
||||
if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA) // Only initialize samples where there are some sequences
|
||||
{
|
||||
obidebug(1, "\nError initializing all sequences to singletons");
|
||||
return -1;
|
||||
if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
|
||||
{
|
||||
obidebug(1, "\nError initializing all sequences to singletons");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
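The refactored block above copies the per-sample counts once into a flat seq_count * sample_count array so that the inner loops do not keep reading the memory-mapped columns. A sketch of the indexing convention it uses (names are hypothetical):

/* Sketch of the flat layout used above: counts are stored sample by sample,
 * so the count of sequence k in sample samp lives at index k + samp * seq_count. */
#include <stddef.h>

static inline int get_sample_count(const int *counts,
                                   size_t seq_count,
                                   size_t k,      /* sequence index */
                                   size_t samp)   /* sample index   */
{
    return counts[k + samp * seq_count];
}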
@ -537,19 +554,22 @@ int obi_clean(const char* dms_name,
|
||||
}
|
||||
}
|
||||
|
||||
free_kmer_tables(ktable, seq_count);
|
||||
free(complete_sample_count_array);
|
||||
free(blob_array);
|
||||
free(alignment_result_array);
|
||||
if (seq_count > 0)
|
||||
{
|
||||
free_kmer_tables(ktable, seq_count);
|
||||
free(complete_sample_count_array);
|
||||
free(blob_array);
|
||||
free(alignment_result_array);
|
||||
}
|
||||
|
||||
fprintf(stderr, "\n");
|
||||
|
||||
if (stop)
|
||||
return -1;
|
||||
|
||||
if (heads_only)
|
||||
if (heads_only && (seq_count > 0))
|
||||
{
|
||||
line_selection = malloc((o_view->infos)->line_count * sizeof(index_t));
|
||||
line_selection = malloc((((o_view->infos)->line_count) + 1) * sizeof(index_t));
|
||||
if (line_selection == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
@ -621,7 +641,7 @@ int obi_clean(const char* dms_name,
|
||||
}
|
||||
|
||||
// Flag the end of the line selection
|
||||
if (heads_only)
|
||||
if (heads_only && (seq_count > 0))
|
||||
line_selection[l] = -1;
|
||||
|
||||
// Create new view with line selection if heads only
|
||||
|
@ -52,7 +52,8 @@
|
||||
*
|
||||
* @param dms A pointer on an OBIDMS.
|
||||
* @param i_view_name The name of the input view.
|
||||
* @param sample_column_name The name of the OBI_STR column in the input view where the sample information is kept.
|
||||
* @param sample_column_name The name of the column in the input view where the sample information is kept.
|
||||
* Must be merged informations as built by the obi uniq tool (checked by the function).
|
||||
* NULL or "" (empty string) if there is no sample information.
|
||||
* @param o_view_name The name of the output view where the results should be written (should not already exist).
|
||||
* @param o_view_comments The comments that should be associated with the output view.
|
||||
|
@ -150,49 +150,49 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
|
||||
static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
{
|
||||
// Original length column
|
||||
if (obi_view_add_column(o_view, ECOPCR_SEQLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SEQLEN_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_SEQLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SEQLEN_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_SEQLEN_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Amplicon length column
|
||||
if (obi_view_add_column(o_view, ECOPCR_AMPLICONLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_AMPLICONLEN_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_AMPLICONLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_AMPLICONLEN_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_AMPLICONLEN_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Taxid column
|
||||
if (obi_view_add_column(o_view, TAXID_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, TAXID_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", TAXID_COLUMN);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Taxonomic rank column
|
||||
if (obi_view_add_column(o_view, ECOPCR_RANK_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_RANK_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_RANK_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_RANK_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_RANK_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Species taxid column
|
||||
if (obi_view_add_column(o_view, ECOPCR_SPECIES_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_SPECIES_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_TAXID_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Genus taxid column
|
||||
if (obi_view_add_column(o_view, ECOPCR_GENUS_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_GENUS_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_TAXID_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Family taxid column
|
||||
if (obi_view_add_column(o_view, ECOPCR_FAMILY_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_FAMILY_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_TAXID_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -201,7 +201,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
if (kingdom_mode)
|
||||
{
|
||||
// Kingdom taxid column
|
||||
if (obi_view_add_column(o_view, ECOPCR_KINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_KINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_TAXID_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -210,7 +210,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
else
|
||||
{
|
||||
// Superkingdom taxid column
|
||||
if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -218,28 +218,28 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
}
|
||||
|
||||
// Scientific name column
|
||||
if (obi_view_add_column(o_view, ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SCIENTIFIC_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SCIENTIFIC_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Species name column
|
||||
if (obi_view_add_column(o_view, ECOPCR_SPECIES_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_SPECIES_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_NAME_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Genus name column
|
||||
if (obi_view_add_column(o_view, ECOPCR_GENUS_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_GENUS_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_NAME_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Family name column
|
||||
if (obi_view_add_column(o_view, ECOPCR_FAMILY_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_FAMILY_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_NAME_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -248,7 +248,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
if (kingdom_mode)
|
||||
{
|
||||
// Kingdom name column
|
||||
if (obi_view_add_column(o_view, ECOPCR_KINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_KINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_NAME_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -257,7 +257,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
else
|
||||
{
|
||||
// Superkingdom name column
|
||||
if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -265,49 +265,49 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
|
||||
}
|
||||
|
||||
// Strand column
|
||||
if (obi_view_add_column(o_view, ECOPCR_STRAND_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_STRAND_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_STRAND_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_STRAND_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_STRAND_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Primer 1 column
|
||||
if (obi_view_add_column(o_view, ECOPCR_PRIMER1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_PRIMER1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER1_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Primer 2 column
|
||||
if (obi_view_add_column(o_view, ECOPCR_PRIMER2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_PRIMER2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER2_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Error 1 column
|
||||
if (obi_view_add_column(o_view, ECOPCR_ERROR1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_ERROR1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_ERROR1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_ERROR1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_ERROR1_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Error 2 column
|
||||
if (obi_view_add_column(o_view, ECOPCR_ERROR2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_ERROR2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_ERROR2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_ERROR2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_ERROR2_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Temperature 1 column
|
||||
if (obi_view_add_column(o_view, ECOPCR_TEMP1_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_TEMP1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_TEMP1_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_TEMP1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_TEMP1_COLUMN_NAME);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Temperature 2 column
|
||||
if (obi_view_add_column(o_view, ECOPCR_TEMP2_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_TEMP2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOPCR_TEMP2_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_TEMP2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the %s column", ECOPCR_TEMP2_COLUMN_NAME);
|
||||
return -1;
|
||||
@ -1061,7 +1061,7 @@ int obi_ecopcr(const char* i_dms_name,
|
||||
length = 0;
|
||||
if (posj > posi)
|
||||
length = posj - posi - o1->patlen - o2->patlen;
|
||||
if (posj < posi)
|
||||
else if (circular > 0)
|
||||
length = posj + apatseq->seqlen - posi - o1->patlen - o2->patlen;
|
||||
if ((length>0) && // For when primers touch or overlap
|
||||
(!min_len || (length >= min_len)) &&
|
||||
@ -1151,7 +1151,7 @@ int obi_ecopcr(const char* i_dms_name,
|
||||
length = 0;
|
||||
if (posj > posi)
|
||||
length = posj - posi + 1 - o2->patlen - o1->patlen; /* - o1->patlen : deleted by <EC> (prior to the OBITools3) */
|
||||
if (posj < posi)
|
||||
else if (circular > 0)
|
||||
length = posj + apatseq->seqlen - posi - o1->patlen - o2->patlen;
|
||||
if ((length>0) && // For when primers touch or overlap
|
||||
(!min_len || (length >= min_len)) &&
|
||||
@ -1232,7 +1232,7 @@ int obi_ecopcr(const char* i_dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
fprintf(stderr,"\rDone : 100 %% ");
|
||||
fprintf(stderr,"\rDone : 100 %% \n");
|
||||
return 0;
|
||||
|
||||
return 0;
|
||||
|
104 src/obi_ecotag.c
@ -71,9 +71,12 @@ static int create_output_columns(Obiview_p o_view);
|
||||
* @param name The assigned scientific name.
|
||||
* @param assigned_status_column A pointer on the column where the assigned status should be written.
|
||||
* @param assigned The assigned status (whether the sequence was assigned to a taxon or not).
|
||||
* @param best_match_column A pointer on the column where the list of ids of the best matches should be written.
|
||||
* @param best_match_ids_column A pointer on the column where the list of ids of the best matches should be written.
|
||||
* @param best_match_ids The list of ids of the best matches as an array of the concatenated ids separated by '\0'.
|
||||
* @param best_match_ids_length The total length of the array of ids of best matches.
|
||||
* @param best_match_taxids_column A pointer on the column where the list of taxids of the best matches should be written.
|
||||
* @param best_match_taxids The list of taxids of the best matches as an array of the taxids.
|
||||
* @param best_match_taxids_length The length of the array of taxids of best matches.
|
||||
* @param score_column A pointer on the column where the score should be written.
|
||||
* @param score The similarity score of the sequence with its best match(es).
|
||||
*
|
||||
@ -87,7 +90,8 @@ int print_assignment_result(Obiview_p output_view, index_t line,
|
||||
OBIDMS_column_p assigned_taxid_column, int32_t taxid,
|
||||
OBIDMS_column_p assigned_name_column, const char* name,
|
||||
OBIDMS_column_p assigned_status_column, bool assigned,
|
||||
OBIDMS_column_p best_match_column, const char* best_match_ids, int best_match_ids_length,
|
||||
OBIDMS_column_p best_match_ids_column, const char* best_match_ids, int best_match_ids_length,
|
||||
OBIDMS_column_p best_match_taxids_column, const int32_t* best_match_taxids, int best_match_taxids_length,
|
||||
OBIDMS_column_p score_column, double score);
|
||||
|
||||
|
||||
@ -100,37 +104,44 @@ int print_assignment_result(Obiview_p output_view, index_t line,
|
||||
static int create_output_columns(Obiview_p o_view)
|
||||
{
|
||||
// Score column
|
||||
if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the score in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Assigned taxid column
|
||||
if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the assigned taxid in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Assigned scientific name column
|
||||
if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the assigned scientific name in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Assignment status column
|
||||
if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the assignment status in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Column for array of best match ids
|
||||
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, true, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, true, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the array of ids of the best match in ecotag");
|
||||
obidebug(1, "\nError creating the column for the array of ids of best matches in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Column for array of best match taxids
|
||||
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, true, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the array of taxids of best matches in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -142,7 +153,8 @@ int print_assignment_result(Obiview_p output_view, index_t line,
|
||||
OBIDMS_column_p assigned_taxid_column, int32_t taxid,
|
||||
OBIDMS_column_p assigned_name_column, const char* name,
|
||||
OBIDMS_column_p assigned_status_column, bool assigned,
|
||||
OBIDMS_column_p best_match_column, const char* best_match_ids, int best_match_ids_length,
|
||||
OBIDMS_column_p best_match_ids_column, const char* best_match_ids, int best_match_ids_length,
|
||||
OBIDMS_column_p best_match_taxids_column, const int32_t* best_match_taxids, int best_match_taxids_length,
|
||||
OBIDMS_column_p score_column, double score)
|
||||
{
|
||||
// Write the assigned taxid
|
||||
@ -167,9 +179,16 @@ int print_assignment_result(Obiview_p output_view, index_t line,
|
||||
}
|
||||
|
||||
// Write the best match ids
|
||||
if (obi_set_array_with_col_p_in_view(output_view, best_match_column, line, best_match_ids, (uint8_t)(sizeof(char)*8), best_match_ids_length) < 0)
|
||||
if (obi_set_array_with_col_p_in_view(output_view, best_match_ids_column, line, best_match_ids, (uint8_t)(sizeof(char)*8), best_match_ids_length) < 0)
|
||||
{
|
||||
obidebug(1, "\nError writing a assignment status in a column when writing ecotag results");
|
||||
obidebug(1, "\nError writing the array of best match ids in a column when writing ecotag results");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Write the best match taxids
|
||||
if (obi_set_array_with_col_p_in_view(output_view, best_match_taxids_column, line, best_match_taxids, (uint8_t)(sizeof(OBI_INT)*8), best_match_taxids_length) < 0)
|
||||
{
|
||||
obidebug(1, "\nError writing the array of best match taxids in a column when writing ecotag results");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -199,7 +218,8 @@ int obi_ecotag(const char* dms_name,
|
||||
const char* taxonomy_name,
|
||||
const char* output_view_name,
|
||||
const char* output_view_comments,
|
||||
double ecotag_threshold) // TODO different threshold for the similarity sphere around ref seqs
|
||||
double ecotag_threshold,
|
||||
double bubble_threshold)
|
||||
{
|
||||
|
||||
// For each sequence
|
||||
@ -220,6 +240,7 @@ int obi_ecotag(const char* dms_name,
|
||||
index_t query_seq_idx, ref_seq_idx;
|
||||
double score, best_score;
|
||||
double threshold;
|
||||
double lca_threshold;
|
||||
int lcs_length;
|
||||
int ali_length;
|
||||
Kmer_table_p ktable;
|
||||
@ -235,6 +256,8 @@ int obi_ecotag(const char* dms_name,
|
||||
char* best_match_ids;
|
||||
char* best_match_ids_to_store;
|
||||
int32_t best_match_ids_length;
|
||||
int32_t* best_match_taxids;
|
||||
int32_t* best_match_taxids_to_store;
|
||||
int best_match_count;
|
||||
int buffer_size;
|
||||
int best_match_ids_buffer_size;
|
||||
@ -263,7 +286,8 @@ int obi_ecotag(const char* dms_name,
|
||||
OBIDMS_column_p assigned_taxid_column = NULL;
|
||||
OBIDMS_column_p assigned_name_column = NULL;
|
||||
OBIDMS_column_p assigned_status_column = NULL;
|
||||
OBIDMS_column_p best_match_column = NULL;
|
||||
OBIDMS_column_p best_match_ids_column = NULL;
|
||||
OBIDMS_column_p best_match_taxids_column = NULL;
|
||||
OBIDMS_column_p lca_taxid_a_column = NULL;
|
||||
OBIDMS_column_p score_a_column = NULL;
|
||||
OBIDMS_column_p ref_taxid_column = NULL;
|
||||
@ -367,10 +391,10 @@ int obi_ecotag(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
free(db_threshold_str);
|
||||
if (ecotag_threshold < db_threshold)
|
||||
if (bubble_threshold < db_threshold)
|
||||
{
|
||||
fprintf(stderr, "\nError: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).\n\n",
|
||||
ecotag_threshold, db_threshold);
|
||||
bubble_threshold, db_threshold);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -396,7 +420,8 @@ int obi_ecotag(const char* dms_name,
|
||||
assigned_taxid_column = obi_view_get_column(output_view, ECOTAG_TAXID_COLUMN_NAME);
|
||||
assigned_name_column = obi_view_get_column(output_view, ECOTAG_NAME_COLUMN_NAME);
|
||||
assigned_status_column = obi_view_get_column(output_view, ECOTAG_STATUS_COLUMN_NAME);
|
||||
best_match_column = obi_view_get_column(output_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME);
|
||||
best_match_ids_column = obi_view_get_column(output_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME);
|
||||
best_match_taxids_column = obi_view_get_column(output_view, ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME);
|
||||
score_column = obi_view_get_column(output_view, ECOTAG_SCORE_COLUMN_NAME);
|
||||
|
||||
// Open the used reference columns
|
||||
@ -453,9 +478,17 @@ int obi_ecotag(const char* dms_name,
|
||||
return -1;
|
||||
}
|
||||
|
||||
best_match_taxids = (int32_t*) malloc(buffer_size* sizeof(int32_t));
|
||||
if (best_match_taxids == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for the best match taxid array in ecotag");
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (i=0; i < query_count; i++)
|
||||
{
|
||||
if (i%100 == 0)
|
||||
if (i%1000 == 0)
|
||||
fprintf(stderr,"\rDone : %f %% ", (i / (float) query_count)*100);
|
||||
|
||||
best_match_count = 0;
|
||||
@ -514,7 +547,7 @@ int obi_ecotag(const char* dms_name,
|
||||
|
||||
// Store in best match array
|
||||
|
||||
// Grow match array if needed
|
||||
// Grow match and taxid array if needed
|
||||
if (best_match_count == buffer_size)
|
||||
{
|
||||
buffer_size = buffer_size*2;
|
||||
@ -525,6 +558,13 @@ int obi_ecotag(const char* dms_name,
|
||||
obidebug(1, "\nError reallocating match array when assigning");
|
||||
return -1;
|
||||
}
|
||||
best_match_taxids = (int32_t*) realloc(best_match_taxids, buffer_size*sizeof(int32_t));
|
||||
if (best_match_taxids == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError reallocating match taxids array when assigning");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
id = obi_get_str_with_elt_idx_and_col_p_in_view(ref_view, ref_id_column, j, 0);
|
||||
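The growth step added above doubles the two parallel buffers, best-match indices and best-match taxids, together so that slot j keeps describing the same hit in both. A minimal sketch of that step with hypothetical names and types (the original simply aborts on allocation failure, as mirrored here):

/* Sketch: grow both parallel match buffers by doubling their capacity. */
#include <stdlib.h>
#include <stdint.h>

static int grow_match_buffers(int32_t **match_idx, int32_t **match_taxids,
                              int *buffer_size)
{
    int new_size = *buffer_size * 2;
    int32_t *tmp;

    tmp = realloc(*match_idx, new_size * sizeof(int32_t));
    if (tmp == NULL)
        return -1;                 /* caller aborts, as in the code above */
    *match_idx = tmp;

    tmp = realloc(*match_taxids, new_size * sizeof(int32_t));
    if (tmp == NULL)
        return -1;
    *match_taxids = tmp;

    *buffer_size = new_size;
    return 0;
}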
@ -545,6 +585,7 @@ int obi_ecotag(const char* dms_name,
|
||||
|
||||
// Save match
|
||||
best_match_array[best_match_count] = j;
|
||||
best_match_taxids[best_match_count] = obi_get_int_with_elt_idx_and_col_p_in_view(ref_view, ref_taxid_column, j, 0);
|
||||
best_match_count++;
|
||||
strcpy(best_match_ids+best_match_ids_length, id);
|
||||
best_match_ids_length = best_match_ids_length + id_len + 1;
|
||||
@ -558,11 +599,16 @@ int obi_ecotag(const char* dms_name,
|
||||
{
|
||||
best_match_idx = best_match_array[j];
|
||||
|
||||
// Find the LCA for the chosen threshold
|
||||
// Find the LCA for the highest threshold between best_score and the chosen bubble threshold
|
||||
score_array = obi_get_array_with_col_p_in_view(ref_view, score_a_column, best_match_idx, &lca_array_length);
|
||||
|
||||
if (bubble_threshold < best_score)
|
||||
lca_threshold = best_score;
|
||||
else
|
||||
lca_threshold = bubble_threshold;
|
||||
|
||||
k = 0;
|
||||
while ((k < lca_array_length) && (score_array[k] >= ecotag_threshold))
|
||||
while ((k < lca_array_length) && (score_array[k] >= lca_threshold))
|
||||
k++;
|
||||
|
||||
if (k>0)
|
||||
@ -570,12 +616,12 @@ int obi_ecotag(const char* dms_name,
|
||||
lca_array = obi_get_array_with_col_p_in_view(ref_view, lca_taxid_a_column, best_match_idx, &lca_array_length);
|
||||
if (j>0)
|
||||
{
|
||||
lca = obi_taxo_get_taxon_with_taxid(taxonomy, lca_taxid);
|
||||
if (lca == NULL)
|
||||
{
|
||||
obidebug(1, "\nError getting a taxon from a taxid when doing taxonomic assignment");
|
||||
return -1;
|
||||
}
|
||||
// lca = obi_taxo_get_taxon_with_taxid(taxonomy, lca_taxid);
|
||||
// if (lca == NULL)
|
||||
// {
|
||||
// obidebug(1, "\nError getting a taxon from a taxid when doing taxonomic assignment");
|
||||
// return -1;
|
||||
// }
|
||||
lca_in_array = obi_taxo_get_taxon_with_taxid(taxonomy, lca_array[k-1]);
|
||||
if (lca_in_array == NULL)
|
||||
{
|
||||
@ -629,6 +675,7 @@ int obi_ecotag(const char* dms_name,
|
||||
else
|
||||
lca_name = lca->name;
|
||||
best_match_ids_to_store = best_match_ids;
|
||||
best_match_taxids_to_store = best_match_taxids;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -636,6 +683,7 @@ int obi_ecotag(const char* dms_name,
|
||||
lca_name = OBIStr_NA;
|
||||
lca_taxid = OBIInt_NA;
|
||||
best_match_ids_to_store = OBITuple_NA;
|
||||
best_match_taxids_to_store = OBITuple_NA;
|
||||
score = OBIFloat_NA;
|
||||
}
|
||||
|
||||
@ -644,7 +692,8 @@ int obi_ecotag(const char* dms_name,
|
||||
assigned_taxid_column, lca_taxid,
|
||||
assigned_name_column, lca_name,
|
||||
assigned_status_column, assigned,
|
||||
best_match_column, best_match_ids_to_store, best_match_ids_length,
|
||||
best_match_ids_column, best_match_ids_to_store, best_match_ids_length,
|
||||
best_match_taxids_column, best_match_taxids_to_store, best_match_count,
|
||||
score_column, best_score
|
||||
) < 0)
|
||||
return -1;
|
||||
@ -652,6 +701,7 @@ int obi_ecotag(const char* dms_name,
|
||||
|
||||
free(best_match_array);
|
||||
free(best_match_ids);
|
||||
free(best_match_taxids);
|
||||
|
||||
obi_close_taxonomy(taxonomy);
|
||||
obi_save_and_close_view(query_view);
|
||||
|
@ -23,7 +23,8 @@
|
||||
#define ECOTAG_TAXID_COLUMN_NAME "TAXID"
|
||||
#define ECOTAG_NAME_COLUMN_NAME "SCIENTIFIC_NAME"
|
||||
#define ECOTAG_STATUS_COLUMN_NAME "ID_STATUS"
|
||||
#define ECOTAG_BEST_MATCH_IDS_COLUMN_NAME "BEST_MATCH"
|
||||
#define ECOTAG_BEST_MATCH_IDS_COLUMN_NAME "BEST_MATCH_IDS"
|
||||
#define ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME "BEST_MATCH_TAXIDS"
|
||||
#define ECOTAG_SCORE_COLUMN_NAME "BEST_IDENTITY"
|
||||
|
||||
|
||||
@ -41,12 +42,14 @@
|
||||
* @param output_view_name The name to give to the output view.
|
||||
* @param output_view_comments The comments to associate to the output view.
|
||||
* @param ecotag_threshold The threshold at which to assign.
|
||||
* @param bubble_threshold The threshold at which to look for an LCA (i.e. minimum identity considered for the assignment circle);
|
||||
* the threshold actually used will be the highest between this value and the best assignment score found.
|
||||
*
|
||||
* The algorithm works like this:
|
||||
* For each query sequence:
|
||||
* Align with reference database
|
||||
* Keep the indices of all the best matches
|
||||
* For each kept index, get the LCA at that threshold as stored in the reference database, then the LCA of those LCAs
|
||||
* For each kept index, get the LCA at the highest threshold between bubble_threshold and the best assignment score found (as stored in the reference database), then the LCA of those LCAs
|
||||
* Write result (max score, threshold, taxid and scientific name of the LCA assigned, list of the ids of the best matches)
|
||||
*
|
||||
* @returns A value indicating the success of the operation.
|
||||
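A sketch of the LCA-selection rule this new parameter introduces, as a hypothetical helper rather than obi_ecotag() itself: the per-reference arrays pair each stored LCA with the similarity level at which it holds, and the pair actually used is the deepest one whose score still reaches max(bubble_threshold, best_score).

/* Sketch only (hypothetical helper): pick the LCA taxid stored for the
 * highest threshold between the bubble threshold and the best score found. */
#include <stdint.h>

static int32_t select_lca_taxid(const int32_t *lca_taxids,
                                const double  *lca_scores,
                                int32_t array_length,
                                double bubble_threshold,
                                double best_score)
{
    double lca_threshold = (bubble_threshold < best_score) ? best_score
                                                           : bubble_threshold;
    int32_t k = 0;

    while (k < array_length && lca_scores[k] >= lca_threshold)
        k++;

    if (k == 0)
        return -1;   /* sentinel: no LCA stored at this similarity level */
    return lca_taxids[k - 1];
}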
@ -64,7 +67,8 @@ int obi_ecotag(const char* dms_name,
|
||||
const char* taxonomy_name,
|
||||
const char* output_view_name,
|
||||
const char* output_view_comments,
|
||||
double ecotag_threshold);
|
||||
double ecotag_threshold,
|
||||
double bubble_threshold);
|
||||
|
||||
|
||||
#endif /* OBI_ECOTAG_H_ */
|
||||
|
@ -155,35 +155,35 @@ static int create_alignment_output_columns(Obiview_p output_view,
|
||||
bool normalize, int reference, bool similarity_mode)
|
||||
{
|
||||
// Create the column for the ids of the 1st sequence aligned
|
||||
if (obi_view_add_column(output_view, ID1_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, id1_indexer_name, NULL, -1, ID1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, ID1_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, id1_indexer_name, NULL, -1, ID1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the first column for the sequence ids when aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create the column for the ids of the 2nd sequence aligned
|
||||
if (obi_view_add_column(output_view, ID2_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, id2_indexer_name, NULL, -1, ID2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, ID2_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, id2_indexer_name, NULL, -1, ID2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the second column for the sequence ids when aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create the column for the index (in the input view) of the first sequences aligned
|
||||
if (obi_view_add_column(output_view, IDX1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, IDX1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, IDX1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, IDX1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the first column for the sequence indices when aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create the column for the index (in the input view) of the second sequences aligned
|
||||
if (obi_view_add_column(output_view, IDX2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, IDX2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, IDX2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, IDX2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the second column for the sequence indices when aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create the column for the LCS length
|
||||
if (obi_view_add_column(output_view, LCS_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, LCS_LENGTH_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, LCS_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, LCS_LENGTH_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the LCS length when aligning");
|
||||
return -1;
|
||||
@ -192,7 +192,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
|
||||
// Create the column for the alignment length if it is computed
|
||||
if ((reference == ALILEN) && (normalize || !similarity_mode))
|
||||
{
|
||||
if (obi_view_add_column(output_view, ALI_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ALI_LENGTH_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, ALI_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ALI_LENGTH_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the alignment length when aligning");
|
||||
return -1;
|
||||
@ -201,7 +201,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
|
||||
// Create the column for the alignment score
|
||||
if (normalize)
|
||||
{
|
||||
if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
|
||||
if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the score when aligning");
|
||||
return -1;
|
||||
@ -209,7 +209,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
|
||||
}
|
||||
else
|
||||
{
|
||||
if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
|
||||
if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the column for the score when aligning");
|
||||
return -1;
|
||||
@ -219,14 +219,14 @@ static int create_alignment_output_columns(Obiview_p output_view,
|
||||
if (print_seq)
|
||||
{
|
||||
// Create the column for the first sequences aligned
|
||||
if (obi_view_add_column(output_view, SEQ1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, seq1_indexer_name, NULL, -1, SEQ1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, SEQ1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, seq1_indexer_name, NULL, -1, SEQ1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the first column for the sequences when aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create the column for the second sequences aligned
|
||||
if (obi_view_add_column(output_view, SEQ2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, seq2_indexer_name, NULL, -1, SEQ2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, SEQ2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, seq2_indexer_name, NULL, -1, SEQ2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the second column for the sequences when aligning");
|
||||
return -1;
|
||||
@ -235,14 +235,14 @@ static int create_alignment_output_columns(Obiview_p output_view,
|
||||
if (print_count)
|
||||
{
|
||||
// Create the column for the count of the first sequences aligned
|
||||
if (obi_view_add_column(output_view, COUNT1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, COUNT1_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, COUNT1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, COUNT1_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the first column for the sequence counts when aligning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create the column for the count of the second sequences aligned
|
||||
if (obi_view_add_column(output_view, COUNT2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, COUNT2_COLUMN_COMMENTS, true) < 0)
|
||||
if (obi_view_add_column(output_view, COUNT2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, COUNT2_COLUMN_COMMENTS, true) < 0)
|
||||
{
|
||||
obidebug(1, "\nError creating the second column for the sequence counts when aligning");
|
||||
return -1;
|
||||
|
137
src/obiavl.c
@ -582,6 +582,7 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
|
||||
{
|
||||
size_t file_size;
|
||||
size_t new_data_size;
|
||||
size_t header_size;
|
||||
double multiple;
|
||||
int file_descriptor;
|
||||
|
||||
@ -589,6 +590,8 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
|
||||
multiple = ceil((double) (ONE_IF_ZERO((avl->header)->nb_items * sizeof(AVL_node_t))) / (double) getpagesize());
|
||||
new_data_size = ((size_t) multiple) * getpagesize();
|
||||
|
||||
header_size = (avl->header)->header_size;
|
||||
|
||||
// Check that it is actually greater than the current size of the file, otherwise no need to truncate
|
||||
if ((avl->header)->avl_size == new_data_size)
|
||||
return 0;
|
||||
@ -596,16 +599,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
|
||||
// Get the file descriptor
|
||||
file_descriptor = avl->avl_fd;
|
||||
|
||||
// Unmap the tree before truncating the file
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(avl->tree, (avl->header)->avl_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the tree of an AVL before truncating");
|
||||
return -1;
|
||||
}
|
||||
if (munmap(avl->header, header_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the tree of an AVL before truncating");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Truncate the file
|
||||
file_size = (avl->header)->header_size + new_data_size;
|
||||
file_size = header_size + new_data_size;
|
||||
if (ftruncate(file_descriptor, file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
@ -613,7 +622,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Remap the data
|
||||
// Remap the header and the data
|
||||
|
||||
avl->header = mmap(NULL,
|
||||
header_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
file_descriptor,
|
||||
0
|
||||
);
|
||||
if (avl->header == MAP_FAILED)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
|
||||
return -1;
|
||||
}
|
||||
|
||||
avl->tree = mmap(NULL,
|
||||
new_data_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
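The obiavl.c hunks above all apply the same change: every mapping of the file (header and data) is released before ftruncate(), because WSL refuses to truncate a file that still has active mappings, and both mappings are then rebuilt. A condensed sketch of that pattern follows, with illustrative names rather than the real OBIDMS_avl fields, and assuming header_size is a multiple of the page size as it is for the AVL headers.

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Sketch only: unmap everything, truncate, remap header then data. */
    static int resize_mapped_file(int fd, void** header, size_t header_size,
                                  void** data, size_t old_data_size,
                                  size_t new_data_size)
    {
        /* 1. Unmap the entire file (WSL cannot truncate a mapped file). */
        if (munmap(*data, old_data_size) < 0)
            return -1;
        if (munmap(*header, header_size) < 0)
            return -1;

        /* 2. Truncate (shrink or grow) the file to its new size. */
        if (ftruncate(fd, (off_t) (header_size + new_data_size)) < 0)
            return -1;

        /* 3. Remap the header, then the data just after it. */
        *header = mmap(NULL, header_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (*header == MAP_FAILED)
            return -1;

        *data = mmap(NULL, new_data_size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, (off_t) header_size);
        if (*data == MAP_FAILED)
            return -1;

        return 0;
    }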
@ -640,6 +664,7 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
|
||||
{
|
||||
size_t file_size;
|
||||
index_t new_data_size;
|
||||
size_t header_size;
|
||||
double multiple;
|
||||
int file_descriptor;
|
||||
|
||||
@ -647,14 +672,17 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
|
||||
multiple = ceil((double) (ONE_IF_ZERO((avl_data->header)->data_size_used)) / (double) getpagesize());
|
||||
new_data_size = ((index_t) multiple) * getpagesize();
|
||||
|
||||
header_size = (avl_data->header)->header_size;
|
||||
|
||||
// Check that it is actually greater than the current size of the file, otherwise no need to truncate
|
||||
if ((avl_data->header)->data_size_max == new_data_size)
|
||||
if ((avl_data->header)->data_size_max >= new_data_size)
|
||||
return 0;
|
||||
|
||||
// Get the file descriptor
|
||||
file_descriptor = avl_data->data_fd;
|
||||
|
||||
// Unmap the data before truncating the file
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
|
||||
if (munmap(avl_data->data, (avl_data->header)->data_size_max) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
@ -662,16 +690,39 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (munmap(avl_data->header, header_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the header of an AVL before truncating");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Truncate the file
|
||||
file_size = (avl_data->header)->header_size + new_data_size;
|
||||
file_size = header_size + new_data_size;
|
||||
if (ftruncate(file_descriptor, file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError truncating an AVL data file");
|
||||
obidebug(1, "\nError truncating an AVL data file, old data size = %lld, new data size = %lld", (avl_data->header)->data_size_max, new_data_size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Remap the data
|
||||
|
||||
avl_data->header = mmap(NULL,
|
||||
header_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
file_descriptor,
|
||||
0
|
||||
);
|
||||
|
||||
if (avl_data->header == MAP_FAILED)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
|
||||
return -1;
|
||||
}
|
||||
|
||||
avl_data->data = mmap(NULL,
|
||||
new_data_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
@ -710,6 +761,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
|
||||
header_size = (avl->header)->header_size;
|
||||
file_size = header_size + new_data_size;
|
||||
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(avl->tree, old_data_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
|
||||
return -1;
|
||||
}
|
||||
if (munmap(avl->header, header_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the header of an AVL tree file before enlarging");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Enlarge the file
|
||||
if (ftruncate(avl_file_descriptor, file_size) < 0)
|
||||
{
|
||||
@ -718,12 +783,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unmap and re-map the data
|
||||
// Re-map
|
||||
|
||||
if (munmap(avl->tree, old_data_size) < 0)
|
||||
avl->header = mmap(NULL,
|
||||
header_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
avl_file_descriptor,
|
||||
0
|
||||
);
|
||||
|
||||
if (avl->header == MAP_FAILED)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
|
||||
obidebug(1, "\nError re-mmapping the header of an AVL tree file after enlarging the file");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -768,6 +841,20 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
|
||||
header_size = (avl_data->header)->header_size;
|
||||
file_size = header_size + new_data_size;
|
||||
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(avl_data->data, old_data_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
|
||||
return -1;
|
||||
}
|
||||
if (munmap(avl_data->header, header_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the header of an AVL tree data file before enlarging");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Enlarge the file
|
||||
if (ftruncate(avl_data_file_descriptor, file_size) < 0)
|
||||
{
|
||||
@ -776,12 +863,19 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unmap and re-map the data
|
||||
// Re-map
|
||||
|
||||
if (munmap(avl_data->data, old_data_size) < 0)
|
||||
avl_data->header = mmap(NULL,
|
||||
header_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
avl_data_file_descriptor,
|
||||
0
|
||||
);
|
||||
if (avl_data->header == MAP_FAILED)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
|
||||
obidebug(1, "\nError re-mmapping the header of an AVL tree data file after enlarging the file");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -792,7 +886,6 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
|
||||
avl_data_file_descriptor,
|
||||
header_size
|
||||
);
|
||||
|
||||
if (avl_data->data == MAP_FAILED)
|
||||
{
|
||||
obi_set_errno(OBI_AVL_ERROR);
|
||||
@ -2259,7 +2352,13 @@ index_t obi_avl_add(OBIDMS_avl_p avl, Obi_blob_p value)
|
||||
parent = next;
|
||||
|
||||
// Compare the crc of the value with the crc of the current node
|
||||
comp = (current_node->crc64) - crc;
|
||||
//comp = (current_node->crc64) - crc;
|
||||
if ((current_node->crc64) == crc)
|
||||
comp = 0;
|
||||
else if ((current_node->crc64) > crc)
|
||||
comp = 1;
|
||||
else
|
||||
comp = -1;
|
||||
|
||||
if (comp == 0)
|
||||
{ // check if really same value
|
||||
@ -2354,7 +2453,13 @@ index_t obi_avl_find(OBIDMS_avl_p avl, Obi_blob_p value)
|
||||
current_node = (avl->tree)+next;
|
||||
|
||||
// Compare the crc of the value with the crc of the current node
|
||||
comp = (current_node->crc64) - crc;
|
||||
//comp = (current_node->crc64) - crc;
|
||||
if ((current_node->crc64) == crc)
|
||||
comp = 0;
|
||||
else if ((current_node->crc64) > crc)
|
||||
comp = 1;
|
||||
else
|
||||
comp = -1;
|
||||
|
||||
if (comp == 0)
|
||||
{ // Check if really same value
|
||||
|
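The two hunks above replace the subtraction comp = (current_node->crc64) - crc with an explicit three-way comparison. Whether comp is an int (the unsigned 64-bit difference gets truncated) or a signed 64-bit value (the difference wraps when it exceeds the signed range), its sign can come out wrong, so equal-looking or misordered CRCs corrupt the AVL ordering. A sketch of the safe pattern, with illustrative values for the failure mode:

    #include <stdint.h>

    /* Three-way compare of two 64-bit CRCs: never overflows or truncates. */
    static int crc64_cmp(uint64_t a, uint64_t b)
    {
        if (a == b)
            return 0;
        return (a > b) ? 1 : -1;
    }

    /* Failure mode of the subtraction (values chosen for illustration):
     *   a = 0x0000000100000000, b = 0x0000000000000001
     *   a - b = 0x00000000FFFFFFFF
     *   stored in a 32-bit int this typically becomes -1, i.e. "a < b",
     *   although a is actually greater than b.                           */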
133
src/obidms.c
@ -316,6 +316,15 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
|
||||
multiple = ceil((double) new_size / (double) getpagesize());
|
||||
rounded_new_size = multiple * getpagesize();
|
||||
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(dms->infos, (dms->infos)->file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBIDMS_UNKNOWN_ERROR);
|
||||
obidebug(1, "\nError munmapping a DMS information file when enlarging");
|
||||
close(infos_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Enlarge the file
|
||||
if (ftruncate(infos_file_descriptor, rounded_new_size) < 0)
|
||||
{
|
||||
@ -325,15 +334,7 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unmap and remap the file
|
||||
if (munmap(dms->infos, (dms->infos)->file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBIDMS_UNKNOWN_ERROR);
|
||||
obidebug(1, "\nError munmapping a DMS information file when enlarging");
|
||||
close(infos_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Remap the file
|
||||
dms->infos = mmap(NULL,
|
||||
rounded_new_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
@ -1409,6 +1410,107 @@ DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name)
|
||||
}
|
||||
|
||||
|
||||
char* obi_dms_formatted_infos(OBIDMS_p dms, bool detailed)
|
||||
{
|
||||
char* dms_infos = NULL;
|
||||
char* view_infos = NULL;
|
||||
char* view_name = NULL;
|
||||
char* tax_name = NULL;
|
||||
char* all_tax_dir_path = NULL;
|
||||
int i;
|
||||
struct dirent* dp;
|
||||
Obiview_p view;
|
||||
|
||||
// DMS name
|
||||
dms_infos = (char*) malloc((strlen("# DMS name: ")+strlen(dms->dms_name)+strlen("\n# Views:\n")+1) * sizeof(char));
|
||||
if (dms_infos == NULL)
|
||||
{
|
||||
obidebug(1, "\nError allocating memory for DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
strcpy(dms_infos, "# DMS name: ");
|
||||
strcat(dms_infos, dms->dms_name);
|
||||
strcat(dms_infos, "\n# Views:\n");
|
||||
|
||||
// Go through views and get their infos
|
||||
rewinddir(dms->view_directory);
|
||||
while ((dp = readdir(dms->view_directory)) != NULL)
|
||||
{
|
||||
if ((dp->d_name)[0] == '.')
|
||||
continue;
|
||||
i=0;
|
||||
while ((dp->d_name)[i] != '.')
|
||||
i++;
|
||||
view_name = (char*) malloc((i+1) * sizeof(char));
|
||||
if (view_name == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for a view name when getting formatted DMS infos: file %s", dp->d_name);
|
||||
return NULL;
|
||||
}
|
||||
strncpy(view_name, dp->d_name, i);
|
||||
view_name[i] = '\0';
|
||||
view = obi_open_view(dms, view_name);
|
||||
if (view == NULL)
|
||||
{
|
||||
obidebug(1, "\nError opening a view to get DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
if (detailed)
|
||||
view_infos = obi_view_formatted_infos(view, detailed);
|
||||
else
|
||||
view_infos = obi_view_formatted_infos_one_line(view);
|
||||
if (view_infos == NULL)
|
||||
{
|
||||
obidebug(1, "\nError getting a view infos to get DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen(view_infos)+1) * sizeof(char));
|
||||
if (dms_infos == NULL)
|
||||
{
|
||||
obidebug(1, "\nError reallocating memory for DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
strcat(dms_infos, view_infos);
|
||||
if (obi_save_and_close_view(view) < 0)
|
||||
{
|
||||
obidebug(1, "\nError closing view while getting DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
if (detailed)
|
||||
{
|
||||
dms_infos = realloc(dms_infos, (strlen(dms_infos)+2) * sizeof(char));
|
||||
strcat(dms_infos, "\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Add taxonomies
|
||||
dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen("\n# Taxonomies:\n")+1) * sizeof(char));
|
||||
if (dms_infos == NULL)
|
||||
{
|
||||
obidebug(1, "\nError reallocating memory for DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
strcat(dms_infos, "# Taxonomies:\n");
|
||||
rewinddir(dms->tax_directory);
|
||||
while ((dp = readdir(dms->tax_directory)) != NULL)
|
||||
{
|
||||
if ((dp->d_name)[0] == '.')
|
||||
continue;
|
||||
tax_name = dp->d_name;
|
||||
dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen(" # ")+strlen(view_infos)+1) * sizeof(char));
|
||||
if (dms_infos == NULL)
|
||||
{
|
||||
obidebug(1, "\nError reallocating memory for DMS formatted infos");
|
||||
return NULL;
|
||||
}
|
||||
strcat(dms_infos, " # ");
|
||||
strcat(dms_infos, tax_name);
|
||||
}
|
||||
return dms_infos;
|
||||
}
|
||||
|
||||
|
||||
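obi_dms_formatted_infos() above, like the *_formatted_infos() functions further down, grows its result with repeated realloc()/strcat() calls, each followed by a NULL check. A hedged sketch of a helper wrapping that pattern; str_append() is an illustrative name, not part of the OBIDMS API:

    #include <stdlib.h>
    #include <string.h>

    /* Append suffix to dest, reallocating as needed.
     * Returns the (possibly moved) buffer, or NULL on allocation failure. */
    static char* str_append(char* dest, const char* suffix)
    {
        size_t old_len = (dest == NULL) ? 0 : strlen(dest);
        char*  grown   = realloc(dest, old_len + strlen(suffix) + 1);

        if (grown == NULL)
        {
            free(dest);        /* simplification: drop the partial result */
            return NULL;
        }
        if (old_len == 0)
            grown[0] = '\0';   /* start from an empty string */
        strcat(grown, suffix);
        return grown;
    }

    /* Usage sketch:
     *   char* infos = str_append(NULL, "# DMS name: ");
     *   if (infos != NULL) infos = str_append(infos, dms_name);           */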
// TODO move somewhere else maybe
|
||||
// TODO discuss arguments
|
||||
obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number)
|
||||
@ -1474,8 +1576,8 @@ obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, c
|
||||
|
||||
// Create new column
|
||||
column_2 = obi_create_column(dms_2, column_name, header_1->returned_data_type, header_1->line_count,
|
||||
header_1->nb_elements_per_line, header_1->elements_names, true, header_1->tuples,
|
||||
header_1->to_eval, new_avl_name, (header_1->associated_column).column_name,
|
||||
header_1->nb_elements_per_line, header_1->elements_names, true, header_1->dict_column,
|
||||
header_1->tuples, header_1->to_eval, new_avl_name, (header_1->associated_column).column_name,
|
||||
(header_1->associated_column).version, header_1->comments);
|
||||
|
||||
if (column_2 == NULL)
|
||||
@ -1496,7 +1598,7 @@ obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, c
|
||||
memcpy(column_2->data, column_1->data, header_1->data_size);
|
||||
|
||||
// Copy the AVL files if there are some (overwriting the automatically created files)
|
||||
if ((header_1->returned_data_type == OBI_STR) || (header_1->returned_data_type == OBI_SEQ) || (header_1->returned_data_type == OBI_QUAL))
|
||||
if ((header_1->tuples) || ((header_1->returned_data_type == OBI_STR) || (header_1->returned_data_type == OBI_SEQ) || (header_1->returned_data_type == OBI_QUAL)))
|
||||
{
|
||||
avl_name_1 = (char*) malloc((strlen(header_1->indexer_name) + 1) * sizeof(char));
|
||||
if (avl_name_1 == NULL)
|
||||
@ -1659,6 +1761,12 @@ int obi_import_view(const char* dms_path_1, const char* dms_path_2, const char*
|
||||
else // Non-typed view
|
||||
view_2 = obi_new_view(dms_2, view_name_2, NULL, NULL, (view_1->infos)->comments);
|
||||
|
||||
if (view_2 == NULL)
|
||||
{
|
||||
obidebug(1, "\nError creating the new view to import a view in a DMS");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Import line count
|
||||
view_2->infos->line_count = view_1->infos->line_count;
|
||||
|
||||
@ -1706,6 +1814,7 @@ int obi_import_view(const char* dms_path_1, const char* dms_path_2, const char*
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
NULL,
|
||||
NULL,
|
||||
-1,
|
||||
|
19
src/obidms.h
@ -40,7 +40,7 @@
 */
#define MAX_NB_OPENED_INDEXERS (1000) /**< The maximum number of indexers open at the same time.
 */
#define MAX_PATH_LEN (1024) /**< Maximum length for the character string defining a
#define MAX_PATH_LEN (2048) /**< Maximum length for the character string defining a
 * file or directory path.
 */

@ -459,6 +459,23 @@ char* obi_dms_get_full_path(OBIDMS_p dms, const char* path_name);
|
||||
DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Returns the informations of a DMS with a human readable format (dms name, taxonomies and view infos).
|
||||
*
|
||||
* @warning The returned pointer has to be freed by the caller.
|
||||
*
|
||||
* @param column A pointer on a DMS.
|
||||
* @param detailed Whether the informations should contain detailed view infos.
|
||||
*
|
||||
* @returns A pointer on a character array where the formatted DMS informations are stored.
|
||||
* @retval NULL if an error occurred.
|
||||
*
|
||||
* @since September 2020
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
char* obi_dms_formatted_infos(OBIDMS_p dms, bool detailed);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Imports a column, copying it from a DMS to another DMS, and returns the version of the column in the destination DMS.
|
||||
*
|
||||
|
@ -3649,6 +3649,18 @@ ecotx_t* obi_taxo_get_taxon_with_taxid(OBIDMS_taxonomy_p taxonomy, int32_t taxid
|
||||
}
|
||||
|
||||
|
||||
char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
|
||||
{
|
||||
return (((taxonomy->names)->names)[idx]).name;
|
||||
}
|
||||
|
||||
|
||||
ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
|
||||
{
|
||||
return (((taxonomy->names)->names)[idx]).taxon;
|
||||
}
|
||||
|
||||
|
||||
int obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid) // TODO discuss that this doesn't work with deprecated taxids
|
||||
{
|
||||
ecotx_t* next_parent;
|
||||
|
@ -447,8 +447,51 @@ ecotx_t* obi_taxo_get_superkingdom(ecotx_t* taxon, OBIDMS_taxonomy_p taxonomy);
|
||||
const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks);
|
||||
|
||||
|
||||
// TODO
|
||||
/**
|
||||
* @brief Function checking whether a taxid is included in a subset of the taxonomy.
|
||||
*
|
||||
* @param taxonomy A pointer on the taxonomy structure.
|
||||
* @param restrict_to_taxids An array of taxids. The researched taxid must be under at least one of those array taxids.
|
||||
* @param count Number of taxids in restrict_to_taxids.
|
||||
* @param taxid The taxid to check.
|
||||
*
|
||||
* @returns A value indicating whether the taxid is included in the chosen subset of the taxonomy.
|
||||
* @retval 0 if the taxid is not included in the subset of the taxonomy.
|
||||
* @retval 1 if the taxid is included in the subset of the taxonomy.
|
||||
*
|
||||
* @since October 2020
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
int obi_taxo_is_taxid_included(OBIDMS_taxonomy_p taxonomy,
|
||||
int32_t* restrict_to_taxids,
|
||||
int32_t count,
|
||||
int32_t taxid);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Function returning the name of a taxon from its index in the taxonomy name index (econameidx_t).
|
||||
*
|
||||
* @param taxonomy A pointer on the taxonomy structure.
|
||||
* @param idx The index at which the name is in the taxonomy name index (econameidx_t).
|
||||
*
|
||||
* @returns The taxon name.
|
||||
*
|
||||
* @since October 2020
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Function returning a taxon structure from its index in the taxonomy name index (econameidx_t).
|
||||
*
|
||||
* @param taxonomy A pointer on the taxonomy structure.
|
||||
* @param idx The index at which the taxon is in the taxonomy name index (econameidx_t).
|
||||
*
|
||||
* @returns The taxon structure.
|
||||
*
|
||||
* @since October 2020
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx);
|
||||
|
||||
|
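The declarations above add a taxonomy-subset test and two name-index accessors. A short usage sketch for obi_taxo_is_taxid_included(), matching the signature declared above; the taxonomy handle and the taxid values are placeholders chosen for illustration, and the header providing OBIDMS_taxonomy_p is assumed to be included.

    #include <stdint.h>
    #include <stdio.h>

    /* Check whether a query taxid falls under one of two restriction clades. */
    void example_subset_check(OBIDMS_taxonomy_p taxonomy)
    {
        int32_t restrict_to[2] = {7742, 50557};   /* example taxids: Vertebrata, Insecta */
        int32_t query_taxid    = 9606;            /* example taxid: Homo sapiens          */

        if (obi_taxo_is_taxid_included(taxonomy, restrict_to, 2, query_taxid))
            printf("taxid %d is under at least one of the restriction taxids\n",
                   (int) query_taxid);
    }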
@ -1024,6 +1024,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
||||
index_t nb_elements_per_line,
|
||||
char* elements_names,
|
||||
bool elt_names_formatted,
|
||||
bool dict_column,
|
||||
bool tuples,
|
||||
bool to_eval,
|
||||
const char* indexer_name,
|
||||
@ -1282,6 +1283,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
||||
header->nb_elements_per_line = nb_elements_per_line;
|
||||
header->stored_data_type = stored_data_type;
|
||||
header->returned_data_type = returned_data_type;
|
||||
header->dict_column = dict_column;
|
||||
header->tuples = tuples;
|
||||
header->to_eval = to_eval;
|
||||
header->creation_date = time(NULL);
|
||||
@ -1312,19 +1314,10 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Store the associated column reference if needed // TODO discuss cases
|
||||
if (data_type == OBI_QUAL)
|
||||
// Store the associated column reference if needed
|
||||
if ((associated_column_name != NULL) && (*associated_column_name != '\0'))
|
||||
{
|
||||
if ((associated_column_name == NULL) || (*associated_column_name == '\0'))
|
||||
{
|
||||
obidebug(1, "\nError: The name of the associated column when creating a new column is NULL");
|
||||
munmap(new_column->header, header_size);
|
||||
close(column_file_descriptor);
|
||||
free(new_column);
|
||||
return NULL;
|
||||
}
|
||||
strcpy((header->associated_column).column_name, associated_column_name);
|
||||
|
||||
if (associated_column_version == -1)
|
||||
{
|
||||
obidebug(1, "\nError: The version of the associated column when creating a new column is not defined");
|
||||
@ -1336,6 +1329,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
||||
(header->associated_column).version = associated_column_version;
|
||||
}
|
||||
|
||||
|
||||
// If the data type is OBI_STR, OBI_SEQ or OBI_QUAL, the associated obi_indexer is opened or created
|
||||
if ((returned_data_type == OBI_STR) || (returned_data_type == OBI_SEQ) || (returned_data_type == OBI_QUAL) || tuples)
|
||||
{
|
||||
@ -1350,6 +1344,8 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
||||
}
|
||||
strncpy(header->indexer_name, final_indexer_name, INDEXER_MAX_NAME);
|
||||
}
|
||||
else
|
||||
new_column->indexer = NULL;
|
||||
|
||||
// Fill the data with NA values
|
||||
obi_ini_to_NA_values(new_column, 0, nb_lines);
|
||||
@ -1558,6 +1554,8 @@ OBIDMS_column_p obi_open_column(OBIDMS_p dms,
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
else
|
||||
column->indexer = NULL;
|
||||
|
||||
if (close(column_file_descriptor) < 0)
|
||||
{
|
||||
@ -1615,6 +1613,7 @@ OBIDMS_column_p obi_clone_column(OBIDMS_p dms,
|
||||
nb_elements_per_line,
|
||||
(column_to_clone->header)->elements_names,
|
||||
true,
|
||||
(column_to_clone->header)->dict_column,
|
||||
(column_to_clone->header)->tuples,
|
||||
(column_to_clone->header)->to_eval,
|
||||
(column_to_clone->header)->indexer_name,
|
||||
@ -1693,8 +1692,8 @@ int obi_close_column(OBIDMS_column_p column)
|
||||
if (obi_dms_unlist_column(column->dms, column) < 0)
|
||||
ret_val = -1;
|
||||
|
||||
// If the data type is OBI_STR, OBI_SEQ or OBI_QUAL, the associated indexer is closed
|
||||
if (((column->header)->returned_data_type == OBI_STR) || ((column->header)->returned_data_type == OBI_SEQ) || ((column->header)->returned_data_type == OBI_QUAL))
|
||||
// If it's a tuple column or the data type is OBI_STR, OBI_SEQ or OBI_QUAL, the associated indexer is closed
|
||||
if ((column->indexer) != NULL)
|
||||
if (obi_close_indexer(column->indexer) < 0)
|
||||
ret_val = -1;
|
||||
|
||||
@ -1729,16 +1728,32 @@ int obi_close_column(OBIDMS_column_p column)
|
||||
int obi_clone_column_indexer(OBIDMS_column_p column)
|
||||
{
|
||||
char* new_indexer_name;
|
||||
int i;
|
||||
|
||||
new_indexer_name = obi_build_indexer_name((column->header)->name, (column->header)->version);
|
||||
if (new_indexer_name == NULL)
|
||||
return -1;
|
||||
|
||||
column->indexer = obi_clone_indexer(column->indexer, new_indexer_name); // TODO Need to lock this somehow?
|
||||
if (column->indexer == NULL)
|
||||
i=0;
|
||||
while (true) // find avl name not already used
|
||||
{
|
||||
obidebug(1, "\nError cloning a column's indexer to make it writable");
|
||||
return -1;
|
||||
new_indexer_name = obi_build_indexer_name((column->header)->name, ((column->header)->version)+i);
|
||||
if (new_indexer_name == NULL)
|
||||
return -1;
|
||||
|
||||
column->indexer = obi_clone_indexer(column->indexer, new_indexer_name); // TODO Need to lock this somehow?
|
||||
if (column->indexer == NULL)
|
||||
{
|
||||
if (errno == EEXIST)
|
||||
{
|
||||
free(new_indexer_name);
|
||||
i++;
|
||||
}
|
||||
else
|
||||
{
|
||||
free(new_indexer_name);
|
||||
obidebug(1, "\nError cloning a column's indexer to make it writable");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
strcpy((column->header)->indexer_name, new_indexer_name);
|
||||
@ -1754,6 +1769,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
|
||||
{
|
||||
size_t file_size;
|
||||
size_t data_size;
|
||||
size_t header_size;
|
||||
index_t new_line_count;
|
||||
double multiple;
|
||||
int column_file_descriptor;
|
||||
@ -1776,6 +1792,8 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
|
||||
|
||||
data_size = obi_array_sizeof((column->header)->stored_data_type, new_line_count, (column->header)->nb_elements_per_line);
|
||||
|
||||
header_size = (column->header)->header_size;
|
||||
|
||||
// Check that it is actually greater than the current data size, otherwise no need to truncate
|
||||
if ((column->header)->data_size == data_size)
|
||||
return 0;
|
||||
@ -1840,7 +1858,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unmap the data before truncating the file
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(column->data, (column->header)->data_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBICOL_UNKNOWN_ERROR);
|
||||
@ -1848,9 +1866,16 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
|
||||
close(column_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
if (munmap(column->header, header_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBICOL_UNKNOWN_ERROR);
|
||||
obidebug(1, "\nError munmapping the header of a column before truncating");
|
||||
close(column_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Truncate the column file
|
||||
file_size = (column->header)->header_size + data_size;
|
||||
file_size = header_size + data_size;
|
||||
if (ftruncate(column_file_descriptor, file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBICOL_UNKNOWN_ERROR);
|
||||
@ -1859,13 +1884,30 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Remap the data
|
||||
// Remap the header and the data
|
||||
|
||||
column->header = mmap(NULL,
|
||||
header_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
column_file_descriptor,
|
||||
0
|
||||
);
|
||||
|
||||
if (column->header == MAP_FAILED)
|
||||
{
|
||||
obi_set_errno(OBICOL_UNKNOWN_ERROR);
|
||||
obidebug(1, "\nError re-mmapping the header of a column after truncating");
|
||||
close(column_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
column->data = mmap(NULL,
|
||||
data_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
column_file_descriptor,
|
||||
(column->header)->header_size
|
||||
header_size
|
||||
);
|
||||
|
||||
if (column->data == MAP_FAILED)
|
||||
@ -1974,6 +2016,10 @@ int obi_enlarge_column(OBIDMS_column_p column)
|
||||
// Calculate the new file size
|
||||
old_line_count = (column->header)->line_count;
|
||||
new_line_count = ceil((double) old_line_count * (double) COLUMN_GROWTH_FACTOR);
|
||||
if (new_line_count > old_line_count+100000)
|
||||
new_line_count = old_line_count+100000;
|
||||
else if (new_line_count < old_line_count+1000)
|
||||
new_line_count = old_line_count+1000;
|
||||
|
||||
if (new_line_count > MAXIMUM_LINE_COUNT)
|
||||
{
|
||||
@ -2415,17 +2461,81 @@ char* obi_get_formatted_elements_names(OBIDMS_column_p column)
|
||||
}
|
||||
|
||||
|
||||
char* obi_column_formatted_infos(OBIDMS_column_p column)
|
||||
char* obi_column_formatted_infos(OBIDMS_column_p column, bool detailed)
|
||||
{
|
||||
char* column_infos;
|
||||
char* elt_names;
|
||||
|
||||
column_infos = malloc(1024 * sizeof(char));
|
||||
char* column_infos = NULL;
|
||||
char* elt_names = NULL;
|
||||
char* data_type_str = NULL;
|
||||
char* comments = NULL;
|
||||
|
||||
// Get element names informations
|
||||
elt_names = obi_get_formatted_elements_names(column);
|
||||
if (elt_names == NULL)
|
||||
{
|
||||
obidebug(1, "\nError getting formatted elements names for formatted columns infos");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Get data type informations
|
||||
data_type_str = name_data_type((column->header)->returned_data_type);
|
||||
if (data_type_str == NULL)
|
||||
{
|
||||
obidebug(1, "\nError getting formatted data type for formatted columns infos");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Get commments if detailed informations required
|
||||
if (detailed)
|
||||
comments = (column->header)->comments;
|
||||
|
||||
// Build the string of formatted infos, allocating memory as needed
|
||||
|
||||
// Data type
|
||||
column_infos = (char*) malloc((strlen("data type: ")+strlen(data_type_str)+1) * sizeof(char));
|
||||
if (column_infos == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for formatted column infos");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
strcpy(column_infos, "data type: ");
|
||||
strcat(column_infos, data_type_str);
|
||||
|
||||
// Element names if more than 1
|
||||
if ((column->header)->nb_elements_per_line > 1)
|
||||
{
|
||||
column_infos = realloc(column_infos, (strlen(column_infos)+strlen(", elements: ")+strlen(elt_names)+1) * sizeof(char));
|
||||
if (column_infos == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for formatted column infos");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
strcat(column_infos, ", elements: ");
|
||||
strcat(column_infos, elt_names);
|
||||
}
|
||||
|
||||
if (detailed && (strlen(comments)>2)) // Add all comments if required and not empty
|
||||
{
|
||||
column_infos = realloc(column_infos, (strlen(column_infos)+strlen("\nComments:\n")+strlen(comments)+1) * sizeof(char));
|
||||
if (column_infos == NULL)
|
||||
{
|
||||
obi_set_errno(OBI_MALLOC_ERROR);
|
||||
obidebug(1, "\nError allocating memory for formatted column infos");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
strcat(column_infos, "\nComments:\n");
|
||||
strcat(column_infos, comments);
|
||||
}
|
||||
|
||||
// "data type: OBI_TYPE, element names: [formatted element names](, all comments)"
|
||||
|
||||
free(elt_names);
|
||||
free(data_type_str);
|
||||
|
||||
return column_infos;
|
||||
}
|
||||
|
||||
@ -2472,7 +2582,6 @@ int obi_column_prepare_to_set_value(OBIDMS_column_p column, index_t line_nb, ind
|
||||
}
|
||||
|
||||
|
||||
|
||||
int obi_column_prepare_to_get_value(OBIDMS_column_p column, index_t line_nb)
|
||||
{
|
||||
if ((line_nb+1) > ((column->header)->line_count))
|
||||
|
@ -34,7 +34,7 @@
#define NB_ELTS_MAX_IF_DEFAULT_NAME (1000000) /**< The maximum number of elements per line if the default element names
 * are used ("0\01\02\0...\0n"), considering ELEMENTS_NAMES_MAX. // TODO not up to date
 */
#define COLUMN_GROWTH_FACTOR (1.3) /**< The growth factor when a column is enlarged.
#define COLUMN_GROWTH_FACTOR (2) /**< The growth factor when a column is enlarged.
 */
#define MAXIMUM_LINE_COUNT (1000000000) /**< The maximum line count for the data of a column (1E9). //TODO
 */

@ -77,6 +77,8 @@ typedef struct OBIDMS_column_header {
|
||||
OBIType_t stored_data_type; /**< Type of the data that is actually stored in the data
|
||||
* part of the column.
|
||||
*/
|
||||
bool dict_column; /**< Whether the column contains dictionary-like values.
|
||||
*/
|
||||
bool tuples; /**< A boolean indicating whether the column contains indices referring to indexed tuples.
|
||||
*/
|
||||
bool to_eval; /**< A boolean indicating whether the column contains expressions that should be evaluated
|
||||
@ -249,6 +251,7 @@ size_t obi_calculate_header_size(index_t nb_elements_per_line, int64_t elts_name
|
||||
* @param elements_names The names of the elements with ';' as separator (no terminal ';'),
|
||||
* NULL or "" if the default names are to be used ("0\01\02\0...\0n").
|
||||
* @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()).
|
||||
* @param dict_column A boolean indicating whether the column should contain dictionary-like values.
|
||||
* @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples.
|
||||
* @param to_eval A boolean indicating whether the column contains expressions that should be evaluated
|
||||
* (typically OBI_STR columns containing character strings to be evaluated by Python).
|
||||
@ -271,6 +274,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
||||
index_t nb_elements_per_line,
|
||||
char* elements_names,
|
||||
bool elt_names_formatted,
|
||||
bool dict_column,
|
||||
bool tuples,
|
||||
bool to_eval,
|
||||
const char* indexer_name,
|
||||
@ -505,12 +509,37 @@ index_t obi_column_get_element_index_from_name(OBIDMS_column_p column, const cha
|
||||
char* obi_get_elements_names(OBIDMS_column_p column);
|
||||
|
||||
|
||||
// TODO
|
||||
//char* obi_get_formatted_elements_names(OBIDMS_column_p column);
|
||||
/**
|
||||
* @brief Recovers the elements names of the lines of a column with a human readable format ("0; 1; 2; ...; n\0").
|
||||
*
|
||||
* @warning The returned pointer has to be freed by the caller.
|
||||
*
|
||||
* @param column A pointer on an OBIDMS column.
|
||||
*
|
||||
* @returns A pointer on a character array where the elements names are stored.
|
||||
* @retval NULL if an error occurred.
|
||||
*
|
||||
* @since September 2020
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
char* obi_get_formatted_elements_names(OBIDMS_column_p column);
|
||||
|
||||
|
||||
// TODO
|
||||
//char* obi_column_formatted_infos(OBIDMS_column_p column);
|
||||
/**
|
||||
* @brief Returns the informations of a column with a human readable format (data type, element names, comments).
|
||||
*
|
||||
* @warning The returned pointer has to be freed by the caller.
|
||||
*
|
||||
* @param column A pointer on an OBIDMS column.
|
||||
* @param detailed Whether the informations should contain column comments or just data type and element names.
|
||||
*
|
||||
* @returns A pointer on a character array where the formatted column informations are stored.
|
||||
* @retval NULL if an error occurred.
|
||||
*
|
||||
* @since September 2020
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
char* obi_column_formatted_infos(OBIDMS_column_p column, bool detailed);
|
||||
|
||||
|
||||
/**
|
||||
|
@ -25,7 +25,7 @@
 * @author Celine Mercier (celine.mercier@metabarcoding.org)
 *
 */
bool volatile keep_running;
extern bool volatile keep_running;
void sig_handler(int signum);

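The hunk above turns the header-level definition of keep_running into an extern declaration. Defining the variable in the header gives every translation unit that includes it its own copy (a multiple-definition link error, or silent tentative-definition merging, depending on the toolchain); the usual split is sketched below with illustrative file names and an illustrative handler body.

    /* signal_catcher.h -- declaration only (illustrative file name) */
    #include <stdbool.h>
    extern bool volatile keep_running;
    void sig_handler(int signum);

    /* signal_catcher.c -- exactly one definition of the flag */
    #include <stdbool.h>

    bool volatile keep_running = true;

    void sig_handler(int signum)
    {
        (void) signum;
        keep_running = false;   /* the main loop polls this flag and exits cleanly */
    }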
260
src/obiview.c
@ -17,6 +17,7 @@
|
||||
#include <sys/mman.h>
|
||||
#include <inttypes.h>
|
||||
#include <math.h>
|
||||
#include <time.h>
|
||||
//#include <ctype.h>
|
||||
|
||||
#include "obiview.h"
|
||||
@ -254,11 +255,15 @@ static int update_lines(Obiview_p view, index_t line_count);
|
||||
/**
|
||||
* @brief Internal function to clone a column in the context of a view.
|
||||
*
|
||||
* Used to edit a closed column.
|
||||
*
|
||||
* Clones with the right line selection and replaces the cloned columns with the new ones in the view.
|
||||
* If there is a line selection, all columns have to be cloned, otherwise only the column of interest is cloned.
|
||||
*
|
||||
* @param view A pointer on the view.
|
||||
* @param column_name The name of the column in the view that should be cloned.
|
||||
* @param clone_associated Whether to clone the associated column
|
||||
* (should always be true except when calling from the function itself to avoid infinite recursion).
|
||||
*
|
||||
* @returns A pointer on the new column.
|
||||
* @retval NULL if an error occurred.
|
||||
@ -266,7 +271,7 @@ static int update_lines(Obiview_p view, index_t line_count);
|
||||
* @since February 2016
|
||||
* @author Celine Mercier (celine.mercier@metabarcoding.org)
|
||||
*/
|
||||
static OBIDMS_column_p clone_column_in_view(Obiview_p view, const char* column_name);
|
||||
static OBIDMS_column_p clone_column_in_view(Obiview_p view, const char* column_name, bool clone_associated);
|
||||
|
||||
|
||||
/**
|
||||
@ -633,6 +638,15 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
|
||||
multiple = ceil((double) new_size / (double) getpagesize());
|
||||
rounded_new_size = multiple * getpagesize();
|
||||
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(view->infos, (view->infos)->file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBIVIEW_ERROR);
|
||||
obidebug(1, "\nError munmapping a view file when enlarging");
|
||||
close(obiview_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Enlarge the file
|
||||
if (ftruncate(obiview_file_descriptor, rounded_new_size) < 0)
|
||||
{
|
||||
@ -642,15 +656,7 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unmap and remap the file
|
||||
if (munmap(view->infos, (view->infos)->file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBIVIEW_ERROR);
|
||||
obidebug(1, "\nError munmapping a view file when enlarging");
|
||||
close(obiview_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Remap the file
|
||||
view->infos = mmap(NULL,
|
||||
rounded_new_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
@ -845,7 +851,7 @@ static int update_lines(Obiview_p view, index_t line_count)
|
||||
// Clone the column first if needed
|
||||
if (!(column->writable))
|
||||
{
|
||||
column = clone_column_in_view(view, (((view->infos)->column_references)[i]).alias);
|
||||
column = clone_column_in_view(view, (((view->infos)->column_references)[i]).alias, true);
|
||||
if (column == NULL)
|
||||
{
|
||||
obidebug(1, "\nError cloning a column in a view when updating its line count");
|
||||
@ -870,12 +876,14 @@ static int update_lines(Obiview_p view, index_t line_count)
|
||||
}
|
||||
|
||||
|
||||
static OBIDMS_column_p clone_column_in_view(Obiview_p view, const char* column_name)
|
||||
static OBIDMS_column_p clone_column_in_view(Obiview_p view, const char* column_name, bool clone_associated)
|
||||
{
|
||||
int i;
|
||||
int i, j;
|
||||
OBIDMS_column_p column = NULL;
|
||||
OBIDMS_column_p new_column = NULL;
|
||||
OBIDMS_column_p column_buffer;
|
||||
OBIDMS_column_p associated_cloned_column = NULL;
|
||||
char* associated_column_alias = NULL;
|
||||
|
||||
// Check that the view is not read-only
|
||||
if (view->read_only)
|
||||
@ -916,11 +924,62 @@ static OBIDMS_column_p clone_column_in_view(Obiview_p view, const char* column_n
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Look for associated column to clone and reassociate
|
||||
if ((column_buffer->header->associated_column).column_name[0] != '\0')
|
||||
{
|
||||
// Get the associated column alias
|
||||
j=0;
|
||||
while (((strcmp((((view->infos)->column_references)[j]).column_refs.column_name, (column_buffer->header->associated_column).column_name)) ||
|
||||
((((view->infos)->column_references)[j]).column_refs.version != (column_buffer->header->associated_column).version)) &&
|
||||
j<(view->infos)->column_count) // TODO function for that
|
||||
j++;
|
||||
|
||||
if (j == (view->infos)->column_count) // not found
|
||||
{
|
||||
obi_set_errno(OBIVIEW_ERROR);
|
||||
obidebug(1, "\nCould not find associated column when cloning a column for editing");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// No line selection: only this column is cloned, clone and reassociate the associated column
|
||||
if ((view->line_selection == NULL) && clone_associated)
|
||||
{
|
||||
associated_column_alias = (((view->infos)->column_references)[j]).alias;
|
||||
// Clone the associated column
|
||||
associated_cloned_column = clone_column_in_view(view, associated_column_alias, false);
|
||||
// Reassociate both ways
|
||||
strcpy((associated_cloned_column->header->associated_column).column_name, column->header->name);
|
||||
(associated_cloned_column->header->associated_column).version = column->header->version;
|
||||
strcpy((column->header->associated_column).column_name, associated_cloned_column->header->name);
|
||||
(column->header->associated_column).version = associated_cloned_column->header->version;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Line selection: all columns are cloned, check if associated column has been cloned previously (it precedes this one in the list) to reassociate
|
||||
if (j < i)
|
||||
{
|
||||
// Get pointer to associated column
|
||||
associated_cloned_column = *((OBIDMS_column_p*)ll_get(view->columns, j));
|
||||
if (associated_cloned_column == NULL)
|
||||
{
|
||||
obi_set_errno(OBIVIEW_ERROR);
|
||||
obidebug(1, "\nError getting a column to clone from the linked list of column pointers of a view");
|
||||
return NULL;
|
||||
}
|
||||
// Reassociate both ways
|
||||
strcpy((associated_cloned_column->header->associated_column).column_name, column->header->name);
|
||||
(associated_cloned_column->header->associated_column).version = column->header->version;
|
||||
strcpy((column->header->associated_column).column_name, associated_cloned_column->header->name);
|
||||
(column->header->associated_column).version = associated_cloned_column->header->version;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close old cloned column
|
||||
obi_close_column(column_buffer);
|
||||
|
||||
if (!strcmp((((view->infos)->column_references)[i]).alias, column_name))
|
||||
// Found the column to return
|
||||
// Get the column to return
|
||||
new_column = column;
|
||||
}
|
||||
}
|
||||
@ -1037,8 +1096,9 @@ static int finish_view(Obiview_p view)
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Add count column if it's a NUC_SEQ_VIEW with no count column // TODO discuss
|
||||
if ((!strcmp((view->infos)->view_type, VIEW_TYPE_NUC_SEQS)) && (!obi_view_column_exists(view, COUNT_COLUMN)))
|
||||
// Add count column if it's a NUC_SEQ_VIEW with no count column (and there's no MERGED_sample column) // TODO discuss
|
||||
if ((!strcmp((view->infos)->view_type, VIEW_TYPE_NUC_SEQS)) && (!obi_view_column_exists(view, COUNT_COLUMN))
|
||||
&& (!obi_view_column_exists(view, "MERGED_sample"))) // TODO should eventually compute from merged samples?
|
||||
{
|
||||
if (obi_create_auto_count_column(view) < 0)
|
||||
{
|
||||
@ -1127,6 +1187,7 @@ static int close_view(Obiview_p view)
|
||||
obidebug(1, "\nError getting a column to close from the linked list of column pointers of a view");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (obi_close_column(column) < 0)
|
||||
{
|
||||
obidebug(1, "\nError closing a column while closing a view");
|
||||
@ -1192,7 +1253,7 @@ static int prepare_to_set_value_in_column(Obiview_p view, OBIDMS_column_p* colum
|
||||
return -1;
|
||||
}
|
||||
|
||||
(*column_pp) = clone_column_in_view(view, column_name);
|
||||
(*column_pp) = clone_column_in_view(view, column_name, true);
|
||||
if ((*column_pp) == NULL)
|
||||
{
|
||||
obidebug(1, "\nError trying to clone a column to modify it");
|
||||
@ -1652,7 +1713,7 @@ Obiview_p obi_new_view(OBIDMS_p dms, const char* view_name, Obiview_p view_to_cl
|
||||
// If there is a new line selection, build it by combining it with the one from the view to clone if there is one
|
||||
else if (line_selection != NULL)
|
||||
{
|
||||
view->line_selection = obi_create_column(view->dms, LINES_COLUMN_NAME, OBI_IDX, 0, 1, NULL, false, false, false, NULL, NULL, -1, NULL);
|
||||
view->line_selection = obi_create_column(view->dms, LINES_COLUMN_NAME, OBI_IDX, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, NULL);
|
||||
if ((view->line_selection) == NULL)
|
||||
{
|
||||
obidebug(1, "\nError creating a column corresponding to a line selection");
|
||||
@ -1802,6 +1863,7 @@ Obiview_p obi_new_view(OBIDMS_p dms, const char* view_name, Obiview_p view_to_cl
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
NULL,
|
||||
NULL,
|
||||
-1,
|
||||
@ -1843,6 +1905,7 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
|
||||
{
|
||||
Obiview_p view;
|
||||
OBIDMS_column_p associated_nuc_column;
|
||||
OBIDMS_column_p associated_qual_column;
|
||||
int nb_predicates;
|
||||
|
||||
if (view_to_clone != NULL)
|
||||
@ -1869,19 +1932,19 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
|
||||
if ((view_to_clone == NULL) && create_default_columns)
|
||||
{
|
||||
// Adding sequence column
|
||||
if (obi_view_add_column(view, NUC_SEQUENCE_COLUMN, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) // discuss using same indexer "NUC_SEQ_INDEXER"
|
||||
if (obi_view_add_column(view, NUC_SEQUENCE_COLUMN, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0) // discuss using same indexer "NUC_SEQ_INDEXER"
|
||||
{
|
||||
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
|
||||
return NULL;
|
||||
}
|
||||
// Adding id column
|
||||
if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
|
||||
return NULL;
|
||||
}
|
||||
// Adding definition column
|
||||
if (obi_view_add_column(view, DEFINITION_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
if (obi_view_add_column(view, DEFINITION_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
|
||||
{
|
||||
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
|
||||
return NULL;
|
||||
@ -1890,11 +1953,15 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
|
||||
if (quality_column)
|
||||
{
|
||||
associated_nuc_column = obi_view_get_column(view, NUC_SEQUENCE_COLUMN);
|
||||
if (obi_view_add_column(view, QUALITY_COLUMN, -1, NULL, OBI_QUAL, 0, 1, NULL, false, false, false, NULL, (associated_nuc_column->header)->name, (associated_nuc_column->header)->version, "{}", true) < 0) // TODO discuss automatic association
|
||||
if (obi_view_add_column(view, QUALITY_COLUMN, -1, NULL, OBI_QUAL, 0, 1, NULL, false, false, false, false, NULL, (associated_nuc_column->header)->name, (associated_nuc_column->header)->version, "{}", true) < 0) // TODO discuss automatic association
|
||||
{
|
||||
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
|
||||
return NULL;
|
||||
}
|
||||
// Associating both ways: associating nuc sequences column to quality column
|
||||
associated_qual_column = obi_view_get_column(view, QUALITY_COLUMN);
|
||||
strcpy((associated_nuc_column->header->associated_column).column_name, associated_qual_column->header->name);
|
||||
(associated_nuc_column->header->associated_column).version = associated_qual_column->header->version;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1921,7 +1988,7 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
|
||||
(view->predicate_functions)[(view->nb_predicates)] = view_has_nuc_sequence_column;
|
||||
(view->predicate_functions)[(view->nb_predicates) + 1] = view_has_id_column;
|
||||
(view->predicate_functions)[(view->nb_predicates) + 2] = view_has_definition_column;
|
||||
// if (quality_column) # TODO discuss. Commented bc for example with obi annotate, clone view so clone predicate, then modify seq, so quality is deleted, and predicate boom
|
||||
// if (quality_column) # TODO fix by triggering predicate deleting if quality deleting. Commented bc for example with obi annotate, clone view so clone predicate, then modify seq, so quality is deleted, and predicate boom
|
||||
// (view->predicate_functions)[(view->nb_predicates) + 3] = view_has_quality_column;
|
||||
|
||||
view->nb_predicates = nb_predicates;
|
||||
@ -2211,7 +2278,7 @@ Obiview_p obi_open_view(OBIDMS_p dms, const char* view_name)
|
||||
|
||||
// TODO return a pointer on the column?
|
||||
int obi_view_add_column(Obiview_p view,
|
||||
char* column_name,
|
||||
char* column_name,
|
||||
obiversion_t version_number,
|
||||
const char* alias,
|
||||
OBIType_t data_type,
|
||||
@ -2219,6 +2286,7 @@ int obi_view_add_column(Obiview_p view,
|
||||
index_t nb_elements_per_line,
|
||||
char* elements_names,
|
||||
bool elt_names_formatted,
|
||||
bool dict_column,
|
||||
bool tuples,
|
||||
bool to_eval,
|
||||
const char* indexer_name,
|
||||
@ -2301,7 +2369,7 @@ int obi_view_add_column(Obiview_p view,
|
||||
// Open or create the column
|
||||
if (create)
|
||||
{ // Create column
|
||||
column = obi_create_column(view->dms, column_name, data_type, nb_lines, nb_elements_per_line, elements_names, elt_names_formatted, tuples, to_eval, indexer_name, associated_column_name, associated_column_version, comments);
|
||||
column = obi_create_column(view->dms, column_name, data_type, nb_lines, nb_elements_per_line, elements_names, elt_names_formatted, dict_column, tuples, to_eval, indexer_name, associated_column_name, associated_column_version, comments);
|
||||
if (column == NULL)
|
||||
{
|
||||
obidebug(1, "\nError creating a column to add to a view");
|
||||
@ -2540,6 +2608,144 @@ int obi_view_create_column_alias(Obiview_p view, const char* current_name, const
}


char* obi_view_formatted_infos(Obiview_p view, bool detailed)
{
    int i;
    char* view_infos = NULL;
    char* view_name = NULL;
    time_t creation_date;
    char* creation_date_str = NULL;
    index_t line_count;
    char line_count_str[256];
    OBIDMS_column_p column;
    char* column_alias = NULL;
    char* column_infos = NULL;
    char* comments = NULL;

    // View name
    view_name = (view->infos)->name;
    view_infos = (char*) malloc((strlen("# View name:\n")+strlen(view_name)+1) * sizeof(char));
    strcpy(view_infos, "# View name:\n");
    strcat(view_infos, view_name);

    // Date created
    if (view->read_only)    // Date not saved until view is finished writing
    {
        creation_date = (view->infos)->creation_date;
        creation_date_str = ctime(&creation_date);
        view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Date created:\n")+strlen(creation_date_str)+1) * sizeof(char));
        strcat(view_infos, "\n# Date created:\n");
        strcat(view_infos, creation_date_str);
    }

    // Line count
    line_count = (view->infos)->line_count;
    snprintf(line_count_str, sizeof line_count_str, "%lld", line_count);
    view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Line count:\n")+strlen(line_count_str)+1) * sizeof(char));
    strcat(view_infos, "# Line count:\n");
    strcat(view_infos, line_count_str);

    // Columns: go through each, print their alias then their infos
    view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Columns:")+1) * sizeof(char));
    strcat(view_infos, "\n# Columns:");
    for (i=0; i<((view->infos)->column_count); i++)
    {
        column = *((OBIDMS_column_p*)ll_get(view->columns, i));
        if (column == NULL)
        {
            obidebug(1, "\nError getting a column from the linked list of column pointers of a view to format view infos");
            return NULL;
        }

        // Column alias
        column_alias = (((view->infos)->column_references)[i]).alias;
        view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n")+strlen(column_alias)+strlen(", ")+1) * sizeof(char));
        strcat(view_infos, "\n");
        strcat(view_infos, column_alias);
        strcat(view_infos, ", ");

        // Column infos
        column_infos = obi_column_formatted_infos(column, detailed);
        if (column_infos == NULL)
        {
            obidebug(1, "\nError getting column infos to format view infos");
            return NULL;
        }

        view_infos = realloc(view_infos, (strlen(view_infos)+strlen(column_infos)+1) * sizeof(char));
        strcat(view_infos, column_infos);
        free(column_infos);
    }

    // Get commments if detailed informations required
    if (detailed)
    {
        comments = (view->infos)->comments;
        if (strlen(comments)>2)    // Add all comments if not empty
        {
            view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Comments:\n")+strlen(comments)+1) * sizeof(char));
            if (view_infos == NULL)
            {
                obi_set_errno(OBI_MALLOC_ERROR);
                obidebug(1, "\nError allocating memory for formatted view infos");
                return NULL;
            }

            strcat(view_infos, "\n# Comments:\n");
            strcat(view_infos, comments);
        }
    }

    view_infos = realloc(view_infos, (strlen(view_infos)+2) * sizeof(char));
    strcat(view_infos, "\n");

    return view_infos;
}


char* obi_view_formatted_infos_one_line(Obiview_p view)
{
    int i;
    char* view_infos = NULL;
    char* view_name = NULL;
    time_t creation_date;
    char* creation_date_str = NULL;
    index_t line_count;
    char line_count_str[256];

    // View name
    view_name = (view->infos)->name;
    view_infos = (char*) malloc((strlen(" # ")+strlen(view_name)+2) * sizeof(char));
    strcpy(view_infos, " # ");
    strcat(view_infos, view_name);
    strcat(view_infos, ":");

    // Date created
    if (view->read_only)    // Date not saved until view is finished writing
    {
        creation_date = (view->infos)->creation_date;
        creation_date_str = ctime(&creation_date);
        // Delete \n added by ctime
        creation_date_str[strlen(creation_date_str)-1] = '\0';
        view_infos = realloc(view_infos, (strlen(view_infos)+strlen(" Date created: ")+strlen(creation_date_str)+1) * sizeof(char));
        strcat(view_infos, " Date created: ");
        strcat(view_infos, creation_date_str);
    }

    // Line count
    line_count = (view->infos)->line_count;
    snprintf(line_count_str, sizeof line_count_str, "%lld", line_count);
    view_infos = realloc(view_infos, (strlen(view_infos)+strlen(" ; Line count: ")+strlen(line_count_str)+1) * sizeof(char));
    strcat(view_infos, " ; Line count: ");
    strcat(view_infos, line_count_str);

    view_infos = realloc(view_infos, (strlen(view_infos)+2) * sizeof(char));
    strcat(view_infos, "\n");

    return view_infos;
}


int obi_view_write_comments(Obiview_p view, const char* comments)
{
    size_t new_size;
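The new obi_view_formatted_infos() above grows its result with repeated realloc()/strcat() calls, and only the comments branch checks whether realloc() returned NULL. A common, safer pattern (a hypothetical helper, not part of this patch) keeps the old pointer until the reallocation succeeds:

#include <stdlib.h>
#include <string.h>

// Hypothetical helper, not part of the patch: append `suffix` to the heap
// string *dest, growing it with realloc. The old pointer is kept until the
// reallocation succeeds, so nothing leaks and NULL is never dereferenced.
static int append_str(char** dest, const char* suffix)
{
    size_t old_len = strlen(*dest);
    char*  grown   = realloc(*dest, old_len + strlen(suffix) + 1);
    if (grown == NULL)
        return -1;    // *dest is still valid and still owned by the caller
    strcpy(grown + old_len, suffix);
    *dest = grown;
    return 0;
}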
@ -3027,7 +3233,7 @@ int obi_create_auto_count_column(Obiview_p view)
        return -1;
    }

    if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
    if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
    {
        obidebug(1, "Error adding an automatic count column in a view");
        return -1;
@ -3079,7 +3285,7 @@ int obi_create_auto_id_column(Obiview_p view, const char* prefix)
    }

    // Create the new ID column
    if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
    if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
    {
        obidebug(1, "Error adding an automatic ID column in a view");
        return -1;

135
src/obiview.h
@ -30,54 +30,56 @@
#include "obiblob.h"


#define OBIVIEW_NAME_MAX_LENGTH (249)               /**< The maximum length of an OBIDMS view name, without the extension.
                                                     */
#define VIEW_TYPE_MAX_LENGTH (1024)                 /**< The maximum length of the type name of a view.
                                                     */
#define LINES_COLUMN_NAME "LINES"                   /**< The name of the column containing the line selections
                                                     * in all views.
                                                     */
#define VIEW_TYPE_NUC_SEQS "NUC_SEQS_VIEW"          /**< The type name of views based on nucleotide sequences
                                                     * and their metadata.
                                                     */
#define NUC_SEQUENCE_COLUMN "NUC_SEQ"               /**< The name of the column containing the nucleotide sequences
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define ID_COLUMN "ID"                              /**< The name of the column containing the sequence identifiers
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define DEFINITION_COLUMN "DEFINITION"              /**< The name of the column containing the sequence definitions
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define QUALITY_COLUMN "QUALITY"                    /**< The name of the column containing the sequence qualities
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define REVERSE_QUALITY_COLUMN "REVERSE_QUALITY"    /**< The name of the column containing the sequence qualities
                                                     * of the reverse read (generated by ngsfilter, used by alignpairedend).
                                                     */
#define OBIVIEW_NAME_MAX_LENGTH (249)               /**< The maximum length of an OBIDMS view name, without the extension.
                                                     */
#define VIEW_TYPE_MAX_LENGTH (1024)                 /**< The maximum length of the type name of a view.
                                                     */
#define LINES_COLUMN_NAME "LINES"                   /**< The name of the column containing the line selections
                                                     * in all views.
                                                     */
#define VIEW_TYPE_NUC_SEQS "NUC_SEQS_VIEW"          /**< The type name of views based on nucleotide sequences
                                                     * and their metadata.
                                                     */
#define NUC_SEQUENCE_COLUMN "NUC_SEQ"               /**< The name of the column containing the nucleotide sequences
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define ID_COLUMN "ID"                              /**< The name of the column containing the sequence identifiers
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define DEFINITION_COLUMN "DEFINITION"              /**< The name of the column containing the sequence definitions
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define QUALITY_COLUMN "QUALITY"                    /**< The name of the column containing the sequence qualities
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define REVERSE_QUALITY_COLUMN "REVERSE_QUALITY"    /**< The name of the column containing the sequence qualities
                                                     * of the reverse read (generated by ngsfilter, used by alignpairedend).
                                                     */
#define REVERSE_SEQUENCE_COLUMN "REVERSE_SEQUENCE"  /**< The name of the column containing the sequence
                                                     * of the reverse read (generated by ngsfilter, used by alignpairedend).
                                                     */
#define QUALITY_COLUMN "QUALITY"                    /**< The name of the column containing the sequence qualities
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define COUNT_COLUMN "COUNT"                        /**< The name of the column containing the sequence counts
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define TAXID_COLUMN "TAXID"                        /**< The name of the column containing the taxids. TODO subtype of INT column?
                                                     */
#define MERGED_TAXID_COLUMN "MERGED_TAXID"          /**< The name of the column containing the merged taxids information.
                                                     */
#define MERGED_PREFIX "MERGED_"                     /**< The prefix to prepend to column names when merging informations during obi uniq.
                                                     */
#define TAXID_DIST_COLUMN "TAXID_DIST"              /**< The name of the column containing a dictionary of taxid:[list of ids] when merging informations during obi uniq.
                                                     */
#define MERGED_COLUMN "MERGED"                      /**< The name of the column containing a list of ids when merging informations during obi uniq.
                                                     */
#define ID_PREFIX "seq"                             /**< The default prefix of sequence identifiers in automatic ID columns.
                                                     */
#define PREDICATE_KEY "predicates"                  /**< The key used in the json-formatted view comments to store predicates.
                                                     */
                                                     * of the reverse read (generated by ngsfilter, used by alignpairedend).
                                                     */
#define QUALITY_COLUMN "QUALITY"                    /**< The name of the column containing the sequence qualities
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define COUNT_COLUMN "COUNT"                        /**< The name of the column containing the sequence counts
                                                     * in NUC_SEQS_VIEW views.
                                                     */
#define SCIENTIFIC_NAME_COLUMN "SCIENTIFIC_NAME"    /**< The name of the column containing the taxon scientific name.
                                                     */
#define TAXID_COLUMN "TAXID"                        /**< The name of the column containing the taxids. TODO subtype of INT column?
                                                     */
#define MERGED_TAXID_COLUMN "MERGED_TAXID"          /**< The name of the column containing the merged taxids information.
                                                     */
#define MERGED_PREFIX "MERGED_"                     /**< The prefix to prepend to column names when merging informations during obi uniq.
                                                     */
#define TAXID_DIST_COLUMN "TAXID_DIST"              /**< The name of the column containing a dictionary of taxid:[list of ids] when merging informations during obi uniq.
                                                     */
#define MERGED_COLUMN "MERGED"                      /**< The name of the column containing a list of ids when merging informations during obi uniq.
                                                     */
#define ID_PREFIX "seq"                             /**< The default prefix of sequence identifiers in automatic ID columns.
                                                     */
#define PREDICATE_KEY "predicates"                  /**< The key used in the json-formatted view comments to store predicates.
                                                     */


/**
@ -398,6 +400,7 @@ Obiview_p obi_open_view(OBIDMS_p dms, const char* view_name);
 * @param elements_names The names of the elements with ';' as separator (no terminal ';'),
 *                       if the column is created; NULL or "" if the default names are to be used ("0\01\02\0...\0n").
 * @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()).
 * @param dict_column Whether the column contains dictionary-like values.
 * @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples.
 * @param to_eval A boolean indicating whether the column contains expressions that should be evaluated
 *                (typically OBI_STR columns containing character strings to be evaluated by Python).
@ -406,7 +409,7 @@ Obiview_p obi_open_view(OBIDMS_p dms, const char* view_name);
 * @param associated_column_name The name of the associated column if there is one (otherwise NULL or ""), if the column is created.
 * @param associated_column_version The version of the associated column if there is one (otherwise -1), if the column is created.
 * @param comments Optional comments associated with the column if it is created (NULL or "" if no comments associated).
 * @param create Whether the column should be created (create == true) or opened (create == false).
 * @param create Whether the column should be created (create == true) or already exists (create == false).
 *
 * @returns A value indicating the success of the operation.
 * @retval 0 if the operation was successfully completed.
@ -416,7 +419,7 @@ Obiview_p obi_open_view(OBIDMS_p dms, const char* view_name);
 * @author Celine Mercier (celine.mercier@metabarcoding.org)
 */
int obi_view_add_column(Obiview_p view,
                        char* column_name,
                        char* column_name,
                        obiversion_t version_number,
                        const char* alias,
                        OBIType_t data_type,
@ -424,6 +427,7 @@ int obi_view_add_column(Obiview_p view,
                        index_t nb_elements_per_line,
                        char* elements_names,
                        bool elt_names_formatted,
                        bool dict_column,
                        bool tuples,
                        bool to_eval,
                        const char* indexer_name,
@ -519,6 +523,39 @@ OBIDMS_column_p* obi_view_get_pointer_on_column_in_view(Obiview_p view, const ch
int obi_view_create_column_alias(Obiview_p view, const char* current_name, const char* alias);


/**
 * @brief Returns the informations of a view with a human readable format (view name, date created, line count, column informations, comments).
 *
 * @warning The returned pointer has to be freed by the caller.
 *
 * @param column A pointer on a view.
 * @param detailed Whether the informations should contain view comments.
 *
 * @returns A pointer on a character array where the formatted view informations are stored.
 * @retval NULL if an error occurred.
 *
 * @since September 2020
 * @author Celine Mercier (celine.mercier@metabarcoding.org)
 */
char* obi_view_formatted_infos(Obiview_p view, bool detailed);


/**
 * @brief Returns the informations of a view with a human readable format on one line (view name, date created, line count).
 *
 * @warning The returned pointer has to be freed by the caller.
 *
 * @param column A pointer on a view.
 *
 * @returns A pointer on a character array where the formatted view informations are stored.
 * @retval NULL if an error occurred.
 *
 * @since September 2020
 * @author Celine Mercier (celine.mercier@metabarcoding.org)
 */
char* obi_view_formatted_infos_one_line(Obiview_p view);


/**
 * @brief Internal function writing new comments in a view file.
 *

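The header now declares obi_view_formatted_infos() and obi_view_formatted_infos_one_line(), and both doc comments warn that the returned buffer must be freed by the caller. A hedged usage sketch, relying only on the declarations visible in this diff (error handling and view cleanup are reduced to the essentials):

#include <stdio.h>
#include <stdlib.h>
#include "obiview.h"   // declares Obiview_p, obi_open_view(), obi_view_formatted_infos()

// Hedged usage sketch: print the formatted description of an existing view.
static void print_view_infos(OBIDMS_p dms, const char* view_name)
{
    Obiview_p view = obi_open_view(dms, view_name);
    if (view == NULL)
        return;

    char* infos = obi_view_formatted_infos(view, true);   // detailed: include comments
    if (infos != NULL)
    {
        fputs(infos, stdout);
        free(infos);   // per the doc comment, the caller must free the buffer
    }
}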
@ -686,6 +686,9 @@ int calculateSizeToAllocate(int maxLen, int LCSmin)
    size *= 3;
    size += 16;

    size += 10;   // band-aid for memory bug I don't understand (triggered on specific db on ubuntu)
                  // bug might have to do with the way different systems behave when aligning the address in obi_get_memory_aligned_on_16

    return(size*sizeof(int16_t));
}

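The +10 band-aid above enlarges a buffer whose address is later rounded to a 16-byte boundary. As a generic illustration (not the project's obi_get_memory_aligned_on_16()), aligning a pointer upward can skip as many as 15 bytes at the start of the block, so an allocation that must provide n usable bytes after alignment needs that much slack:

#include <stdint.h>
#include <stdlib.h>

// Generic illustration of 16-byte alignment slack; this is not the project's
// obi_get_memory_aligned_on_16(), only the usual arithmetic behind it.
static void* align_up_16(void* p)
{
    uintptr_t addr = (uintptr_t) p;
    return (void*) ((addr + 15) & ~(uintptr_t) 15);
}

// Allocate n usable bytes at a 16-byte boundary; *raw receives the pointer
// that must later be passed to free().
static void* alloc_aligned_16(size_t n, void** raw)
{
    *raw = malloc(n + 15);   // worst case: the block starts 1 byte past a boundary
    if (*raw == NULL)
        return NULL;
    return align_up_16(*raw);
}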