Compare commits
107 Commits
SHA1
55b2679b23
9ea2124adc
2130a949c7
eeb93afa7d
755ce179ad
7e492578b3
02e9df3ad1
55ada80500
ef9d9674b0
4f39bb2418
0a2b8adb50
f9b99a9397
ce2833c04b
f64b3da30b
388b3e0410
c9db990b83
3f253feb5e
85d2bab607
53b3d81137
f6353fbf28
5a8b9dca5d
8bd6d6c8e9
405e6ef420
fedacfafe7
2d66e0e965
f43856b712
9e0c319806
58b42cd977
34de90bce6
4be9f36f99
f10e78ba3c
88c8463ed7
89168271ef
82d2642000
99c1cd60d6
ce7ae4ac55
0b4283bb58
747f3efbb2
6c1a3aff47
e2932b05f2
32345b9ec4
9334cf6cc6
8ec13a294c
3e45c34491
c2f3d90dc1
6b732d11d3
9eb833a0af
6b7b0e3bd1
47691a8f58
b908b581c8
03c174fd7a
2156588ff6
6ff29c6a6a
51a3c68fb5
da91ffc2c7
c884615522
cb53381863
72b3e5d872
238e9f70f3
e099a16624
847c9c816d
6026129ca8
169b6514b4
89b0c48141
7c02782e3c
ecc4c2c78b
f5413381fd
3e93cfff7b
6d445fe3ad
824deb7e21
d579bb2749
10e5ebdbc0
8833110490
bd38449f2d
904823c827
af68a1024c
425fe25bd2
d48aed38d4
5e32f8523e
8f1d94fd24
38f42cb0fb
7f0f63cf26
cba78111c9
41fbae7b6c
ad1fd3c341
fbf0f7dfb6
fda0edd0d8
382e37a6ae
5cc3e29f75
a8e2aee281
13adb479d3
8ba7acdfe1
38051b1e4f
52a2e21b38
d27a5b9115
20bd3350b4
2e191372d7
112e12cab0
b9b4cec5b5
199f3772e8
422a6450fa
137c109f86
b6648ae81e
f6dffbecfe
c4696ac865
11a0945a9b
f23c40c905
@@ -1,3 +1,9 @@
+import codecs
+
+def unescaped_str(arg_str):
+    return arg_str.encode('latin-1', 'backslashreplace').decode('unicode-escape')
+
+
 def __addInputOption(optionManager):
 
     optionManager.add_argument(
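A minimal sketch of what the new `unescaped_str` helper does, assuming it is called on values typed on the command line (the assertions are illustrative and not part of the commit):

```python
# Illustrative sketch: the two-character sequence '\' 't' typed on a shell
# command line becomes a real tab character after unescaped_str().
def unescaped_str(arg_str):
    return arg_str.encode('latin-1', 'backslashreplace').decode('unicode-escape')

assert unescaped_str("\\t") == "\t"   # escaped tab -> real tab
assert unescaped_str("\\n") == "\n"   # escaped newline -> real newline
assert unescaped_str(";") == ";"      # ordinary separators pass through unchanged
```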
@@ -39,6 +45,30 @@ def __addImportInputOption(optionManager):
                        const=b'fastq',
                        help="Input file is in fastq format")
 
+    group.add_argument('--silva-input',
+                       action="store_const", dest="obi:inputformat",
+                       default=None,
+                       const=b'silva',
+                       help="Input file is in SILVA fasta format. If NCBI taxonomy provided with --taxonomy, taxid and scientific name will be added for each sequence.")
+
+    group.add_argument('--rdp-input',
+                       action="store_const", dest="obi:inputformat",
+                       default=None,
+                       const=b'rdp',
+                       help="Input file is in RDP training set fasta format. If NCBI taxonomy provided with --taxonomy, taxid and scientific name will be added for each sequence.")
+
+    group.add_argument('--unite-input',
+                       action="store_const", dest="obi:inputformat",
+                       default=None,
+                       const=b'unite',
+                       help="Input file is in UNITE fasta format. If NCBI taxonomy provided with --taxonomy, taxid and scientific name will be added for each sequence.")
+
+    group.add_argument('--sintax-input',
+                       action="store_const", dest="obi:inputformat",
+                       default=None,
+                       const=b'sintax',
+                       help="Input file is in SINTAX fasta format. If NCBI taxonomy provided with --taxonomy, taxid and scientific name will be added for each sequence.")
+
     group.add_argument('--embl-input',
                        action="store_const", dest="obi:inputformat",
                        default=None,
@@ -119,15 +149,15 @@ def __addImportInputOption(optionManager):
 def __addTabularOption(optionManager):
     group = optionManager.add_argument_group("Input and output format options for tabular files")
 
-    group.add_argument('--header',
-                       action="store_true", dest="obi:header",
-                       default=False,
-                       help="First line of tabular file contains column names")
+    group.add_argument('--no-header',
+                       action="store_false", dest="obi:header",
+                       default=True,
+                       help="Don't print the header (first line with column names")
 
     group.add_argument('--sep',
                        action="store", dest="obi:sep",
                        default="\t",
-                       type=str,
+                       type=unescaped_str,
                        help="Column separator")
 
 
@@ -159,6 +189,16 @@ def __addTabularInputOption(optionManager):
                        help="Lines starting by this char are considered as comment")
 
 
+def __addTabularOutputOption(optionManager):
+    group = optionManager.add_argument_group("Output format options for tabular files")
+
+    __addTabularOption(optionManager)
+
+    group.add_argument('--na-int-stay-na',
+                       action="store_false", dest="obi:na_int_to_0",
+                       help="NA (Non available) integer values should be exported as NA in tabular output (default: they are converted to 0 for tabular output).") # TODO
+
+
 def __addTaxdumpInputOption(optionManager):  # TODO maybe not the best way to do it
     group = optionManager.add_argument_group("Input format options for taxdump")
 
@@ -192,6 +232,10 @@ def addTabularInputOption(optionManager):
     __addTabularInputOption(optionManager)
 
 
+def addTabularOutputOption(optionManager):
+    __addTabularOutputOption(optionManager)
+
+
 def addTaxonomyOption(optionManager):
     __addTaxonomyOption(optionManager)
 
@@ -204,6 +248,7 @@ def addAllInputOption(optionManager):
     __addInputOption(optionManager)
     __addImportInputOption(optionManager)
     __addTabularInputOption(optionManager)
+    __addTabularOutputOption(optionManager)
     __addTaxonomyOption(optionManager)
     __addTaxdumpInputOption(optionManager)
 
@@ -264,6 +309,35 @@ def __addExportOutputOption(optionManager):
                        const=b'tabular',
                        help="Output file is in tabular format")
 
+    group.add_argument('--metabaR-output',
+                       action="store_const", dest="obi:outputformat",
+                       default=None,
+                       const=b'metabaR',
+                       help="Export the files needed by the obifiles_to_metabarlist function of the metabaR package")
+
+    group.add_argument('--metabaR-prefix',
+                       action="store", dest="obi:metabarprefix",
+                       type=str,
+                       help="Prefix for the files when using --metabaR-output option")
+
+    group.add_argument('--metabaR-ngsfilter',
+                       action="store", dest="obi:metabarngsfilter",
+                       type=str,
+                       default=None,
+                       help="URI to the ngsfilter view when using --metabaR-output option (if not provided, it is not exported)")
+
+    group.add_argument('--metabaR-samples',
+                       action="store", dest="obi:metabarsamples",
+                       type=str,
+                       default=None,
+                       help="URI to the sample metadata view when using --metabaR-output option (if not provided, it is built as just a list of the sample names)")
+
+    group.add_argument('--only-keys',
+                       action="append", dest="obi:only_keys",
+                       type=str,
+                       default=[],
+                       help="Only export the given keys (columns).")
+
     group.add_argument('--print-na',
                        action="store_true", dest="obi:printna",
                        default=False,
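The `--only-keys` option relies on argparse's `append` action, so the flag can be repeated to accumulate column names. A standalone sketch with a simplified `dest` (the real code routes `obi:`-prefixed destinations through its own option manager):

```python
# Illustrative sketch: each occurrence of --only-keys adds one name to the list.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--only-keys', action="append", dest="only_keys",
                    type=str, default=[])
args = parser.parse_args(['--only-keys', 'ID', '--only-keys', 'COUNT'])
print(args.only_keys)   # ['ID', 'COUNT']
```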
@@ -296,14 +370,14 @@ def addTabularOutputOption(optionManager):
 
 def addExportOutputOption(optionManager):
     __addExportOutputOption(optionManager)
-    __addTabularOption(optionManager)
+    __addTabularOutputOption(optionManager)
 
 
 def addAllOutputOption(optionManager):
     __addOutputOption(optionManager)
     __addDMSOutputOption(optionManager)
     __addExportOutputOption(optionManager)
-    __addTabularOption(optionManager)
+    __addTabularOutputOption(optionManager)
 
 
 def addNoProgressBarOption(optionManager):

python/obitools3/commands/addtaxids.pyx (new executable file, 231 lines)
@@ -0,0 +1,231 @@
+#cython: language_level=3
+
+from obitools3.apps.progress cimport ProgressBar  # @UnresolvedImport
+from obitools3.dms import DMS
+from obitools3.dms.view.view cimport View, Line_selection
+from obitools3.uri.decode import open_uri
+from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
+from obitools3.dms.view import RollbackException
+from obitools3.dms.column.column cimport Column
+from functools import reduce
+from obitools3.apps.config import logger
+from obitools3.utils cimport tobytes, str2bytes, tostr
+from io import BufferedWriter
+from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
+                                        ID_COLUMN, \
+                                        DEFINITION_COLUMN, \
+                                        QUALITY_COLUMN, \
+                                        COUNT_COLUMN, \
+                                        TAXID_COLUMN
+from obitools3.dms.capi.obitypes cimport OBI_INT
+from obitools3.dms.capi.obitaxonomy cimport MIN_LOCAL_TAXID
+import time
+import math
+import sys
+
+from cpython.exc cimport PyErr_CheckSignals
+
+
+__title__="Annotate sequences with their corresponding NCBI taxid found from the taxon scientific name"
+
+
+
+def addOptions(parser):
+
+    addMinimalInputOption(parser)
+    addTaxonomyOption(parser)
+    addMinimalOutputOption(parser)
+    addNoProgressBarOption(parser)
+
+    group=parser.add_argument_group('obi addtaxids specific options')
+
+    group.add_argument('-t', '--taxid-tag',
+                       action="store",
+                       dest="addtaxids:taxid_tag",
+                       metavar="<TAXID_TAG>",
+                       default=b"TAXID",
+                       help="Name of the tag to store the found taxid "
+                            "(default: 'TAXID').")
+
+    group.add_argument('-n', '--taxon-name-tag',
+                       action="store",
+                       dest="addtaxids:taxon_name_tag",
+                       metavar="<SCIENTIFIC_NAME_TAG>",
+                       default=b"SCIENTIFIC_NAME",
+                       help="Name of the tag giving the scientific name of the taxon "
+                            "(default: 'SCIENTIFIC_NAME').")
+
+    group.add_argument('-g', '--try-genus-match',
+                       action="store_true", dest="addtaxids:try_genus_match",
+                       default=False,
+                       help="Try matching the first word of <SCIENTIFIC_NAME_TAG> when can't find corresponding taxid for a taxon. "
+                            "If there is a match it is added in the 'parent_taxid' tag. (Can be used by 'obi taxonomy' to add the taxon under that taxid).")
+
+    group.add_argument('-a', '--restricting-ancestor',
+                       action="store",
+                       dest="addtaxids:restricting_ancestor",
+                       metavar="<RESTRICTING_ANCESTOR>",
+                       default=None,
+                       help="Enables to restrict the search of taxids under an ancestor specified by its taxid.")
+
+    group.add_argument('-l', '--log-file',
+                       action="store",
+                       dest="addtaxids:log_file",
+                       metavar="<LOG_FILE>",
+                       default='',
+                       help="Path to a log file to write informations about not found taxids.")
+
+
+def run(config):
+
+    DMS.obi_atexit()
+
+    logger("info", "obi addtaxids")
+
+    # Open the input
+    input = open_uri(config['obi']['inputURI'])
+    if input is None:
+        raise Exception("Could not read input view")
+    i_dms = input[0]
+    i_view = input[1]
+    i_view_name = input[1].name
+
+    # Open the output: only the DMS, as the output view is going to be created by cloning the input view
+    # (could eventually be done via an open_uri() argument)
+    output = open_uri(config['obi']['outputURI'],
+                      input=False,
+                      dms_only=True)
+    if output is None:
+        raise Exception("Could not create output view")
+    o_dms = output[0]
+    output_0 = output[0]
+    o_view_name = output[1]
+
+    # stdout output: create temporary view
+    if type(output_0)==BufferedWriter:
+        o_dms = i_dms
+        i=0
+        o_view_name = b"temp"
+        while o_view_name in i_dms:  # Making sure view name is unique in output DMS
+            o_view_name = o_view_name+b"_"+str2bytes(str(i))
+            i+=1
+        imported_view_name = o_view_name
+
+    # If the input and output DMS are not the same, import the input view in the output DMS before cloning it to modify it
+    # (could be the other way around: clone and modify in the input DMS then import the new view in the output DMS)
+    if i_dms != o_dms:
+        imported_view_name = i_view_name
+        i=0
+        while imported_view_name in o_dms:  # Making sure view name is unique in output DMS
+            imported_view_name = i_view_name+b"_"+str2bytes(str(i))
+            i+=1
+        View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], i_view_name, imported_view_name)
+        i_view = o_dms[imported_view_name]
+
+    # Clone output view from input view
+    o_view = i_view.clone(o_view_name)
+    if o_view is None:
+        raise Exception("Couldn't create output view")
+    i_view.close()
+
+    # Open taxonomy
+    taxo_uri = open_uri(config['obi']['taxoURI'])
+    if taxo_uri is None or taxo_uri[2] == bytes:
+        raise Exception("Couldn't open taxonomy")
+    taxo = taxo_uri[1]
+
+    # Initialize the progress bar
+    if config['obi']['noprogressbar'] == False:
+        pb = ProgressBar(len(o_view), config)
+    else:
+        pb = None
+
+    try:
+        if config['addtaxids']['log_file']:
+            logfile = open(config['addtaxids']['log_file'], 'w')
+        else:
+            logfile = None
+        if config['addtaxids']['try_genus_match']:
+            try_genus = True
+        else:
+            try_genus = False
+        if 'restricting_ancestor' in config['addtaxids']:
+            res_anc = int(config['addtaxids']['restricting_ancestor'])
+        else:
+            res_anc = None
+        taxid_column_name = config['addtaxids']['taxid_tag']
+        parent_taxid_column_name = "PARENT_TAXID"  # TODO macro
+        taxon_name_column_name = config['addtaxids']['taxon_name_tag']
+        taxid_column = Column.new_column(o_view, taxid_column_name, OBI_INT)
+        parent_taxid_column = Column.new_column(o_view, parent_taxid_column_name, OBI_INT)
+        taxon_name_column = o_view[taxon_name_column_name]
+
+        found_count = 0
+        not_found_count = 0
+        parent_found_count = 0
+
+        for i in range(len(o_view)):
+            PyErr_CheckSignals()
+            if pb is not None:
+                pb(i)
+            taxon_name = taxon_name_column[i]
+            taxon = taxo.get_taxon_by_name(taxon_name, res_anc)
+            if taxon is not None:
+                taxid_column[i] = taxon.taxid
+                found_count+=1
+            elif try_genus:  # try finding genus or other parent taxon from the first word
+                #print(i, o_view[i].id)
+                taxon_name_sp = taxon_name.split(b" ")
+                taxon = taxo.get_taxon_by_name(taxon_name_sp[0], res_anc)
+                if taxon is not None:
+                    parent_taxid_column[i] = taxon.taxid
+                    parent_found_count+=1
+                    if logfile:
+                        print("Found parent taxon for", tostr(taxon_name), file=logfile)
+                else:
+                    not_found_count+=1
+                    if logfile:
+                        print("No taxid found for", tostr(taxon_name), file=logfile)
+            else:
+                not_found_count+=1
+                if logfile:
+                    print("No taxid found for", tostr(taxon_name), file=logfile)
+
+    except Exception, e:
+        raise RollbackException("obi addtaxids error, rollbacking view: "+str(e), o_view)
+
+    if pb is not None:
+        pb(i, force=True)
+        print("", file=sys.stderr)
+
+    logger("info", "\nTaxids found: "+str(found_count)+"/"+str(len(o_view))+" ("+str(round(found_count*100.0/len(o_view), 2))+"%)")
+    if config['addtaxids']['try_genus_match']:
+        logger("info", "\nParent taxids found: "+str(parent_found_count)+"/"+str(len(o_view))+" ("+str(round(parent_found_count*100.0/len(o_view), 2))+"%)")
+    logger("info", "\nTaxids not found: "+str(not_found_count)+"/"+str(len(o_view))+" ("+str(round(not_found_count*100.0/len(o_view), 2))+"%)")
+
+    # Save command config in View and DMS comments
+    command_line = " ".join(sys.argv[1:])
+    input_dms_name=[input[0].name]
+    input_view_name=[i_view_name]
+    if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
+        input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
+        input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
+    o_view.write_config(config, "addtaxids", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
+    o_dms.record_command_line(command_line)
+
+    #print("\n\nOutput view:\n````````````", file=sys.stderr)
+    #print(repr(o_view), file=sys.stderr)
+
+    # stdout output: write to buffer
+    if type(output_0)==BufferedWriter:
+        logger("info", "Printing to output...")
+        o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
+        o_view.close()
+
+    # If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
+    if i_dms != o_dms or type(output_0)==BufferedWriter:
+        View.delete_view(o_dms, imported_view_name)
+        o_dms.close(force=True)
+    i_dms.close(force=True)
+
+    logger("info", "Done.")
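The heart of the new `obi addtaxids` command is a two-step lookup: the full scientific name first, then, with `-g/--try-genus-match`, the first word of the name (usually the genus), whose taxid is stored as a parent taxid. A simplified, self-contained sketch of that decision, where `lookup` is a hypothetical stand-in for `taxo.get_taxon_by_name()`:

```python
# Simplified sketch of the addtaxids lookup strategy shown in the new file above.
# `lookup(name)` stands in for taxo.get_taxon_by_name(); it returns a taxid or None.
def resolve_taxid(scientific_name, lookup, try_genus=True):
    taxid = lookup(scientific_name)
    if taxid is not None:
        return ("taxid", taxid)                  # exact match on the full name
    if try_genus:
        genus = scientific_name.split(b" ")[0]   # first word, usually the genus
        parent = lookup(genus)
        if parent is not None:
            return ("parent_taxid", parent)      # stored in the PARENT_TAXID column
    return ("not_found", None)

# Example with a toy lookup table:
table = {b"Homo sapiens": 9606, b"Homo": 9605}
assert resolve_taxid(b"Homo sapiens", table.get) == ("taxid", 9606)
assert resolve_taxid(b"Homo neanderthalensis", table.get) == ("parent_taxid", 9605)
```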
@@ -19,7 +19,7 @@ import time
 import sys
 
 
-__title__="Aligns one sequence column with itself or two sequence columns"
+__title__="Align one sequence column with itself or two sequence columns"
 
 
 def addOptions(parser):
@@ -158,7 +158,7 @@ def run(config):
     i_view_name = i_uri.split(b"/")[0]
     i_column_name = b""
     i_element_name = b""
-    if len(i_uri.split(b"/")) == 2:
+    if len(i_uri.split(b"/")) >= 2:
         i_column_name = i_uri.split(b"/")[1]
     if len(i_uri.split(b"/")) == 3:
         i_element_name = i_uri.split(b"/")[2]
@@ -181,7 +181,7 @@ def run(config):
     i_dms_name_2 = i_dms_2.name
     i_uri_2 = input_2[1]
     original_i_view_name_2 = i_uri_2.split(b"/")[0]
-    if len(i_uri_2.split(b"/")) == 2:
+    if len(i_uri_2.split(b"/")) >= 2:
         i_column_name_2 = i_uri_2.split(b"/")[1]
     if len(i_uri_2.split(b"/")) == 3:
         i_element_name_2 = i_uri_2.split(b"/")[2]
@@ -23,7 +23,7 @@ import os
 
 from cpython.exc cimport PyErr_CheckSignals
 
-__title__="Aligns paired-ended reads"
+__title__="Align paired-ended reads"
 
 
 
@@ -205,19 +205,25 @@ def run(config):
     if type(entries) == list:
         forward = entries[0]
         reverse = entries[1]
-        aligner = Kmer_similarity(forward, \
-                                  view2=reverse, \
-                                  kmer_size=config['alignpairedend']['kmersize'], \
-                                  reversed_column=None)
+        if len(forward) == 0 or len(reverse) == 0:
+            aligner = None
+        else:
+            aligner = Kmer_similarity(forward, \
+                                      view2=reverse, \
+                                      kmer_size=config['alignpairedend']['kmersize'], \
+                                      reversed_column=None)
     else:
-        aligner = Kmer_similarity(entries, \
-                                  column2=entries[REVERSE_SEQUENCE_COLUMN], \
-                                  qual_column2=entries[REVERSE_QUALITY_COLUMN], \
-                                  kmer_size=config['alignpairedend']['kmersize'], \
-                                  reversed_column=entries[b'reversed']) # column created by the ngsfilter tool
+        if len(entries) == 0:
+            aligner = None
+        else:
+            aligner = Kmer_similarity(entries, \
+                                      column2=entries[REVERSE_SEQUENCE_COLUMN], \
+                                      qual_column2=entries[REVERSE_QUALITY_COLUMN], \
+                                      kmer_size=config['alignpairedend']['kmersize'], \
+                                      reversed_column=entries[b'reversed']) # column created by the ngsfilter tool
 
     ba = alignmentIterator(entries, aligner)
 
     i = 0
     for ali in ba:
 
@@ -227,6 +233,10 @@ def run(config):
         PyErr_CheckSignals()
 
         consensus = o_view[i]
 
+        if two_views:
+            consensus[b"R1_parent"] = forward[i].id
+            consensus[b"R2_parent"] = reverse[i].id
+
         if not two_views:
             seqF = entries[i]
@@ -251,7 +261,7 @@ def run(config):
         pb(i, force=True)
     print("", file=sys.stderr)
 
-    if kmer_ali :
+    if kmer_ali and aligner is not None:
         aligner.free()
 
     # Save command config in View and DMS comments
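The guard added here avoids building a k-mer index over an empty view and, together with the later `aligner is not None` check, avoids freeing an index that was never created. A reduced sketch of the pattern with hypothetical stand-ins for the Cython objects:

```python
# Reduced sketch of the empty-input guard above (stand-in objects, not the real API).
class FakeAligner:
    def free(self):
        print("index freed")

def build_aligner(forward, reverse):
    if len(forward) == 0 or len(reverse) == 0:
        return None              # nothing to index, as in the patched code
    return FakeAligner()

aligner = build_aligner([], [])
# ... the alignment loop would simply run zero times here ...
if aligner is not None:          # mirrors `if kmer_ali and aligner is not None:`
    aligner.free()
```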
@@ -16,6 +16,8 @@ from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
                                         QUALITY_COLUMN, \
                                         COUNT_COLUMN, \
                                         TAXID_COLUMN
+from obitools3.dms.capi.obitypes cimport OBI_STR
+from obitools3.dms.column.column cimport Column
 
 import time
 import math
@@ -187,6 +189,8 @@ def sequenceTaggerGenerator(config, taxo=None):
                 else:
                     scn=None
                 seq[rank]=rtaxid
+                if "%s_name"%rank not in seq.view:
+                    Column.new_column(seq.view, "%s_name"%rank, OBI_STR)
                 seq["%s_name"%rank]=scn
 
             if add_rank:
@@ -16,7 +16,7 @@ import sys
 from cpython.exc cimport PyErr_CheckSignals
 
 
-__title__="Tag a set of sequences for PCR and sequencing errors identification"
+__title__="Build a reference database for ecotag"
 
 
 def addOptions(parser):
@@ -31,10 +31,9 @@ def addOptions(parser):
     group.add_argument('--threshold','-t',
                        action="store", dest="build_ref_db:threshold",
                        metavar='<THRESHOLD>',
-                       default=0.0,
+                       default=0.99,
                        type=float,
-                       help="Score threshold as a normalized identity, e.g. 0.95 for an identity of 95%%. Default: 0.00"
-                            " (no threshold).")
+                       help="Score threshold as a normalized identity, e.g. 0.95 for an identity of 95%%. Default: 0.99.")
 
 
 def run(config):
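For context, the threshold is a normalized identity, i.e. the fraction of identical positions over the alignment length, so raising the default from 0.0 to 0.99 only admits near-identical pairs into the reference database. A tiny illustration of that arithmetic (assumption: identity is computed as identical positions divided by alignment length, as the help text suggests):

```python
# Illustrative arithmetic for the --threshold default change (0.0 -> 0.99).
def normalized_identity(identical_positions, alignment_length):
    return identical_positions / alignment_length

assert normalized_identity(99, 100) >= 0.99        # kept with the new default
assert not normalized_identity(95, 100) >= 0.99    # dropped now, would have passed the old 0.0 default
```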
@@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar  # @UnresolvedImport
 from obitools3.dms import DMS
 from obitools3.dms.view.view cimport View
 from obitools3.uri.decode import open_uri
-from obitools3.apps.optiongroups import addMinimalOutputOption
+from obitools3.apps.optiongroups import addMinimalOutputOption, addNoProgressBarOption
 from obitools3.dms.view import RollbackException
 from obitools3.apps.config import logger
 from obitools3.utils cimport str2bytes
@@ -22,12 +22,13 @@ import sys
 from cpython.exc cimport PyErr_CheckSignals
 
 
-__title__="Concatenate views."
+__title__="Concatenate views"
 
 
 def addOptions(parser):
 
     addMinimalOutputOption(parser)
+    addNoProgressBarOption(parser)
 
     group=parser.add_argument_group('obi cat specific options')
 
@@ -47,9 +48,9 @@ def run(config):
 
     logger("info", "obi cat")
 
-    # Open the views to concatenate
-    iview_list = []
+    # Check the views to concatenate
     idms_list = []
+    iview_list = []
     total_len = 0
     remove_qual = False
     remove_rev_qual = False
@@ -67,8 +68,9 @@ def run(config):
         if REVERSE_QUALITY_COLUMN not in i_view:  # same as above for reverse quality
             remove_rev_qual = True
         total_len += len(i_view)
-        iview_list.append(i_view)
         idms_list.append(i_dms)
+        iview_list.append(i_view.name)
+        i_view.close()
 
     # Open the output: only the DMS
     output = open_uri(config['obi']['outputURI'],
@@ -95,10 +97,12 @@ def run(config):
         Column.new_column(o_view, REVERSE_QUALITY_COLUMN, OBI_QUAL, associated_column_name=REVERSE_SEQUENCE_COLUMN, associated_column_version=o_view[REVERSE_SEQUENCE_COLUMN].version)
 
     # Initialize multiple elements columns
-    if type(output_0)==BufferedWriter:
+    if type(output_0)!=BufferedWriter:
         dict_cols = {}
-        for v in iview_list:
+        for v_uri in config["cat"]["views_to_cat"]:
+            v = open_uri(v_uri)[1]
             for coln in v.keys():
+                col = v[coln]
                 if v[coln].nb_elements_per_line > 1:
                     if coln not in dict_cols:
                         dict_cols[coln] = {}
@@ -108,9 +112,10 @@ def run(config):
                     else:
                         dict_cols[coln]['eltnames'] = set(v[coln].elements_names + list(dict_cols[coln]['eltnames']))
                     dict_cols[coln]['nbelts'] = len(dict_cols[coln]['eltnames'])
+            v.close()
         for coln in dict_cols:
             Column.new_column(o_view, coln, dict_cols[coln]['obitype'],
-                              nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']))
+                              nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']), dict_column=True)
 
     # Initialize the progress bar
     if not config['obi']['noprogressbar']:
@@ -119,7 +124,8 @@ def run(config):
         pb = None
 
     i = 0
-    for v in iview_list:
+    for v_uri in config["cat"]["views_to_cat"]:
+        v = open_uri(v_uri)[1]
         for entry in v:
             PyErr_CheckSignals()
             if pb is not None:
@@ -128,8 +134,13 @@ def run(config):
                 rep = repr(entry)
                 output_0.write(str2bytes(rep)+b"\n")
             else:
-                o_view[i] = entry
+                try:
+                    o_view[i] = entry
+                except:
+                    print("\nError with entry:", repr(entry))
+                    print(repr(o_view))
             i+=1
+        v.close()
 
     # Deletes quality columns if needed
     if type(output_0)!=BufferedWriter:
@@ -144,7 +155,7 @@ def run(config):
 
     # Save command config in DMS comments
     command_line = " ".join(sys.argv[1:])
-    o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[v.name for v in iview_list])
+    o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[vname for vname in iview_list])
     o_dms.record_command_line(command_line)
 
     #print("\n\nOutput view:\n````````````", file=sys.stderr)
@@ -54,11 +54,11 @@ def addOptions(parser):
                        default=False,
                        help="Only sequences labeled as heads are kept in the output. Default: False")
 
-    group.add_argument('--cluster-tags', '-C',
-                       action="store_true",
-                       dest="clean:cluster-tags",
-                       default=False,
-                       help="Adds tags for each sequence giving its cluster's head and weight for each sample.")
+#    group.add_argument('--cluster-tags', '-C',
+#                       action="store_true",
+#                       dest="clean:cluster-tags",
+#                       default=False,
+#                       help="Adds tags for each sequence giving its cluster's head and weight for each sample.")
 
     group.add_argument('--thread-count','-p', # TODO should probably be in a specific option group
                        action="store", dest="clean:thread-count",
@@ -142,4 +142,5 @@ def run(config):
 
     i_dms.close(force=True)
 
     logger("info", "Done.")
+
@@ -10,7 +10,7 @@ from obitools3.dms.capi.obiview cimport COUNT_COLUMN
 from cpython.exc cimport PyErr_CheckSignals
 
 
-__title__="Counts sequence records"
+__title__="Count sequence records"
 
 
 def addOptions(parser):
@@ -29,6 +29,12 @@ def addOptions(parser):
                        default=False,
                        help="Prints only the total count of sequence records (if a sequence has no `count` attribute, its default count is 1) (default: False).")
 
+    group.add_argument('-c','--count-tag',
+                       action="store", dest="count:countcol",
+                       default='COUNT',
+                       type=str,
+                       help="Name of the tag/column associated with the count information (default: COUNT).")
+
 
 def run(config):
 
@@ -41,18 +47,20 @@ def run(config):
     if input is None:
         raise Exception("Could not read input")
     entries = input[1]
 
+    countcol = config['count']['countcol']
+
     count1 = len(entries)
     count2 = 0
 
-    if COUNT_COLUMN in entries and ((config['count']['sequence'] == config['count']['all']) or (config['count']['all'])) :
+    if countcol in entries and ((config['count']['sequence'] == config['count']['all']) or (config['count']['all'])) :
         for e in entries:
             PyErr_CheckSignals()
-            count2+=e[COUNT_COLUMN]
+            count2+=e[countcol]
 
-    if COUNT_COLUMN in entries and (config['count']['sequence'] == config['count']['all']):
+    if countcol in entries and (config['count']['sequence'] == config['count']['all']):
         print(count1,count2)
-    elif COUNT_COLUMN in entries and config['count']['all']:
+    elif countcol in entries and config['count']['all']:
         print(count2)
     else:
         print(count1)
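With `-c/--count-tag`, the summed total is read from an arbitrary column rather than the fixed COUNT column. The same logic in plain Python, with `entries` mocked as a list of dicts rather than a real view:

```python
# Plain-Python illustration of the counting logic above (not the real View API).
entries = [{"COUNT": 3, "weight": 1.5}, {"COUNT": 2, "weight": 0.5}]
countcol = "weight"                                   # value of -c/--count-tag
count1 = len(entries)                                 # number of records
count2 = sum(e[countcol] for e in entries if countcol in e)
print(count1, count2)                                 # 2 2.0
```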
@@ -175,6 +175,14 @@ def run(config):
     o_dms_name = output[0].name
     o_view_name = output[1]
 
+    # Open the taxonomy DMS
+    taxdms = open_uri(config['obi']['taxoURI'],
+                      dms_only=True)
+    if taxdms is None:
+        raise Exception("Could not open taxonomy DMS")
+    tax_dms = taxdms[0]
+    tax_dms_name = taxdms[0].name
+
     # Read taxonomy name
     taxonomy_name = config['obi']['taxoURI'].split("/")[-1]  # Robust in theory
 
@@ -197,7 +205,8 @@ def run(config):
 
     # TODO: primers in comments?
 
-    if obi_ecopcr(i_dms.name_with_full_path, tobytes(i_view_name), tobytes(taxonomy_name), \
+    if obi_ecopcr(i_dms.name_with_full_path, tobytes(i_view_name),
+                  tax_dms.name_with_full_path, tobytes(taxonomy_name), \
                   o_dms.name_with_full_path, tobytes(o_view_name), comments, \
                   tobytes(config['ecopcr']['primer1']), tobytes(config['ecopcr']['primer2']), \
                   config['ecopcr']['error'], \
@@ -41,6 +41,17 @@ def addOptions(parser):
                        help="Minimum identity to consider for assignment, as a normalized identity, e.g. 0.95 for an identity of 95%%. "
                             "Default: 0.00 (no threshold).")
 
+    group.add_argument('--minimum-circle','-c',
+                       action="store", dest="ecotag:bubble_threshold",
+                       metavar='<CIRCLE_THRESHOLD>',
+                       default=0.99,
+                       type=float,
+                       help="Minimum identity considered for the assignment circle "
+                            "(sequence is assigned to the LCA of all sequences within a similarity circle of the best matches; "
+                            "the threshold for this circle is the highest value between <CIRCLE_THRESHOLD> and the best assignment score found for the query sequence). "
+                            "Give value as a normalized identity, e.g. 0.95 for an identity of 95%%. "
+                            "Default: 0.99.")
+
 def run(config):
 
     DMS.obi_atexit()
@@ -66,9 +77,8 @@ def run(config):
     ref_view_name = ref[1]
 
     # Check that the threshold demanded is greater than or equal to the threshold used to build the reference database
-    if config['ecotag']['threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
-        print("Error: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).",
-              config['ecotag']['threshold'], ref_dms[ref_view_name].comments["ref_db_threshold"])
+    if config['ecotag']['bubble_threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
+        raise Exception(f"Error: The threshold demanded ({config['ecotag']['bubble_threshold']}) is lower than the threshold used to build the reference database ({float(ref_dms[ref_view_name].comments['ref_db_threshold'])}).")
 
     # Open the output: only the DMS
     output = open_uri(config['obi']['outputURI'],
@@ -113,8 +123,9 @@ def run(config):
     if obi_ecotag(i_dms.name_with_full_path, tobytes(i_view_name), \
                   ref_dms.name_with_full_path, tobytes(ref_view_name), \
                   taxo_dms.name_with_full_path, tobytes(taxonomy_name), \
-                  tobytes(o_view_name), comments,
-                  config['ecotag']['threshold']) < 0:
+                  tobytes(o_view_name), comments, \
+                  config['ecotag']['threshold'], \
+                  config['ecotag']['bubble_threshold']) < 0:
         raise Exception("Error running ecotag")
 
     # If the input and output DMS are not the same, export result view to output DMS
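As the new help text describes, the circle used for the LCA assignment is never looser than `--minimum-circle` but tightens to the best score found for the query. One way to read that rule (an interpretation of the help text, not code taken from the C layer):

```python
# Interpretation of the --minimum-circle help text added above.
def circle_threshold(bubble_threshold, best_score):
    # reference sequences with a score >= this value join the LCA circle
    return max(bubble_threshold, best_score)

assert circle_threshold(0.99, 0.97) == 0.99   # never looser than --minimum-circle
assert circle_threshold(0.99, 1.00) == 1.00   # tightens to the best match when it is higher
```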
@@ -6,6 +6,9 @@ from obitools3.apps.config import logger
 from obitools3.dms import DMS
 from obitools3.dms.obiseq import Nuc_Seq
 from obitools3.dms.capi.obiview cimport QUALITY_COLUMN
+from obitools3.writers.tab import TabWriter
+from obitools3.format.tab import TabFormat
+from obitools3.utils cimport tobytes, tostr
 
 from obitools3.apps.optiongroups import addMinimalInputOption, \
                                         addExportOutputOption, \
@@ -76,6 +79,13 @@ def run(config):
     else:
         pb = ProgressBar(withoutskip - skip, config)
 
+    if config['obi']['outputformat'] == b'metabaR':
+        # Check prefix
+        if "metabarprefix" not in config["obi"]:
+            raise Exception("Prefix needed when exporting for metabaR (--metabaR-prefix option)")
+        else:
+            metabaRprefix = config["obi"]["metabarprefix"]
+
     i=0
     for seq in iview :
         PyErr_CheckSignals()
@@ -91,6 +101,81 @@ def run(config):
         pb(i, force=True)
     print("", file=sys.stderr)
 
+    if config['obi']['outputformat'] == b'metabaR':
+
+        # Export ngsfilter file if view provided
+        if 'metabarngsfilter' in config['obi']:
+            ngsfilter_input = open_uri(config['obi']['metabarngsfilter'])
+            if ngsfilter_input is None:
+                raise Exception("Could not read ngsfilter view for metabaR output")
+            ngsfilter_view = ngsfilter_input[1]
+
+            ngsfilter_output = open(config['obi']['metabarprefix']+'.ngsfilter', 'w')
+
+            for line in ngsfilter_view:
+
+                line_to_print = b""
+                line_to_print += line[b'experiment']
+                line_to_print += b"\t"
+                line_to_print += line[b'sample']
+                line_to_print += b"\t"
+                line_to_print += line[b'forward_tag']
+                line_to_print += b":"
+                line_to_print += line[b'reverse_tag']
+                line_to_print += b"\t"
+                line_to_print += line[b'forward_primer']
+                line_to_print += b"\t"
+                line_to_print += line[b'reverse_primer']
+                line_to_print += b"\t"
+                line_to_print += line[b'additional_info']
+
+                print(tostr(line_to_print), file=ngsfilter_output)
+
+            if ngsfilter_input[0] != input[0]:
+                ngsfilter_input[0].close()
+            ngsfilter_output.close()
+
+        # Export sample metadata
+        samples_output = open(config['obi']['metabarprefix']+'_samples.csv', 'w')
+
+        # Export sample metadata file if view provided
+        if 'metabarsamples' in config['obi']:
+            samples_input = open_uri(config['obi']['metabarsamples'])
+            if samples_input is None:
+                raise Exception("Could not read sample view for metabaR output")
+            samples_view = samples_input[1]
+
+            # Export with tab formatter
+            TabWriter(TabFormat(header=True, sep='\t',),
+                      samples_output,
+                      header=True)
+
+            if samples_input[0] != input[0]:
+                samples_input[0].close()
+
+        # Else export just sample names from main view
+        else:
+
+            sample_list = []
+            if 'MERGED_sample' in iview:
+                sample_list = iview['MERGED_sample'].keys()
+            elif 'sample' not in iview:
+                for seq in iview:
+                    sample = seq['sample']
+                    if sample not in sample_list:
+                        sample_list.append(sample)
+            else:
+                logger("warning", "Can not read sample list from main view for metabaR sample list export")
+
+            print("sample_id", file=samples_output)
+            for sample in sample_list:
+                line_to_print = b""
+                line_to_print += sample
+                line_to_print += b"\t"
+                print(tostr(line_to_print), file=samples_output)
+
+        samples_output.close()
+
     # TODO save command in input dms?
 
     if not BrokenPipeError and not IOError:
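The exported ngsfilter file is tab-separated text with the forward and reverse tags joined by a colon, built field by field in the loop above. A compact sketch of one output line, with made-up values:

```python
# Sketch of one exported ngsfilter line (made-up values), matching the field
# order used above: experiment, sample, fwd_tag:rev_tag, primers, additional info.
fields = [b"exp1", b"sampleA", b"aacgt" + b":" + b"ttgca",
          b"GGGCAATCCTGAGCCAA", b"CCATTGAGTCTCTGCACCTATC", b"F"]
line = b"\t".join(fields)
print(line.decode("ascii"))
```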
@@ -91,7 +91,7 @@ def addOptions(parser):
                        metavar="<ATTRIBUTE_NAME>",
                        help="Select records with the attribute <ATTRIBUTE_NAME> "
                             "defined (not set to NA value). "
-                            "Several -a options can be used on the same "
+                            "Several -A options can be used on the same "
                             "command line.")
 
     group.add_argument("-L", "--lmax",
@@ -184,7 +184,7 @@ def Filter_generator(options, tax_filter, i_view):
     invert_selection = options["invert_selection"]
     id_set = None
     if "id_list" in options:
-        id_set = set(x.strip() for x in open(options["id_list"]))
+        id_set = set(x.strip() for x in open(options["id_list"], 'rb'))
 
     # Initialize the regular expression patterns
     seq_pattern = None
@@ -258,6 +258,13 @@ def Filter_generator(options, tax_filter, i_view):
 
 
 def Taxonomy_filter_generator(taxo, options):
+
+    if (("required_ranks" in options and options["required_ranks"]) or \
+        ("required_taxids" in options and options["required_taxids"]) or \
+        ("ignored_taxids" in options and options["ignored_taxids"])) and \
+        (taxo is None):
+        raise RollbackException("obi grep error: can't use taxonomy options without providing a taxonomy. Rollbacking view")
+
     if taxo is not None:
         def tax_filter(seq):
             good = True
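Opening the id list in `'rb'` mode means the ids are compared as bytes, matching how ids are stored in a view. A short, self-contained illustration using a temporary file in place of the real `--id-list` argument:

```python
# Illustration of the 'rb' change above: ids read from the list file are bytes,
# so they compare equal to the bytes ids stored in a view.
import os, tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"seq1\nseq2\n")
id_set = set(x.strip() for x in open(f.name, 'rb'))   # {b'seq1', b'seq2'}
assert b"seq1" in id_set and "seq1" not in id_set
os.remove(f.name)
```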
@@ -8,6 +8,7 @@ from obitools3.apps.optiongroups import addMinimalInputOption, addMinimalOutputO
 from obitools3.dms.view import RollbackException
 from obitools3.apps.config import logger
 from obitools3.utils cimport str2bytes
+from obitools3.apps.optiongroups import addExportOutputOption
 
 import time
 import sys
@@ -16,13 +17,14 @@ from io import BufferedWriter
 from cpython.exc cimport PyErr_CheckSignals
 
 
-__title__="Keep the N first lines of a view."
+__title__="Keep the N first lines of a view"
 
 
 def addOptions(parser):
 
     addMinimalInputOption(parser)
     addMinimalOutputOption(parser)
+    addExportOutputOption(parser)
     addNoProgressBarOption(parser)
 
     group=parser.add_argument_group('obi head specific options')
@@ -2,6 +2,7 @@
 
 import sys
 import os
+import re
 
 from obitools3.apps.progress cimport ProgressBar  # @UnresolvedImport
 from obitools3.dms.view.view cimport View
@@ -26,20 +27,25 @@ from obitools3.dms.capi.obiview cimport VIEW_TYPE_NUC_SEQS, \
                                         QUALITY_COLUMN, \
                                         COUNT_COLUMN, \
                                         TAXID_COLUMN, \
-                                        MERGED_PREFIX
+                                        MERGED_PREFIX, \
+                                        SCIENTIFIC_NAME_COLUMN
 
 from obitools3.dms.capi.obidms cimport obi_import_view
 
 from obitools3.dms.capi.obitypes cimport obitype_t, \
                                          OBI_VOID, \
-                                         OBI_QUAL
+                                         OBI_QUAL, \
+                                         OBI_STR, \
+                                         OBI_INT
 
 from obitools3.dms.capi.obierrno cimport obi_errno
 
 from obitools3.apps.optiongroups import addImportInputOption, \
                                         addTabularInputOption, \
                                         addTaxdumpInputOption, \
-                                        addMinimalOutputOption
+                                        addMinimalOutputOption, \
+                                        addNoProgressBarOption, \
+                                        addTaxonomyOption
 
 from obitools3.uri.decode import open_uri
 
@@ -48,9 +54,10 @@ from obitools3.apps.config import logger
 from cpython.exc cimport PyErr_CheckSignals
 
 from io import BufferedWriter
+import ast
 
 
-__title__="Imports sequences from different formats into a DMS"
+__title__="Import sequences from different formats into a DMS"
 
 
 default_config = { 'destview' : None,
@@ -67,7 +74,9 @@ def addOptions(parser):
     addImportInputOption(parser)
     addTabularInputOption(parser)
     addTaxdumpInputOption(parser)
+    addTaxonomyOption(parser)
     addMinimalOutputOption(parser)
+    addNoProgressBarOption(parser)
 
     group = parser.add_argument_group('obi import specific options')
 
@@ -83,6 +92,10 @@ def addOptions(parser):
                        help="If importing a view into another DMS, do it by importing each line, saving disk space if the original view "
                             "has a line selection associated.")
 
+#    group.add_argument('--only-id',
+#                       action="store", dest="import:onlyid",
+#                       help="only id")
+
 def run(config):
 
     cdef tuple input
@@ -94,6 +107,10 @@ def run(config):
     cdef obitype_t new_type
     cdef bint get_quality
     cdef bint NUC_SEQS_view
+    cdef bint silva
+    cdef bint rdp
+    cdef bint unite
+    cdef bint sintax
     cdef int nb_elts
     cdef object d
     cdef View view
@@ -104,6 +121,8 @@ def run(config):
     cdef Column seq_col
     cdef Column qual_col
     cdef Column old_column
+    cdef Column sci_name_col
+    cdef bytes sci_name
     cdef bint rewrite
     cdef dict dcols
     cdef int skipping
@@ -175,6 +194,16 @@ def run(config):
         logger("info", "Done.")
         return
 
+    # Open taxonomy if there is one
+    if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
+        taxo_uri = open_uri(config['obi']['taxoURI'])
+        if taxo_uri is None or taxo_uri[2] == bytes:
+            raise Exception("Couldn't open taxonomy")
+        taxo = taxo_uri[1]
+
+    else :
+        taxo = None
+
     # If importing a view between two DMS and not wanting to save space if line selection in original view, use C API
     if isinstance(input[1], View) and not config['import']['space_priority']:
         if obi_import_view(input[0].name_with_full_path, o_dms.name_with_full_path, input[1].name, tobytes((config['obi']['outputURI'].split('/'))[-1])) < 0 :
@@ -187,8 +216,11 @@ def run(config):
             logger("info", "Done.")
             return
 
-    if entry_count >= 0:
+    # Reinitialize the progress bar
+    if entry_count >= 0 and config['obi']['noprogressbar'] == False:
         pb = ProgressBar(entry_count, config)
+    else:
+        pb = None
 
     NUC_SEQS_view = False
     if isinstance(output[1], View) :
@@ -197,15 +229,38 @@ def run(config):
             NUC_SEQS_view = True
         else:
             raise NotImplementedError()
 
     # Save basic columns in variables for optimization
     if NUC_SEQS_view :
         id_col = view[ID_COLUMN]
         def_col = view[DEFINITION_COLUMN]
         seq_col = view[NUC_SEQUENCE_COLUMN]
 
+    # Prepare taxon scientific name and taxid refs if RDP/SILVA/UNITE/SINTAX formats
+    silva = False
+    rdp = False
+    unite = False
+    sintax=False
+    if 'inputformat' in config['obi'] and (config['obi']['inputformat'] == b"silva" or \
+                                           config['obi']['inputformat'] == b"rdp" or \
+                                           config['obi']['inputformat'] == b"unite" or \
+                                           config['obi']['inputformat'] == b"sintax"):
+        #if taxo is None:
+        #    raise Exception("A taxonomy (as built by 'obi import --taxdump') must be provided for SILVA and RDP files")
+        if config['obi']['inputformat'] == b"silva":
+            silva = True
+        elif config['obi']['inputformat'] == b"rdp":
+            rdp = True
+        elif config['obi']['inputformat'] == b"unite":
+            unite = True
+        elif config['obi']['inputformat'] == b"sintax":
+            sintax = True
+        sci_name_col = Column.new_column(view, SCIENTIFIC_NAME_COLUMN, OBI_STR)
+        if taxo is not None:
+            taxid_col = Column.new_column(view, TAXID_COLUMN, OBI_INT)
+
     dcols = {}
 
     # First read through the entries to prepare columns with dictionaries as they are very time-expensive to rewrite
     if config['import']['preread']:
         logger("info", "First readthrough...")
@@ -245,15 +300,21 @@ def run(config):
         for tag in dict_dict:
             dcols[tag] = (Column.new_column(view, tag, dict_dict[tag][1], \
                                             nb_elements_per_line=len(dict_dict[tag][0]), \
-                                            elements_names=list(dict_dict[tag][0])), \
+                                            elements_names=list(dict_dict[tag][0]), \
+                                            dict_column=True), \
                           dict_dict[tag][1])
 
 
         # Reinitialize the input
         if isinstance(input[0], CompressedFile):
             input_is_file = True
-        if entry_count >= 0:
+
+        # Reinitialize the progress bar
+        if entry_count >= 0 and config['obi']['noprogressbar'] == False:
             pb = ProgressBar(entry_count, config)
+        else:
+            pb = None
+
         try:
             input[0].close()
         except AttributeError:
@ -262,6 +323,11 @@ def run(config):
|
|||||||
if input is None:
|
if input is None:
|
||||||
raise Exception("Could not open input URI")
|
raise Exception("Could not open input URI")
|
||||||
|
|
||||||
|
# if 'onlyid' in config['import']:
|
||||||
|
# onlyid = tobytes(config['import']['onlyid'])
|
||||||
|
# else:
|
||||||
|
# onlyid = None
|
||||||
|
|
||||||
entries = input[1]
|
entries = input[1]
|
||||||
i = 0
|
i = 0
|
||||||
for entry in entries :
|
for entry in entries :
|
||||||
@ -279,10 +345,13 @@ def run(config):
|
|||||||
elif not i%50000:
|
elif not i%50000:
|
||||||
logger("info", "Imported %d entries", i)
|
logger("info", "Imported %d entries", i)
|
||||||
|
|
||||||
|
# if onlyid is not None and entry.id != onlyid:
|
||||||
|
# continue
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
||||||
if NUC_SEQS_view:
|
if NUC_SEQS_view:
|
||||||
id_col[i] = entry.id
|
id_col[i] = entry.id
|
||||||
def_col[i] = entry.definition
|
def_col[i] = entry.definition
|
||||||
seq_col[i] = entry.seq
|
seq_col[i] = entry.seq
|
||||||
# Check if there is a sequencing quality associated by checking the first entry # TODO haven't found a more robust solution yet
|
# Check if there is a sequencing quality associated by checking the first entry # TODO haven't found a more robust solution yet
|
||||||
@ -293,40 +362,89 @@ def run(config):
|
|||||||
qual_col = view[QUALITY_COLUMN]
|
qual_col = view[QUALITY_COLUMN]
|
||||||
if get_quality:
|
if get_quality:
|
||||||
qual_col[i] = entry.quality
|
qual_col[i] = entry.quality
|
||||||
|
|
||||||
|
# Parse taxon scientific name if RDP or Silva or Unite file
|
||||||
|
if (rdp or silva or unite or sintax):
|
||||||
|
if rdp or silva:
|
||||||
|
sci_names = entry.definition.split(b";")
|
||||||
|
sci_name_col[i] = sci_names[-1]
|
||||||
|
elif unite:
|
||||||
|
sci_names = entry.id.split(b'|')[-1].split(b';')
|
||||||
|
sci_name_col[i] = re.sub(b'[a-zA-Z]__', b'', sci_names[-1])
|
||||||
|
elif sintax:
|
||||||
|
reconstructed_line = entry.id+b' '+entry.definition[:-1]
|
||||||
|
splitted_reconstructed_line = reconstructed_line.split(b';')
|
||||||
|
taxa = splitted_reconstructed_line[1].split(b'=')[1]
|
||||||
|
taxa = splitted_reconstructed_line[1].split(b',')
|
||||||
|
sci_names = []
|
||||||
|
for t in taxa:
|
||||||
|
tf = t.split(b':')[1]
|
||||||
|
sci_names.append(tf)
|
||||||
|
sci_name_col[i] = sci_names[-1]
|
||||||
|
id_col[i] = reconstructed_line.split(b';')[0]
|
||||||
|
def_col[i] = reconstructed_line
|
||||||
|
|
||||||
|
# Fond taxid if taxonomy provided
|
||||||
|
if taxo is not None :
|
||||||
|
for sci_name in reversed(sci_names):
|
||||||
|
if unite:
|
||||||
|
sci_name = re.sub(b'[a-zA-Z]__', b'', sci_name)
|
||||||
|
if sci_name.split()[0] != b'unidentified' and sci_name.split()[0] != b'uncultured' and sci_name.split()[0] != b'metagenome':
|
||||||
|
taxon = taxo.get_taxon_by_name(sci_name)
|
||||||
|
if taxon is not None:
|
||||||
|
sci_name_col[i] = taxon.name
|
||||||
|
taxid_col[i] = taxon.taxid
|
||||||
|
#print(taxid_col[i], sci_name_col[i])
|
||||||
|
break
|
||||||
|
|
||||||
for tag in entry :
|
for tag in entry :
|
||||||
|
|
||||||
if tag != ID_COLUMN and tag != DEFINITION_COLUMN and tag != NUC_SEQUENCE_COLUMN and tag != QUALITY_COLUMN : # TODO dirty
|
if tag != ID_COLUMN and tag != DEFINITION_COLUMN and tag != NUC_SEQUENCE_COLUMN and tag != QUALITY_COLUMN : # TODO dirty
|
||||||
|
|
||||||
value = entry[tag]
|
value = entry[tag]
|
||||||
if tag == b"taxid":
|
if tag == b"taxid":
|
||||||
tag = TAXID_COLUMN
|
tag = TAXID_COLUMN
|
||||||
if tag == b"count":
|
if tag == b"count":
|
||||||
tag = COUNT_COLUMN
|
tag = COUNT_COLUMN
|
||||||
|
if tag == b"scientific_name":
|
||||||
|
tag = SCIENTIFIC_NAME_COLUMN
|
||||||
if tag[:7] == b"merged_":
|
if tag[:7] == b"merged_":
|
||||||
tag = MERGED_PREFIX+tag[7:]
|
tag = MERGED_PREFIX+tag[7:]
|
||||||
|
|
||||||
|
if type(value) == bytes and value[:1]==b"[" :
|
||||||
|
try:
|
||||||
|
if type(eval(value)) == list:
|
||||||
|
value = eval(value)
|
||||||
|
#print(value)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
if tag not in dcols :
|
if tag not in dcols :
|
||||||
|
|
||||||
value_type = type(value)
|
value_type = type(value)
|
||||||
nb_elts = 1
|
nb_elts = 1
|
||||||
value_obitype = OBI_VOID
|
value_obitype = OBI_VOID
|
||||||
|
dict_col = False
|
||||||
if value_type == dict or value_type == list :
|
|
||||||
|
if value_type == dict :
|
||||||
nb_elts = len(value)
|
nb_elts = len(value)
|
||||||
elt_names = list(value)
|
elt_names = list(value)
|
||||||
|
dict_col = True
|
||||||
else :
|
else :
|
||||||
nb_elts = 1
|
nb_elts = 1
|
||||||
elt_names = None
|
elt_names = None
|
||||||
|
|
||||||
|
if value_type == list :
|
||||||
|
tuples = True
|
||||||
|
else:
|
||||||
|
tuples = False
|
||||||
|
|
||||||
value_obitype = get_obitype(value)
|
value_obitype = get_obitype(value)
|
||||||
|
|
||||||
if value_obitype != OBI_VOID :
|
if value_obitype != OBI_VOID :
|
||||||
dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names), value_obitype)
|
dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names, dict_column=dict_col, tuples=tuples), value_obitype)
|
||||||
|
|
||||||
# Fill value
|
# Fill value
|
||||||
if value_type == dict and nb_elts == 1: # special case that makes the OBI3 create a 1 elt/line column which won't read a dict value
|
|
||||||
value = value[list(value.keys())[0]] # The solution is to transform the value in a simple atomic one acceptable by the column
|
|
||||||
dcols[tag][0][i] = value
|
dcols[tag][0][i] = value
|
||||||
|
|
||||||
# TODO else log error?
|
# TODO else log error?
|
||||||
@ -352,8 +470,8 @@ def run(config):
|
|||||||
# Fill value
|
# Fill value
|
||||||
dcols[tag][0][i] = value
|
dcols[tag][0][i] = value
|
||||||
|
|
||||||
except IndexError :
|
except (IndexError, OverflowError):
|
||||||
|
|
||||||
value_type = type(value)
|
value_type = type(value)
|
||||||
old_column = dcols[tag][0]
|
old_column = dcols[tag][0]
|
||||||
old_nb_elements_per_line = old_column.nb_elements_per_line
|
old_nb_elements_per_line = old_column.nb_elements_per_line
|
||||||
@ -400,18 +518,19 @@ def run(config):
|
|||||||
dcols[tag][0][i] = value
|
dcols[tag][0][i] = value
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print("\nCould not import sequence id:", entry.id, "(error raised:", e, ")")
|
print("\nCould not import sequence:\n", repr(entry), "\nError raised:", e, "\n/!\ Check if '--input-na-string' option needs to be set")
|
||||||
if 'skiperror' in config['obi'] and not config['obi']['skiperror']:
|
if 'skiperror' in config['obi'] and not config['obi']['skiperror']:
|
||||||
raise e
|
raise e
|
||||||
else:
|
else:
|
||||||
pass
|
pass
|
||||||
|
i-=1 # overwrite problematic entry
|
||||||
|
|
||||||
i+=1
|
i+=1
|
||||||
|
|
||||||
if pb is not None:
|
if pb is not None:
|
||||||
pb(i, force=True)
|
pb(i, force=True)
|
||||||
print("", file=sys.stderr)
|
print("", file=sys.stderr)
|
||||||
logger("info", "Imported %d entries", i)
|
logger("info", "Imported %d entries", len(view))
|
||||||
|
|
||||||
# Save command config in View and DMS comments
|
# Save command config in View and DMS comments
|
||||||
command_line = " ".join(sys.argv[1:])
|
command_line = " ".join(sys.argv[1:])
|
||||||
|
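The SINTAX branch added above recovers the lowest-rank taxon name from the "tax=" field of the header line. A condensed, standalone sketch of that parsing, run on a made-up header (the record content below is illustrative only and is not taken from the patch):

    # Hypothetical SINTAX-style header, already reassembled as id + ' ' + definition
    reconstructed_line = b"seq1;tax=d:Bacteria,p:Firmicutes,g:Clostridium"
    splitted = reconstructed_line.split(b';')      # [b'seq1', b'tax=d:Bacteria,p:Firmicutes,g:Clostridium']
    taxa = splitted[1].split(b',')                 # [b'tax=d:Bacteria', b'p:Firmicutes', b'g:Clostridium']
    sci_names = [t.split(b':')[1] for t in taxa]   # [b'Bacteria', b'Firmicutes', b'Clostridium']
    sci_name = sci_names[-1]                       # lowest rank, stored in SCIENTIFIC_NAME_COLUMN
    record_id = splitted[0]                        # written back to the id column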
@@ -31,27 +31,11 @@ def run(config):
input = open_uri(config['obi']['inputURI'])
if input is None:
raise Exception("Could not read input")
-if input[2] == DMS and not config['ls']['longformat']:
-dms = input[0]
-l = []
-for viewname in input[0]:
-view = dms[viewname]
-l.append(tostr(viewname) + "\t(Date created: " + str(bytes2str_object(view.comments["Date created"]))+")")
-view.close()
-l.sort()
-for v in l:
-print(v)
+# Print representation
+if config['ls']['longformat']:
+print(input[1].repr_longformat())
else:
print(repr(input[1]))
-if input[2] == DMS:
-taxolist = ["\n### Taxonomies:"]
-for t in Taxonomy.list_taxos(input[0]):
-taxolist.append("\t"+tostr(t))
-if len(taxolist) > 1:
-for t in taxolist:
-print(t)
-if config['ls']['longformat'] and len(input[1].comments) > 0:
-print("\n### Comments:")
-print(str(input[1].comments))

input[0].close(force=True)
python/obitools3/commands/ngsfilter.pyx  (23 lines changed, Normal file → Executable file)

@@ -24,11 +24,7 @@ from cpython.exc cimport PyErr_CheckSignals
from io import BufferedWriter


-#REVERSE_SEQ_COLUMN_NAME = b"REVERSE_SEQUENCE" # used by alignpairedend tool
-#REVERSE_QUALITY_COLUMN_NAME = b"REVERSE_QUALITY" # used by alignpairedend tool
+__title__="Assign sequence records to the corresponding experiment/sample based on DNA tags and primers"


-__title__="Assigns sequence records to the corresponding experiment/sample based on DNA tags and primers"


def addOptions(parser):

@@ -268,6 +264,10 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):

not_aligned = len(sequences) > 1
sequences[0] = sequences[0].clone()

+if not_aligned:
+sequences[0][b"R1_parent"] = sequences[0].id
+sequences[0][b"R2_parent"] = sequences[1].id

if not_aligned:
sequences[1] = sequences[1].clone()

@@ -275,7 +275,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool

for seq in sequences:
-if hasattr(seq, "quality_array"):
+if hasattr(seq, "quality_array") and seq.quality_array is not None:
q = -reduce(lambda x,y:x+y,(math.log10(z) for z in seq.quality_array),0)/len(seq.quality_array)*10
seq[b'avg_quality']=q
q = -reduce(lambda x,y:x+y,(math.log10(z) for z in seq.quality_array[0:10]),0)

@@ -326,7 +326,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[0] = sequences[0][directmatch[1][2]:]
else:
sequences[1] = sequences[1][directmatch[1][2]:]
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool

if directmatch[0].forward:

@@ -373,7 +373,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[0] = sequences[0][:r[1]]
else:
sequences[1] = sequences[1][:r[1]]
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
# do the same on the other seq
if first_match_first_seq:

@@ -398,7 +398,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
seq_to_match = sequences[0]
reversematch = []
# Compute begin
-begin=directmatch[1][2]+1 # end of match + 1 on the same sequence
+#begin=directmatch[1][2]+1 # end of match + 1 on the same sequence -- No, already cut out forward primer
# Try reverse matching on the other sequence:
new_seq = True
pattern = 0

@@ -412,7 +412,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
primer=p
# Saving original primer as 4th member of the tuple to serve as correct key in infos dict even if it might have been reversed complemented
# (3rd member already used by directmatch)
-reversematch.append((primer, primer(seq_to_match, same_sequence=not new_seq, pattern=pattern, begin=begin), None, p))
+reversematch.append((primer, primer(seq_to_match, same_sequence=not new_seq, pattern=pattern, begin=0), None, p))
new_seq = False
pattern+=1
# Choose match closer to the end of the sequence

@@ -454,7 +454,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[1] = sequences[1][reversematch[1][2]:]
if not directmatch[0].forward:
sequences[1] = sequences[1].reverse_complement
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
else:
sequences[0] = sequences[0][reversematch[1][2]:]

@@ -649,6 +649,7 @@ def run(config):

g = 0
u = 0
+i = 0
no_tags = config['ngsfilter']['notags']
try:
for i in range(entries_len):
python/obitools3/commands/rm.pyx  (new file, 87 lines)

@@ -0,0 +1,87 @@
#cython: language_level=3

from obitools3.uri.decode import open_uri
from obitools3.apps.config import logger
from obitools3.dms import DMS
from obitools3.apps.optiongroups import addMinimalInputOption
from obitools3.dms.view.view cimport View
from obitools3.utils cimport tostr
import os
import shutil

__title__="Delete a view"


def addOptions(parser):
    addMinimalInputOption(parser)


def run(config):

    DMS.obi_atexit()

    logger("info", "obi rm")

    # Open the input
    input = open_uri(config['obi']['inputURI'])
    if input is None:
        raise Exception("Could not read input")

    # Check that it's a view
    if isinstance(input[1], View) :
        view = input[1]
    else:
        raise NotImplementedError()

    dms = input[0]

    # Get the path to the view file to remove
    path = dms.full_path  # dms path
    view_path=path+b"/VIEWS/"
    view_path+=view.name
    view_path+=b".obiview"

    to_remove = {}
    # For each column:
    for col_alias in view.keys():
        col = view[col_alias]
        col_name = col.original_name
        col_version = col.version
        col_type = col.data_type
        col_ref = (col_name, col_version)
        # build file name and AVL file names
        col_file_name = f"{tostr(path)}/{tostr(col.original_name)}.obicol/{tostr(col.original_name)}@{col.version}.odc"
        if col_type in [b'OBI_CHAR', b'OBI_QUAL', b'OBI_STR', b'OBI_SEQ']:
            avl_file_name = f"{tostr(path)}/OBIBLOB_INDEXERS/{tostr(col.original_name)}_{col.version}_indexer"
        else:
            avl_file_name = None
        to_remove[col_ref] = [col_file_name, avl_file_name]

    # For each view:
    do_not_remove = []
    for vn in dms:
        v = dms[vn]
        # ignore the one being deleted
        if v.name != view.name:
            # check that none of the column is referenced, if referenced, remove from list to remove
            cols = [(v[c].original_name, v[c].version) for c in v.keys()]
            for col_ref in to_remove:
                if col_ref in cols:
                    do_not_remove.append(col_ref)

    for nr in do_not_remove:
        to_remove.pop(nr)

    # Close the view and the DMS
    view.close()
    input[0].close(force=True)

    #print(to_remove)

    # rm AFTER view and DMS close
    os.remove(view_path)
    for col in to_remove:
        os.remove(to_remove[col][0])
        if to_remove[col][1] is not None:
            shutil.rmtree(to_remove[col][1])
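The reference check in rm.pyx relies on column files being shared between views through their (original_name, version) pair, so a file is only deleted when no other view still points at it. A toy illustration of that bookkeeping with plain Python data (the column names, versions and paths below are made up):

    to_remove = {(b"COUNT", 0): ["COUNT.obicol/COUNT@0.odc", None],
                 (b"NUC_SEQ", 2): ["NUC_SEQ.obicol/NUC_SEQ@2.odc", "OBIBLOB_INDEXERS/NUC_SEQ_2_indexer"]}
    referenced_elsewhere = [(b"COUNT", 0)]          # still used by another view
    for ref in [r for r in to_remove if r in referenced_elsewhere]:
        to_remove.pop(ref)                          # only the NUC_SEQ files remain scheduled for deletion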
@@ -36,7 +36,7 @@ NULL_VALUE = {OBI_BOOL: OBIBool_NA,
OBI_STR: b""}


-__title__="Sort view lines according to the value of a given attribute."
+__title__="Sort view lines according to the value of a given attribute"


def addOptions(parser):
python/obitools3/commands/split.pyx  (new file, 105 lines)

@@ -0,0 +1,105 @@
#cython: language_level=3

from obitools3.apps.progress cimport ProgressBar  # @UnresolvedImport
from obitools3.dms import DMS
from obitools3.dms.view.view cimport View, Line_selection
from obitools3.uri.decode import open_uri
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
from obitools3.dms.view import RollbackException
from obitools3.apps.config import logger
from obitools3.utils cimport tobytes

import sys
from cpython.exc cimport PyErr_CheckSignals


__title__="Split"


def addOptions(parser):

    addMinimalInputOption(parser)
    addNoProgressBarOption(parser)

    group=parser.add_argument_group("obi split specific options")

    group.add_argument('-p','--prefix',
                       action="store", dest="split:prefix",
                       metavar="<PREFIX>",
                       help="Prefix added to each subview name (included undefined)")

    group.add_argument('-t','--tag-name',
                       action="store", dest="split:tagname",
                       metavar="<TAG_NAME>",
                       help="Attribute/tag used to split the input")

    group.add_argument('-u','--undefined',
                       action="store", dest="split:undefined",
                       default=b'UNDEFINED',
                       metavar="<VIEW_NAME>",
                       help="Name of the view where undefined sequenced are stored (will be PREFIX_VIEW_NAME)")


def run(config):

    DMS.obi_atexit()

    logger("info", "obi split")

    # Open the input
    input = open_uri(config["obi"]["inputURI"])
    if input is None:
        raise Exception("Could not read input view")
    i_dms = input[0]
    i_view = input[1]

    # Initialize the progress bar
    if config['obi']['noprogressbar'] == False:
        pb = ProgressBar(len(i_view), config)
    else:
        pb = None

    tag_to_split = config["split"]["tagname"]
    undefined = tobytes(config["split"]["undefined"])
    selections = {}

    # Go through input view and split
    for i in range(len(i_view)):
        PyErr_CheckSignals()
        if pb is not None:
            pb(i)
        line = i_view[i]
        if tag_to_split not in line or line[tag_to_split] is None or len(line[tag_to_split])==0:
            value = undefined
        else:
            value = line[tag_to_split]
        if value not in selections:
            selections[value] = Line_selection(i_view)
        selections[value].append(i)

    if pb is not None:
        pb(len(i_view), force=True)
        print("", file=sys.stderr)

    # Create output views with the line selection
    try:
        for cat in selections:
            o_view_name = config["split"]["prefix"].encode()+cat
            o_view = selections[cat].materialize(o_view_name)
            # Save command config in View and DMS comments
            command_line = " ".join(sys.argv[1:])
            input_dms_name=[input[0].name]
            input_view_name=[input[1].name]
            if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
                input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
                input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
            o_view.write_config(config, "split", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
            o_view.close()
    except Exception, e:
        raise RollbackException("obi split error, rollbacking view: "+str(e), o_view)

    i_dms.record_command_line(command_line)
    i_dms.close(force=True)

    logger("info", "Done.")
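obi split groups line indices by the value of one tag and then materializes one view per group; the grouping itself is ordinary dictionary bucketing. A stripped-down sketch with made-up records, where plain lists stand in for the view and for Line_selection:

    records = [{b"sample": b"A"}, {b"sample": b"B"}, {}, {b"sample": b"A"}]
    undefined = b"UNDEFINED"
    selections = {}
    for i, line in enumerate(records):
        value = line.get(b"sample") or undefined     # missing, None or empty falls back to UNDEFINED
        selections.setdefault(value, []).append(i)
    # selections == {b"A": [0, 3], b"B": [1], b"UNDEFINED": [2]}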
@@ -16,7 +16,7 @@ import sys
from cpython.exc cimport PyErr_CheckSignals


-__title__="Compute basic statistics for attribute values."
+__title__="Compute basic statistics for attribute values"

'''
`obi stats` computes basic statistics for attribute values of sequence records.

@@ -119,9 +119,12 @@ def mean(values, options):

def variance(v):
if len(v)==1:
return 0
s = reduce(lambda x,y:(x[0]+y,x[1]+y**2),v,(0.,0.))
-return s[1]/(len(v)-1) - s[0]**2/len(v)/(len(v)-1)
+var = round(s[1]/(len(v)-1) - s[0]**2/len(v)/(len(v)-1), 5) # round to go around shady python rounding stuff when var is actually 0
+if var == -0.0: # then fix -0 to +0 if was rounded to -0
+var = 0.0
+return var


def varpop(values, options):

@@ -154,7 +157,7 @@ def run(config):
else :
taxo = None

-statistics = set(config['stats']['minimum']) | set(config['stats']['maximum']) | set(config['stats']['mean'])
+statistics = set(config['stats']['minimum']) | set(config['stats']['maximum']) | set(config['stats']['mean']) | set(config['stats']['var']) | set(config['stats']['sd'])
total = 0
catcount={}
totcount={}

@@ -195,7 +198,7 @@ def run(config):
except KeyError:
totcount[category]=totcount.get(category,0)+1
for var in statistics:
-if var in line:
+if var in line and line[var] is not None:
v = line[var]
if var not in values:
values[var]={}

@@ -238,14 +241,34 @@ def run(config):
else:
sdvar= "%s"

-hcat = "\t".join([pcat % x for x in config['stats']['categories']]) + "\t" +\
-"\t".join([minvar % x for x in config['stats']['minimum']]) + "\t" +\
-"\t".join([maxvar % x for x in config['stats']['maximum']]) + "\t" +\
-"\t".join([meanvar % x for x in config['stats']['mean']]) + "\t" +\
-"\t".join([varvar % x for x in config['stats']['var']]) + "\t" +\
-"\t".join([sdvar % x for x in config['stats']['sd']]) + \
-"\t count" + \
-"\t total"
+hcat = ""
+for x in config['stats']['categories']:
+hcat += pcat % x
+hcat += "\t"
+for x in config['stats']['minimum']:
+hcat += minvar % x
+hcat += "\t"
+for x in config['stats']['maximum']:
+hcat += maxvar % x
+hcat += "\t"
+for x in config['stats']['mean']:
+hcat += meanvar % x
+hcat += "\t"
+for x in config['stats']['var']:
+hcat += varvar % x
+hcat += "\t"
+for x in config['stats']['sd']:
+hcat += sdvar % x
+hcat += "\t"
+hcat += "count\ttotal"

print(hcat)
sorted_stats = sorted(catcount.items(), key = lambda kv:(totcount[kv[0]]), reverse=True)
for i in range(len(sorted_stats)):

@@ -265,8 +288,8 @@ def run(config):
print((("%%%df" % lvarp[m]) % varp[m][c])+"\t", end="")
for m in config['stats']['sd']:
print((("%%%df" % lsigma[m]) % sigma[m][c])+"\t", end="")
-print("%7d" %catcount[c], end="")
-print("%9d" %totcount[c])
+print("%d" %catcount[c]+"\t", end="")
+print("%d" %totcount[c]+"\t")

input[0].close(force=True)
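The variance change above guards against floating-point noise: with the single-pass formula, a constant attribute can come out as a tiny nonzero value instead of exactly 0. A small illustration of the guard, using arbitrary sample values (not taken from the patch):

    from functools import reduce

    v = [0.1, 0.1, 0.1]
    s = reduce(lambda x, y: (x[0] + y, x[1] + y**2), v, (0., 0.))
    raw = s[1]/(len(v)-1) - s[0]**2/len(v)/(len(v)-1)   # may be e.g. -1e-18 rather than exactly 0
    var = round(raw, 5)
    if var == -0.0:      # normalize the sign so "0.0" is reported, as in the patch
        var = 0.0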
@@ -15,7 +15,7 @@ from cpython.exc cimport PyErr_CheckSignals
from io import BufferedWriter


-__title__="Keep the N last lines of a view."
+__title__="Keep the N last lines of a view"


def addOptions(parser):
python/obitools3/commands/taxonomy.pyx  (new file, 230 lines)

@@ -0,0 +1,230 @@
#cython: language_level=3

from obitools3.apps.progress cimport ProgressBar  # @UnresolvedImport
from obitools3.dms import DMS
from obitools3.dms.view.view cimport View, Line_selection
from obitools3.uri.decode import open_uri
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
from obitools3.dms.view import RollbackException
from obitools3.dms.column.column cimport Column
from functools import reduce
from obitools3.apps.config import logger
from obitools3.utils cimport tobytes, str2bytes, tostr
from io import BufferedWriter
from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
                                        ID_COLUMN, \
                                        DEFINITION_COLUMN, \
                                        QUALITY_COLUMN, \
                                        COUNT_COLUMN, \
                                        TAXID_COLUMN
from obitools3.dms.capi.obitypes cimport OBI_INT
from obitools3.dms.capi.obitaxonomy cimport MIN_LOCAL_TAXID
import time
import math
import sys

from cpython.exc cimport PyErr_CheckSignals


__title__="Add taxa with a new generated taxid to an NCBI taxonomy database"


def addOptions(parser):

    addMinimalInputOption(parser)
    addTaxonomyOption(parser)
    addMinimalOutputOption(parser)
    addNoProgressBarOption(parser)

    group=parser.add_argument_group('obi taxonomy specific options')

    group.add_argument('-n', '--taxon-name-tag',
                       action="store",
                       dest="taxonomy:taxon_name_tag",
                       metavar="<SCIENTIFIC_NAME_TAG>",
                       default=b"SCIENTIFIC_NAME",
                       help="Name of the tag giving the scientific name of the taxon "
                            "(default: 'SCIENTIFIC_NAME').")

    # group.add_argument('-g', '--try-genus-match',
    #                    action="store_true", dest="taxonomy:try_genus_match",
    #                    default=False,
    #                    help="Try matching the first word of <SCIENTIFIC_NAME_TAG> when can't find corresponding taxid for a taxon. "
    #                         "If there is a match it is added in the 'parent_taxid' tag. (Can be used by 'obi taxonomy' to add the taxon under that taxid).")

    group.add_argument('-a', '--restricting-ancestor',
                       action="store",
                       dest="taxonomy:restricting_ancestor",
                       metavar="<RESTRICTING_ANCESTOR>",
                       default=None,
                       help="Enables to restrict the addition of taxids under an ancestor specified by its taxid.")

    group.add_argument('-t', '--taxid-tag',
                       action="store",
                       dest="taxonomy:taxid_tag",
                       metavar="<TAXID_TAG>",
                       default=b"TAXID",
                       help="Name of the tag to store the new taxid "
                            "(default: 'TAXID').")

    group.add_argument('-l', '--log-file',
                       action="store",
                       dest="taxonomy:log_file",
                       metavar="<LOG_FILE>",
                       default='',
                       help="Path to a log file to write informations about added taxids.")


def run(config):

    DMS.obi_atexit()

    logger("info", "obi taxonomy")

    # Open the input
    input = open_uri(config['obi']['inputURI'])
    if input is None:
        raise Exception("Could not read input view")
    i_dms = input[0]
    i_view = input[1]
    i_view_name = input[1].name

    # Open the output: only the DMS, as the output view is going to be created by cloning the input view
    # (could eventually be done via an open_uri() argument)
    output = open_uri(config['obi']['outputURI'],
                      input=False,
                      dms_only=True)
    if output is None:
        raise Exception("Could not create output view")
    o_dms = output[0]
    output_0 = output[0]
    o_view_name = output[1]

    # stdout output: create temporary view
    if type(output_0)==BufferedWriter:
        o_dms = i_dms
        i=0
        o_view_name = b"temp"
        while o_view_name in i_dms:  # Making sure view name is unique in output DMS
            o_view_name = o_view_name+b"_"+str2bytes(str(i))
            i+=1
        imported_view_name = o_view_name

    # If the input and output DMS are not the same, import the input view in the output DMS before cloning it to modify it
    # (could be the other way around: clone and modify in the input DMS then import the new view in the output DMS)
    if i_dms != o_dms:
        imported_view_name = i_view_name
        i=0
        while imported_view_name in o_dms:  # Making sure view name is unique in output DMS
            imported_view_name = i_view_name+b"_"+str2bytes(str(i))
            i+=1
        View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], i_view_name, imported_view_name)
        i_view = o_dms[imported_view_name]

    # Clone output view from input view
    o_view = i_view.clone(o_view_name)
    if o_view is None:
        raise Exception("Couldn't create output view")
    i_view.close()

    # Open taxonomy
    taxo_uri = open_uri(config['obi']['taxoURI'])
    if taxo_uri is None or taxo_uri[2] == bytes:
        raise Exception("Couldn't open taxonomy")
    taxo = taxo_uri[1]

    # Initialize the progress bar
    if config['obi']['noprogressbar'] == False:
        pb = ProgressBar(len(o_view), config)
    else:
        pb = None

    try:
        if config['taxonomy']['log_file']:
            logfile = open(config['taxonomy']['log_file'], 'w')
        else:
            logfile = sys.stdout
        if 'restricting_ancestor' in config['taxonomy']:
            res_anc = int(config['taxonomy']['restricting_ancestor'])
        else:
            res_anc = None
        taxid_column_name = config['taxonomy']['taxid_tag']
        parent_taxid_column_name = "PARENT_TAXID"  # TODO macro
        taxon_name_column_name = config['taxonomy']['taxon_name_tag']
        taxid_column = Column.new_column(o_view, taxid_column_name, OBI_INT)
        if parent_taxid_column_name in o_view:
            parent_taxid_column = o_view[parent_taxid_column_name]
        else:
            parent_taxid_column = None
            #parent_taxid_column = Column.new_column(o_view, parent_taxid_column_name, OBI_INT)
        taxon_name_column = o_view[taxon_name_column_name]

        for i in range(len(o_view)):
            PyErr_CheckSignals()
            #if pb is not None:
            #    pb(i)
            taxon_name = taxon_name_column[i]
            taxon = taxo.get_taxon_by_name(taxon_name, res_anc)
            if taxon is not None:
                taxid_column[i] = taxon.taxid
                if logfile:
                    print(f"Found taxon '{tostr(taxon_name)}' already existing with taxid {taxid_column[i]}", file=logfile)
            else:  # try finding genus or other parent taxon from the first word
                #print(i, o_view[i].id)
                if parent_taxid_column is not None and parent_taxid_column[i] is not None:
                    taxid_column[i] = taxo.add_taxon(taxon_name, 'species', parent_taxid_column[i])
                    if logfile:
                        print(f"Adding taxon '{tostr(taxon_name)}' under provided parent {parent_taxid_column[i]} with taxid {taxid_column[i]}", file=logfile)
                else:
                    taxon_name_sp = taxon_name.split(b" ")
                    taxon = taxo.get_taxon_by_name(taxon_name_sp[0], res_anc)
                    if taxon is not None:
                        parent_taxid_column[i] = taxon.taxid
                        taxid_column[i] = taxo.add_taxon(taxon_name, 'species', taxon.taxid)
                        if logfile:
                            print(f"Adding taxon '{tostr(taxon_name)}' under '{tostr(taxon.name)}' ({taxon.taxid}) with taxid {taxid_column[i]}", file=logfile)
                    else:
                        taxid_column[i] = taxo.add_taxon(taxon_name, 'species', res_anc)
                        if logfile:
                            print(f"Adding taxon '{tostr(taxon_name)}' under provided restricting ancestor {res_anc} with taxid {taxid_column[i]}", file=logfile)

        taxo.write(taxo.name, update=True)

    except Exception, e:
        raise RollbackException("obi taxonomy error, rollbacking view: "+str(e), o_view)

    #if pb is not None:
    #    pb(i, force=True)
    #    print("", file=sys.stderr)

    #logger("info", "\nTaxa already in the taxonomy: "+str(found_count)+"/"+str(len(o_view))+" ("+str(round(found_count*100.0/len(o_view), 2))+"%)")
    #logger("info", "\nParent taxids found: "+str(parent_found_count)+"/"+str(len(o_view))+" ("+str(round(parent_found_count*100.0/len(o_view), 2))+"%)")
    #logger("info", "\nTaxids not found: "+str(not_found_count)+"/"+str(len(o_view))+" ("+str(round(not_found_count*100.0/len(o_view), 2))+"%)")

    # Save command config in View and DMS comments
    command_line = " ".join(sys.argv[1:])
    input_dms_name=[input[0].name]
    input_view_name=[i_view_name]
    if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
        input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
        input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
    o_view.write_config(config, "taxonomy", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
    o_dms.record_command_line(command_line)

    #print("\n\nOutput view:\n````````````", file=sys.stderr)
    #print(repr(o_view), file=sys.stderr)

    # stdout output: write to buffer
    if type(output_0)==BufferedWriter:
        logger("info", "Printing to output...")
        o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
        o_view.close()

    # If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
    if i_dms != o_dms or type(output_0)==BufferedWriter:
        View.delete_view(o_dms, imported_view_name)
        o_dms.close(force=True)
    i_dms.close(force=True)

    logger("info", "Done.")
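The taxid assignment loop in taxonomy.pyx tries progressively weaker anchors before giving up. A condensed restatement of that decision order (it reuses the get_taxon_by_name and add_taxon calls that appear above, but omits the logging and parent-taxid bookkeeping; the helper name resolve_taxid is not part of the patch):

    def resolve_taxid(taxo, name, parent_taxid=None, res_anc=None):
        taxon = taxo.get_taxon_by_name(name, res_anc)
        if taxon is not None:                  # 1. exact scientific name already in the taxonomy
            return taxon.taxid
        if parent_taxid is not None:           # 2. a parent taxid was provided for this record
            return taxo.add_taxon(name, 'species', parent_taxid)
        genus = taxo.get_taxon_by_name(name.split(b" ")[0], res_anc)
        if genus is not None:                  # 3. the first word (usually the genus) is known
            return taxo.add_taxon(name, 'species', genus.taxid)
        return taxo.add_taxon(name, 'species', res_anc)   # 4. fall back to the restricting ancestor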
@@ -39,7 +39,7 @@ COL_COMMENTS_MAX_LEN = 2048
MAX_INT = 2147483647 # used to generate random float values


-__title__="Tests if the obitools are working properly"
+__title__="Test if the obitools are working properly"


default_config = {

@@ -301,8 +301,11 @@ def fill_column(config, infos, col) :
def create_random_column(config, infos) :
alias = random.choice([b'', random_unique_name(infos)])
tuples = random.choice([True, False])
+dict_column = False
if not tuples :
nb_elements_per_line=random.randint(1, config['test']['maxelts'])
+if nb_elements_per_line > 1:
+dict_column = True
elements_names = []
for i in range(nb_elements_per_line) :
elements_names.append(random_unique_element_name(config, infos))

@@ -318,6 +321,7 @@ def create_random_column(config, infos) :
data_type,
nb_elements_per_line=nb_elements_per_line,
elements_names=elements_names,
+dict_column=dict_column,
tuples=tuples,
comments=random_comments(config),
alias=alias

@@ -442,7 +446,7 @@ def addOptions(parser):
default=20,
type=int,
help="Maximum length of tuples. "
-"Default: 50")
+"Default: 20")

group.add_argument('--max_ini_col_count','-o',
action="store", dest="test:maxinicolcount",

@@ -455,7 +459,7 @@ def addOptions(parser):
group.add_argument('--max_line_nb','-l',
action="store", dest="test:maxlinenb",
metavar='<MAX_LINE_NB>',
-default=10000,
+default=1000,
type=int,
help="Maximum number of lines in a column. "
"Default: 1000")
@@ -354,8 +354,8 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, di
key = mergedKeys[k]
merged_col_name = mergedKeys_m[k]

-if merged_infos[merged_col_name]['nb_elts'] == 1:
-raise Exception("Can't merge information from a tag with only one element (e.g. one sample ; don't use -m option)")
+# if merged_infos[merged_col_name]['nb_elts'] == 1:
+# raise Exception("Can't merge information from a tag with only one element (e.g. one sample ; don't use -m option)")

if merged_col_name in view:
i_col = view[merged_col_name]

@@ -378,6 +378,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, di
OBI_INT,
nb_elements_per_line=merged_infos[merged_col_name]['nb_elts'],
elements_names=list(merged_infos[merged_col_name]['elt_names']),
+dict_column=True,
comments=i_col.comments,
alias=merged_col_name
)

@@ -400,6 +401,7 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, di
OBI_INT,
nb_elements_per_line=len(view),
elements_names=[id for id in i_id_col],
+dict_column=True,
alias=TAXID_DIST_COLUMN
)
@@ -34,6 +34,7 @@ cdef extern from "obidms.h" nogil:
int obi_close_dms(OBIDMS_p dms, bint force)
char* obi_dms_get_dms_path(OBIDMS_p dms)
char* obi_dms_get_full_path(OBIDMS_p dms, const_char_p path_name)
+char* obi_dms_formatted_infos(OBIDMS_p dms, bint detailed)
void obi_close_atexit()

obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number)
@@ -31,6 +31,7 @@ cdef extern from "obidmscolumn.h" nogil:
const_char_p elements_names
OBIType_t returned_data_type
OBIType_t stored_data_type
+bint dict_column
bint tuples
bint to_eval
time_t creation_date

@@ -63,10 +64,11 @@ cdef extern from "obidmscolumn.h" nogil:

char* obi_get_elements_names(OBIDMS_column_p column)

-char* obi_column_formatted_infos(OBIDMS_column_p column)

index_t obi_column_get_element_index_from_name(OBIDMS_column_p column, const char* element_name)

int obi_column_write_comments(OBIDMS_column_p column, const char* comments)

int obi_column_add_comment(OBIDMS_column_p column, const char* key, const char* value)

+char* obi_column_formatted_infos(OBIDMS_column_p column, bint detailed)
@@ -8,6 +8,7 @@ cdef extern from "obi_ecopcr.h" nogil:

int obi_ecopcr(const char* input_dms_name,
const char* i_view_name,
+const char* tax_dms_name,
const char* taxonomy_name,
const char* output_dms_name,
const char* o_view_name,
@@ -11,4 +11,5 @@ cdef extern from "obi_ecotag.h" nogil:
const char* taxonomy_name,
const char* output_view_name,
const char* output_view_comments,
-double ecotag_threshold)
+double ecotag_threshold,
+double bubble_threshold)
@@ -7,6 +7,8 @@ from libc.stdint cimport int32_t

cdef extern from "obidms_taxonomy.h" nogil:

+extern int MIN_LOCAL_TAXID

struct ecotxnode :
int32_t taxid
int32_t rank

@@ -18,6 +20,13 @@ cdef extern from "obidms_taxonomy.h" nogil:
ctypedef ecotxnode ecotx_t


+struct econame_t : # can't get this struct to be accepted by Cython ('unknown size')
+char* name
+char* class_name
+int32_t is_scientific_name
+ecotxnode* taxon


struct ecotxidx_t :
int32_t count
int32_t max_taxid

@@ -30,9 +39,14 @@ cdef extern from "obidms_taxonomy.h" nogil:
char** label


+struct econameidx_t :
+int32_t count
+econame_t* names


struct OBIDMS_taxonomy_t :
ecorankidx_t* ranks
-# econameidx_t* names
+econameidx_t* names
ecotxidx_t* taxa

ctypedef OBIDMS_taxonomy_t* OBIDMS_taxonomy_p

@@ -44,14 +58,18 @@ cdef extern from "obidms_taxonomy.h" nogil:

OBIDMS_taxonomy_p obi_read_taxdump(const_char_p taxdump)

-int obi_write_taxonomy(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const_char_p tax_name)
+int obi_write_taxonomy(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const_char_p tax_name, bint update)

int obi_close_taxonomy(OBIDMS_taxonomy_p taxonomy)

ecotx_t* obi_taxo_get_parent_at_rank(ecotx_t* taxon, int32_t rankidx)

ecotx_t* obi_taxo_get_taxon_with_taxid(OBIDMS_taxonomy_p taxonomy, int32_t taxid)

+char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)

+ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)

bint obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid)

ecotx_t* obi_taxo_get_species(ecotx_t* taxon, OBIDMS_taxonomy_p taxonomy)

@@ -71,4 +89,4 @@ cdef extern from "obidms_taxonomy.h" nogil:
int obi_taxo_add_preferred_name_with_taxon(OBIDMS_taxonomy_p tax, ecotx_t* taxon, const char* preferred_name)

const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks)
|
@ -53,6 +53,8 @@ cdef extern from "obitypes.h" nogil:
|
|||||||
extern const_char_p OBIQual_char_NA
|
extern const_char_p OBIQual_char_NA
|
||||||
extern uint8_t* OBIQual_int_NA
|
extern uint8_t* OBIQual_int_NA
|
||||||
extern void* OBITuple_NA
|
extern void* OBITuple_NA
|
||||||
|
|
||||||
|
extern obiint_t OBI_INT_MAX
|
||||||
|
|
||||||
const_char_p name_data_type(int data_type)
|
const_char_p name_data_type(int data_type)
|
||||||
|
|
||||||
|
@@ -27,6 +27,7 @@ cdef extern from "obiview.h" nogil:
extern const_char_p REVERSE_QUALITY_COLUMN
extern const_char_p REVERSE_SEQUENCE_COLUMN
extern const_char_p COUNT_COLUMN
+extern const_char_p SCIENTIFIC_NAME_COLUMN
extern const_char_p TAXID_COLUMN
extern const_char_p MERGED_TAXID_COLUMN
extern const_char_p MERGED_PREFIX

@@ -94,6 +95,7 @@ cdef extern from "obiview.h" nogil:
index_t nb_elements_per_line,
char* elements_names,
bint elt_names_formatted,
+bint dict_column,
bint tuples,
bint to_eval,
const_char_p indexer_name,

@@ -103,13 +105,17 @@ cdef extern from "obiview.h" nogil:
bint create)

int obi_view_delete_column(Obiview_p view, const_char_p column_name, bint delete_file)

OBIDMS_column_p obi_view_get_column(Obiview_p view, const_char_p column_name)

OBIDMS_column_p* obi_view_get_pointer_on_column_in_view(Obiview_p view, const_char_p column_name)

int obi_view_create_column_alias(Obiview_p view, const_char_p current_name, const_char_p alias)

+char* obi_view_formatted_infos(Obiview_p view, bint detailed)

+char* obi_view_formatted_infos_one_line(Obiview_p view)

int obi_view_write_comments(Obiview_p view, const_char_p comments)

int obi_view_add_comment(Obiview_p view, const_char_p key, const_char_p value)
@ -7,7 +7,8 @@ __OBIDMS_COLUMN_CLASS__ = {}
|
|||||||
from ..capi.obitypes cimport name_data_type, \
|
from ..capi.obitypes cimport name_data_type, \
|
||||||
obitype_t, \
|
obitype_t, \
|
||||||
obiversion_t, \
|
obiversion_t, \
|
||||||
OBI_QUAL
|
OBI_QUAL, \
|
||||||
|
OBI_STR
|
||||||
|
|
||||||
from ..capi.obidms cimport obi_import_column
|
from ..capi.obidms cimport obi_import_column
|
||||||
|
|
||||||
@ -90,6 +91,7 @@ cdef class Column(OBIWrapper) :
|
|||||||
obitype_t data_type,
|
obitype_t data_type,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
list elements_names=None,
|
list elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
bint to_eval=False,
|
bint to_eval=False,
|
||||||
object associated_column_name=b"",
|
object associated_column_name=b"",
|
||||||
@ -127,6 +129,10 @@ cdef class Column(OBIWrapper) :
|
|||||||
else:
|
else:
|
||||||
elements_names_p = NULL
|
elements_names_p = NULL
|
||||||
|
|
||||||
|
if column_name_b == b"SAMPLE" or column_name_b == b"sample":
|
||||||
|
# force str type
|
||||||
|
data_type = OBI_STR
|
||||||
|
|
||||||
if data_type == OBI_QUAL:
|
if data_type == OBI_QUAL:
|
||||||
if associated_column_name_b == b"":
|
if associated_column_name_b == b"":
|
||||||
if column_name == QUALITY_COLUMN:
|
if column_name == QUALITY_COLUMN:
|
||||||
@ -152,6 +158,7 @@ cdef class Column(OBIWrapper) :
|
|||||||
nb_elements_per_line = nb_elements_per_line,
|
nb_elements_per_line = nb_elements_per_line,
|
||||||
elements_names = elements_names_p,
|
elements_names = elements_names_p,
|
||||||
elt_names_formatted = False,
|
elt_names_formatted = False,
|
||||||
|
dict_column = dict_column,
|
||||||
tuples = tuples,
|
tuples = tuples,
|
||||||
to_eval = to_eval,
|
to_eval = to_eval,
|
||||||
indexer_name = NULL,
|
indexer_name = NULL,
|
||||||
@ -200,7 +207,7 @@ cdef class Column(OBIWrapper) :
|
|||||||
|
|
||||||
column_p = column_pp[0]
|
column_p = column_pp[0]
|
||||||
column_type = column_p.header.returned_data_type
|
column_type = column_p.header.returned_data_type
|
||||||
column_class = Column.get_column_class(column_type, (column_p.header.nb_elements_per_line > 1), column_p.header.tuples)
|
column_class = Column.get_column_class(column_type, (column_p.header.nb_elements_per_line > 1 or column_p.header.dict_column == True), column_p.header.tuples)
|
||||||
column = OBIWrapper.new_wrapper(column_class, column_pp)
|
column = OBIWrapper.new_wrapper(column_class, column_pp)
|
||||||
|
|
||||||
column._view = view
|
column._view = view
|
||||||
@ -236,6 +243,7 @@ cdef class Column(OBIWrapper) :
|
|||||||
nb_elements_per_line = -1,
|
nb_elements_per_line = -1,
|
||||||
elements_names = NULL,
|
elements_names = NULL,
|
||||||
elt_names_formatted = False,
|
elt_names_formatted = False,
|
||||||
|
dict_column = False,
|
||||||
tuples = False,
|
tuples = False,
|
||||||
to_eval = False,
|
to_eval = False,
|
||||||
indexer_name = NULL,
|
indexer_name = NULL,
|
||||||
@ -302,15 +310,24 @@ cdef class Column(OBIWrapper) :
|
|||||||
|
|
||||||
@OBIWrapper.checkIsActive
|
@OBIWrapper.checkIsActive
|
||||||
def __repr__(self) :
|
def __repr__(self) :
|
||||||
cdef bytes s
|
cdef str s
|
||||||
#cdef char* s_b
|
cdef char* sc
|
||||||
#cdef str s_str
|
cdef OBIDMS_column_p pointer = self.pointer()
|
||||||
#s_b = obi_column_formatted_infos(self.pointer())
|
sc = obi_column_formatted_infos(pointer, False)
|
||||||
#s_str = bytes2str(s_b)
|
s = bytes2str(sc)
|
||||||
#free(s_b)
|
free(sc)
|
||||||
s = self._alias + b", data type: " + self.data_type
|
return s
|
||||||
#return s_str
|
|
||||||
return bytes2str(s)
|
|
||||||
|
@OBIWrapper.checkIsActive
|
||||||
|
def repr_longformat(self) :
|
||||||
|
cdef str s
|
||||||
|
cdef char* sc
|
||||||
|
cdef OBIDMS_column_p pointer = self.pointer()
|
||||||
|
sc = obi_column_formatted_infos(pointer, True)
|
||||||
|
s = bytes2str(sc)
|
||||||
|
free(sc)
|
||||||
|
return s
|
||||||
|
|
||||||
|
|
||||||
def close(self): # TODO discuss, can't be called bc then bug when closing view that tries to close it in C
|
def close(self): # TODO discuss, can't be called bc then bug when closing view that tries to close it in C
|
||||||
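__repr__ now delegates to the C helper obi_column_formatted_infos() with the short flag, and the new repr_longformat() returns the detailed variant; the same short/long pair is added to DMS and View later in this diff. A hedged usage sketch, where col is assumed to be a column of an open view:

    print(repr(col))              # one-line summary (obi_column_formatted_infos(..., False))
    print(col.repr_longformat())  # detailed description (obi_column_formatted_infos(..., True))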
@ -365,6 +382,13 @@ cdef class Column(OBIWrapper) :
|
|||||||
raise OBIDeactivatedInstanceError()
|
raise OBIDeactivatedInstanceError()
|
||||||
return self.pointer().header.nb_elements_per_line
|
return self.pointer().header.nb_elements_per_line
|
||||||
|
|
||||||
|
# dict_column property getter
|
||||||
|
@property
|
||||||
|
def dict_column(self):
|
||||||
|
if not self.active() :
|
||||||
|
raise OBIDeactivatedInstanceError()
|
||||||
|
return self.pointer().header.dict_column
|
||||||
|
|
||||||
# data_type property getter
|
# data_type property getter
|
||||||
@property
|
@property
|
||||||
def data_type(self):
|
def data_type(self):
|
||||||
|
@ -38,11 +38,13 @@ cdef class Column_bool(Column):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
return Column.new_column(view, column_name, OBI_BOOL,
|
return Column.new_column(view, column_name, OBI_BOOL,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=tuples,
|
tuples=tuples,
|
||||||
comments=comments)
|
comments=comments)
|
||||||
|
|
||||||
|
@ -36,12 +36,14 @@ cdef class Column_char(Column):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
|
|
||||||
return Column.new_column(view, column_name, OBI_CHAR,
|
return Column.new_column(view, column_name, OBI_CHAR,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=tuples,
|
tuples=tuples,
|
||||||
comments=comments)
|
comments=comments)
|
||||||
|
|
||||||
|
@ -36,12 +36,14 @@ cdef class Column_float(Column):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
|
|
||||||
return Column.new_column(view, column_name, OBI_FLOAT,
|
return Column.new_column(view, column_name, OBI_FLOAT,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=tuples,
|
tuples=tuples,
|
||||||
comments=comments)
|
comments=comments)
|
||||||
|
|
||||||
|
@ -38,12 +38,14 @@ cdef class Column_int(Column):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
|
|
||||||
return Column.new_column(view, column_name, OBI_INT,
|
return Column.new_column(view, column_name, OBI_INT,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=tuples,
|
tuples=tuples,
|
||||||
comments=comments)
|
comments=comments)
|
||||||
|
|
||||||
|
@ -38,6 +38,7 @@ cdef class Column_qual(Column_idx):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
object associated_column_name=b"",
|
object associated_column_name=b"",
|
||||||
int associated_column_version=-1,
|
int associated_column_version=-1,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
@ -45,6 +46,7 @@ cdef class Column_qual(Column_idx):
|
|||||||
return Column.new_column(view, column_name, OBI_QUAL,
|
return Column.new_column(view, column_name, OBI_QUAL,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=False,
|
tuples=False,
|
||||||
associated_column_name=associated_column_name,
|
associated_column_name=associated_column_name,
|
||||||
associated_column_version=associated_column_name,
|
associated_column_version=associated_column_name,
|
||||||
|
@ -39,12 +39,14 @@ cdef class Column_seq(Column_idx):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
|
|
||||||
return Column.new_column(view, column_name, OBI_SEQ,
|
return Column.new_column(view, column_name, OBI_SEQ,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=tuples,
|
tuples=tuples,
|
||||||
comments=comments)
|
comments=comments)
|
||||||
|
|
||||||
|
@ -38,12 +38,14 @@ cdef class Column_str(Column_idx):
|
|||||||
object column_name,
|
object column_name,
|
||||||
index_t nb_elements_per_line=1,
|
index_t nb_elements_per_line=1,
|
||||||
object elements_names=None,
|
object elements_names=None,
|
||||||
|
bint dict_column=False,
|
||||||
bint tuples=False,
|
bint tuples=False,
|
||||||
object comments={}):
|
object comments={}):
|
||||||
|
|
||||||
return Column.new_column(view, column_name, OBI_STR,
|
return Column.new_column(view, column_name, OBI_STR,
|
||||||
nb_elements_per_line=nb_elements_per_line,
|
nb_elements_per_line=nb_elements_per_line,
|
||||||
elements_names=elements_names,
|
elements_names=elements_names,
|
||||||
|
dict_column=dict_column,
|
||||||
tuples=tuples,
|
tuples=tuples,
|
||||||
comments=comments)
|
comments=comments)
|
||||||
|
|
||||||
@ -72,6 +74,9 @@ cdef class Column_str(Column_idx):
|
|||||||
if value is None :
|
if value is None :
|
||||||
value_b = <char*>OBIStr_NA
|
value_b = <char*>OBIStr_NA
|
||||||
else :
|
else :
|
||||||
|
if self.name == b'sample' or self.name == b'SAMPLE':
|
||||||
|
if type(value) == int:
|
||||||
|
value = str(value) # force sample ids to be str
|
||||||
value_bytes = tobytes(value)
|
value_bytes = tobytes(value)
|
||||||
value_b = <char*>value_bytes
|
value_b = <char*>value_bytes
|
||||||
|
|
||||||
@ -135,6 +140,9 @@ cdef class Column_multi_elts_str(Column_multi_elts_idx):
|
|||||||
if value is None :
|
if value is None :
|
||||||
value_b = <char*>OBIStr_NA
|
value_b = <char*>OBIStr_NA
|
||||||
else :
|
else :
|
||||||
|
if self.name == b'sample' or self.name == b'SAMPLE':
|
||||||
|
if type(value) == int:
|
||||||
|
value = str(value) # force sample ids to be str
|
||||||
value_bytes = tobytes(value)
|
value_bytes = tobytes(value)
|
||||||
value_b = <char*>value_bytes
|
value_b = <char*>value_bytes
|
||||||
|
|
||||||
@ -204,6 +212,9 @@ cdef class Column_tuples_str(Column_idx):
|
|||||||
i = 0
|
i = 0
|
||||||
for elt in value :
|
for elt in value :
|
||||||
if elt is not None and elt != '':
|
if elt is not None and elt != '':
|
||||||
|
if self.name == b'sample' or self.name == b'SAMPLE':
|
||||||
|
if type(elt) == int:
|
||||||
|
elt = str(elt) # force sample ids to be str
|
||||||
elt_b = tobytes(elt)
|
elt_b = tobytes(elt)
|
||||||
strcpy(array+i, <char*>elt_b)
|
strcpy(array+i, <char*>elt_b)
|
||||||
i = i + len(elt_b) + 1
|
i = i + len(elt_b) + 1
|
||||||
|
@ -10,7 +10,8 @@ from .capi.obidms cimport obi_open_dms, \
|
|||||||
obi_dms_exists, \
|
obi_dms_exists, \
|
||||||
obi_dms_get_full_path, \
|
obi_dms_get_full_path, \
|
||||||
obi_close_atexit, \
|
obi_close_atexit, \
|
||||||
obi_dms_write_comments
|
obi_dms_write_comments, \
|
||||||
|
obi_dms_formatted_infos
|
||||||
|
|
||||||
from .capi.obitypes cimport const_char_p
|
from .capi.obitypes cimport const_char_p
|
||||||
|
|
||||||
@ -32,6 +33,8 @@ from .object import OBIWrapper
|
|||||||
import json
|
import json
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
from libc.stdlib cimport free
|
||||||
|
|
||||||
|
|
||||||
cdef class DMS(OBIWrapper):
|
cdef class DMS(OBIWrapper):
|
||||||
|
|
||||||
@ -223,13 +226,24 @@ cdef class DMS(OBIWrapper):
|
|||||||
|
|
||||||
|
|
||||||
@OBIWrapper.checkIsActive
|
@OBIWrapper.checkIsActive
|
||||||
def __repr__(self):
|
def __repr__(self) :
|
||||||
cdef str s
|
cdef str s
|
||||||
s=""
|
cdef char* sc
|
||||||
for view_name in self.keys():
|
cdef OBIDMS_p pointer = self.pointer()
|
||||||
view = self.get_view(view_name)
|
sc = obi_dms_formatted_infos(pointer, False)
|
||||||
s = s + repr(view) + "\n"
|
s = bytes2str(sc)
|
||||||
view.close()
|
free(sc)
|
||||||
|
return s
|
||||||
|
|
||||||
|
|
||||||
|
@OBIWrapper.checkIsActive
|
||||||
|
def repr_longformat(self) :
|
||||||
|
cdef str s
|
||||||
|
cdef char* sc
|
||||||
|
cdef OBIDMS_p pointer = self.pointer()
|
||||||
|
sc = obi_dms_formatted_infos(pointer, True)
|
||||||
|
s = bytes2str(sc)
|
||||||
|
free(sc)
|
||||||
return s
|
return s
|
||||||
|
|
||||||
|
|
||||||
|
@ -11,13 +11,16 @@ cdef class Taxonomy(OBIWrapper) :
     cdef bytes _name
     cdef DMS _dms
     cdef list _ranks
+    cdef dict _name_dict

     cdef inline OBIDMS_taxonomy_p pointer(self)
+    cdef fill_name_dict(self)

     cpdef Taxon get_taxon_by_idx(self, int idx)
     cpdef Taxon get_taxon_by_taxid(self, int taxid)
-    cpdef write(self, object prefix)
-    cpdef int add_taxon(self, str name, str rank_name, int parent_taxid, int min_taxid=*)
+    cpdef Taxon get_taxon_by_name(self, object taxon_name, object restricting_taxid=*)
+    cpdef write(self, object prefix, bint update=*)
+    cpdef int add_taxon(self, object name, object rank_name, int parent_taxid, int min_taxid=*)
     cpdef object get_species(self, int taxid)
     cpdef object get_genus(self, int taxid)
     cpdef object get_family(self, int taxid)
|
@ -1,5 +1,7 @@
|
|||||||
#cython: language_level=3
|
#cython: language_level=3
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
from obitools3.utils cimport str2bytes, bytes2str, tobytes, tostr
|
from obitools3.utils cimport str2bytes, bytes2str, tobytes, tostr
|
||||||
from ..capi.obidms cimport OBIDMS_p, obi_dms_get_full_path
|
from ..capi.obidms cimport OBIDMS_p, obi_dms_get_full_path
|
||||||
|
|
||||||
@ -15,7 +17,11 @@ from ..capi.obitaxonomy cimport obi_taxonomy_exists, \
|
|||||||
obi_taxo_get_species, \
|
obi_taxo_get_species, \
|
||||||
obi_taxo_get_genus, \
|
obi_taxo_get_genus, \
|
||||||
obi_taxo_get_family, \
|
obi_taxo_get_family, \
|
||||||
ecotx_t
|
ecotx_t, \
|
||||||
|
econame_t, \
|
||||||
|
obi_taxo_get_name_from_name_idx, \
|
||||||
|
obi_taxo_get_taxon_from_name_idx
|
||||||
|
|
||||||
|
|
||||||
from cpython.pycapsule cimport PyCapsule_New, PyCapsule_GetPointer
|
from cpython.pycapsule cimport PyCapsule_New, PyCapsule_GetPointer
|
||||||
import tarfile
|
import tarfile
|
||||||
@ -24,11 +30,29 @@ from libc.stdlib cimport free
|
|||||||
|
|
||||||
|
|
||||||
cdef class Taxonomy(OBIWrapper) :
|
cdef class Taxonomy(OBIWrapper) :
|
||||||
# TODO function to import taxonomy?
|
# TODO function to import taxonomy?
|
||||||
|
|
||||||
cdef inline OBIDMS_taxonomy_p pointer(self) :
|
cdef inline OBIDMS_taxonomy_p pointer(self) :
|
||||||
return <OBIDMS_taxonomy_p>(self._pointer)
|
return <OBIDMS_taxonomy_p>(self._pointer)
|
||||||
|
|
||||||
|
cdef fill_name_dict(self):
|
||||||
|
print("Indexing taxon names...", file=sys.stderr)
|
||||||
|
|
||||||
|
cdef OBIDMS_taxonomy_p pointer = self.pointer()
|
||||||
|
cdef ecotx_t* taxon_p
|
||||||
|
cdef object taxon_capsule
|
||||||
|
cdef bytes name
|
||||||
|
cdef int count
|
||||||
|
cdef int n
|
||||||
|
|
||||||
|
count = (<OBIDMS_taxonomy_p>pointer).names.count
|
||||||
|
|
||||||
|
for n in range(count) :
|
||||||
|
name = obi_taxo_get_name_from_name_idx(pointer, n)
|
||||||
|
taxon_p = obi_taxo_get_taxon_from_name_idx(pointer, n)
|
||||||
|
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
|
||||||
|
self._name_dict[name] = Taxon(taxon_capsule, self)
|
||||||
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def exists(DMS dms, object name) :
|
def exists(DMS dms, object name) :
|
||||||
@ -69,13 +93,16 @@ cdef class Taxonomy(OBIWrapper) :
|
|||||||
raise RuntimeError("Error : Cannot read taxonomy %s"
|
raise RuntimeError("Error : Cannot read taxonomy %s"
|
||||||
% tostr(name))
|
% tostr(name))
|
||||||
|
|
||||||
|
print("Taxonomy read", file=sys.stderr)
|
||||||
|
|
||||||
taxo = OBIWrapper.new_wrapper(Taxonomy, pointer)
|
taxo = OBIWrapper.new_wrapper(Taxonomy, pointer)
|
||||||
|
|
||||||
dms.register(taxo)
|
dms.register(taxo)
|
||||||
|
|
||||||
taxo._dms = dms
|
taxo._dms = dms
|
||||||
taxo._name = tobytes(name)
|
taxo._name = tobytes(name)
|
||||||
|
taxo._name_dict = {}
|
||||||
|
taxo.fill_name_dict()
|
||||||
taxo._ranks = []
|
taxo._ranks = []
|
||||||
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
|
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
|
||||||
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
|
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
|
||||||
@ -118,19 +145,22 @@ cdef class Taxonomy(OBIWrapper) :
|
|||||||
|
|
||||||
taxo._dms = dms
|
taxo._dms = dms
|
||||||
taxo._name = folder_path
|
taxo._name = folder_path
|
||||||
|
taxo._name_dict = {}
|
||||||
|
taxo.fill_name_dict()
|
||||||
taxo._ranks = []
|
taxo._ranks = []
|
||||||
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
|
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
|
||||||
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
|
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
|
||||||
|
|
||||||
|
print('Read %d taxa' % len(taxo), file=sys.stderr)
|
||||||
|
|
||||||
return taxo
|
return taxo
|
||||||
|
|
||||||
|
|
||||||
def __getitem__(self, object ref):
|
def __getitem__(self, object ref):
|
||||||
if type(ref) == int :
|
if type(ref) == int :
|
||||||
return self.get_taxon_by_taxid(ref)
|
return self.get_taxon_by_taxid(ref)
|
||||||
else :
|
elif type(ref) == str or type(ref) == bytes :
|
||||||
raise NotImplementedError()
|
return self.get_taxon_by_name(ref)
|
||||||
|
|
||||||
|
|
||||||
cpdef Taxon get_taxon_by_taxid(self, int taxid):
|
cpdef Taxon get_taxon_by_taxid(self, int taxid):
|
||||||
@ -143,6 +173,20 @@ cdef class Taxonomy(OBIWrapper) :
|
|||||||
return Taxon(taxon_capsule, self)
|
return Taxon(taxon_capsule, self)
|
||||||
|
|
||||||
|
|
||||||
|
cpdef Taxon get_taxon_by_name(self, object taxon_name, object restricting_taxid=None):
|
||||||
|
#print(taxon_name)
|
||||||
|
taxon = self._name_dict.get(tobytes(taxon_name), None)
|
||||||
|
if not taxon:
|
||||||
|
return None
|
||||||
|
elif restricting_taxid:
|
||||||
|
if self.is_ancestor(restricting_taxid, taxon.taxid):
|
||||||
|
return taxon
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return taxon
|
||||||
|
|
||||||
|
|
||||||
cpdef Taxon get_taxon_by_idx(self, int idx):
|
cpdef Taxon get_taxon_by_idx(self, int idx):
|
||||||
cdef ecotx_t* taxa
|
cdef ecotx_t* taxa
|
||||||
cdef ecotx_t* taxon_p
|
cdef ecotx_t* taxon_p
|
||||||
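With the name index built at load time, taxa can be fetched by scientific name, and __getitem__ accepts str or bytes in addition to integer taxids. A hedged sketch, where taxo is assumed to be a Taxonomy opened from a DMS and the name and taxid are purely illustrative:

    taxon = taxo.get_taxon_by_name("Abies alba")    # exact-name lookup, returns None if absent
    same = taxo[b"Abies alba"]                      # __getitem__ now routes names here too
    scoped = taxo.get_taxon_by_name("Abies alba", restricting_taxid=3329)
    # returned only if the restricting taxid is an ancestor of the match, otherwise None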
@ -232,19 +276,19 @@ cdef class Taxonomy(OBIWrapper) :
|
|||||||
|
|
||||||
taxa = self.pointer().taxa.taxon
|
taxa = self.pointer().taxa.taxon
|
||||||
|
|
||||||
# Yield each taxid
|
# Yield each taxon
|
||||||
for t in range(self.pointer().taxa.count):
|
for t in range(self.pointer().taxa.count):
|
||||||
taxon_p = <ecotx_t*> (taxa+t)
|
taxon_p = <ecotx_t*> (taxa+t)
|
||||||
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
|
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
|
||||||
yield Taxon(taxon_capsule, self)
|
yield Taxon(taxon_capsule, self)
|
||||||
|
|
||||||
|
|
||||||
cpdef write(self, object prefix) :
|
cpdef write(self, object prefix, bint update=False) :
|
||||||
if obi_write_taxonomy(self._dms.pointer(), self.pointer(), tobytes(prefix)) < 0 :
|
if obi_write_taxonomy(self._dms.pointer(), self.pointer(), tobytes(prefix), update) < 0 :
|
||||||
raise Exception("Error writing the taxonomy to binary files")
|
raise Exception("Error writing the taxonomy to binary files")
|
||||||
|
|
||||||
|
|
||||||
cpdef int add_taxon(self, str name, str rank_name, int parent_taxid, int min_taxid=10000000) :
|
cpdef int add_taxon(self, object name, object rank_name, int parent_taxid, int min_taxid=10000000) :
|
||||||
cdef int taxid
|
cdef int taxid
|
||||||
taxid = obi_taxo_add_local_taxon(self.pointer(), tobytes(name), tobytes(rank_name), parent_taxid, min_taxid)
|
taxid = obi_taxo_add_local_taxon(self.pointer(), tobytes(name), tobytes(rank_name), parent_taxid, min_taxid)
|
||||||
if taxid < 0 :
|
if taxid < 0 :
|
||||||
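write() gains an update flag that is passed through to obi_write_taxonomy(), and add_taxon() now accepts str or bytes for the name and rank. A hedged sketch with illustrative names and taxids; the exact effect of update follows the C function:

    new_taxid = taxo.add_taxon("Abies hypothetica", "species", parent_taxid=3329)
    taxo.write("my_taxo_prefix", update=True)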
@ -267,6 +311,11 @@ cdef class Taxonomy(OBIWrapper) :
|
|||||||
def name(self):
|
def name(self):
|
||||||
return self._name
|
return self._name
|
||||||
|
|
||||||
|
# ranks property getter
|
||||||
|
@property
|
||||||
|
def ranks(self):
|
||||||
|
return self._ranks
|
||||||
|
|
||||||
|
|
||||||
def parental_tree_iterator(self, int taxid):
|
def parental_tree_iterator(self, int taxid):
|
||||||
"""
|
"""
|
||||||
@ -281,6 +330,7 @@ cdef class Taxonomy(OBIWrapper) :
|
|||||||
if taxon is not None:
|
if taxon is not None:
|
||||||
while taxon.taxid != 1:
|
while taxon.taxid != 1:
|
||||||
yield taxon
|
yield taxon
|
||||||
|
#print(taxon.taxid)
|
||||||
taxon = taxon.parent
|
taxon = taxon.parent
|
||||||
yield taxon
|
yield taxon
|
||||||
else:
|
else:
|
||||||
|
@ -19,7 +19,9 @@ from ..capi.obiview cimport Alias_column_pair_p, \
|
|||||||
obi_view_delete_column, \
|
obi_view_delete_column, \
|
||||||
obi_view_create_column_alias, \
|
obi_view_create_column_alias, \
|
||||||
obi_view_write_comments, \
|
obi_view_write_comments, \
|
||||||
obi_delete_view
|
obi_delete_view, \
|
||||||
|
obi_view_formatted_infos, \
|
||||||
|
obi_view_formatted_infos_one_line
|
||||||
|
|
||||||
from ..capi.obidmscolumn cimport OBIDMS_column_p
|
from ..capi.obidmscolumn cimport OBIDMS_column_p
|
||||||
from ..capi.obidms cimport OBIDMS_p
|
from ..capi.obidms cimport OBIDMS_p
|
||||||
@ -59,6 +61,8 @@ import pkgutil
|
|||||||
import json
|
import json
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
from libc.stdlib cimport free
|
||||||
|
|
||||||
|
|
||||||
cdef class View(OBIWrapper) :
|
cdef class View(OBIWrapper) :
|
||||||
|
|
||||||
@ -73,7 +77,7 @@ cdef class View(OBIWrapper) :
|
|||||||
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def import_view(object dms_1, object dms_2, object view_name_1, object view_name_2):
|
def import_view(object dms_1, object dms_2, object view_name_1, object view_name_2): # TODO argument to import line by line to avoid huge AVL copy
|
||||||
if obi_import_view(tobytes(dms_1), tobytes(dms_2), tobytes(view_name_1), tobytes(view_name_2)) < 0 :
|
if obi_import_view(tobytes(dms_1), tobytes(dms_2), tobytes(view_name_1), tobytes(view_name_2)) < 0 :
|
||||||
raise Exception("Error importing a view")
|
raise Exception("Error importing a view")
|
||||||
|
|
||||||
@ -186,15 +190,22 @@ cdef class View(OBIWrapper) :
|
|||||||
@OBIWrapper.checkIsActive
|
@OBIWrapper.checkIsActive
|
||||||
def __repr__(self) :
|
def __repr__(self) :
|
||||||
cdef str s
|
cdef str s
|
||||||
if self.read_only: # can read date
|
cdef char* sc
|
||||||
s = "#View name:\n{name:s}\n#Date created:\n{date:s}\n#Line count:\n{line_count:d}\n#Columns:\n".format(name = bytes2str(self.name),
|
cdef Obiview_p pointer = self.pointer()
|
||||||
line_count = self.line_count,
|
sc = obi_view_formatted_infos(pointer, False)
|
||||||
date = str(bytes2str_object(self.comments["Date created"])))
|
s = bytes2str(sc)
|
||||||
else:
|
free(sc)
|
||||||
s = "#View name:\n{name:s}\n#Line count:\n{line_count:d}\n#Columns:\n".format(name = bytes2str(self.name),
|
return s
|
||||||
line_count = self.line_count)
|
|
||||||
for column_name in self.keys() :
|
|
||||||
s = s + repr(self[column_name]) + '\n'
|
@OBIWrapper.checkIsActive
|
||||||
|
def repr_longformat(self) :
|
||||||
|
cdef str s
|
||||||
|
cdef char* sc
|
||||||
|
cdef Obiview_p pointer = self.pointer()
|
||||||
|
sc = obi_view_formatted_infos(pointer, True)
|
||||||
|
s = bytes2str(sc)
|
||||||
|
free(sc)
|
||||||
return s
|
return s
|
||||||
|
|
||||||
|
|
||||||
@ -332,9 +343,9 @@ cdef class View(OBIWrapper) :
|
|||||||
|
|
||||||
new_column = Column.new_column(self, old_column.pointer().header.name, new_data_type,
|
new_column = Column.new_column(self, old_column.pointer().header.name, new_data_type,
|
||||||
nb_elements_per_line=new_nb_elements_per_line, elements_names=new_elements_names,
|
nb_elements_per_line=new_nb_elements_per_line, elements_names=new_elements_names,
|
||||||
comments=old_column.comments, alias=column_name_b+tobytes('___new___'))
|
dict_column=(new_nb_elements_per_line>1), comments=old_column.comments, alias=column_name_b+tobytes('___new___'))
|
||||||
|
|
||||||
switch_to_dict = old_column.nb_elements_per_line == 1 and new_nb_elements_per_line > 1
|
switch_to_dict = not old_column.dict_column and new_nb_elements_per_line > 1
|
||||||
ori_key = old_column._elements_names[0]
|
ori_key = old_column._elements_names[0]
|
||||||
|
|
||||||
for i in range(length) :
|
for i in range(length) :
|
||||||
@ -393,6 +404,7 @@ cdef class View(OBIWrapper) :
|
|||||||
col.data_type_int,
|
col.data_type_int,
|
||||||
nb_elements_per_line = col.nb_elements_per_line,
|
nb_elements_per_line = col.nb_elements_per_line,
|
||||||
elements_names = col._elements_names,
|
elements_names = col._elements_names,
|
||||||
|
dict_column = col.dict_column,
|
||||||
tuples = col.tuples,
|
tuples = col.tuples,
|
||||||
to_eval = col.to_eval,
|
to_eval = col.to_eval,
|
||||||
comments = col.comments,
|
comments = col.comments,
|
||||||
@ -588,7 +600,8 @@ cdef class View(OBIWrapper) :
|
|||||||
if element is not None:
|
if element is not None:
|
||||||
if element.comments[b"input_dms_name"] is not None :
|
if element.comments[b"input_dms_name"] is not None :
|
||||||
for i in range(len(element.comments[b"input_dms_name"])) :
|
for i in range(len(element.comments[b"input_dms_name"])) :
|
||||||
if element.comments[b"input_dms_name"][i] == element.dms.name and b"/" not in element.comments[b"input_view_name"][i]: # Same DMS and not a special element like a taxonomy
|
if b"/" not in element.comments[b"input_view_name"][i] and element.comments[b"input_view_name"][i] in element.dms \
|
||||||
|
and element.comments[b"input_dms_name"][i] == element.dms.name : # Same DMS and not a special element like a taxonomy and view was not deleted
|
||||||
top_level.append(element.dms[element.comments[b"input_view_name"][i]])
|
top_level.append(element.dms[element.comments[b"input_view_name"][i]])
|
||||||
else:
|
else:
|
||||||
top_level.append(None)
|
top_level.append(None)
|
||||||
@ -786,7 +799,8 @@ cdef class Line :
|
|||||||
|
|
||||||
|
|
||||||
def keys(self):
|
def keys(self):
|
||||||
return self._view.keys()
|
cdef bytes key
|
||||||
|
return [key for key in self._view.keys()]
|
||||||
|
|
||||||
|
|
||||||
def __contains__(self, object column_name):
|
def __contains__(self, object column_name):
|
||||||
@ -794,7 +808,7 @@ cdef class Line :
|
|||||||
|
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return bytes2str(self).repr_bytes()
|
return bytes2str(self.repr_bytes())
|
||||||
|
|
||||||
|
|
||||||
cpdef repr_bytes(self):
|
cpdef repr_bytes(self):
|
||||||
|
@ -7,11 +7,12 @@ from obitools3.utils cimport bytes2str

 cdef class FastaFormat:

-    def __init__(self, list tags=[], bint printNAKeys=False, bytes NAString=b"NA"):
+    def __init__(self, list tags=[], bint printNAKeys=False, bytes NAString=b"NA", bint NAIntTo0=False):
         self.headerFormatter = HeaderFormat("fasta",
                                             tags=tags,
                                             printNAKeys=printNAKeys,
-                                            NAString=NAString)
+                                            NAString=NAString,
+                                            NAIntTo0=NAIntTo0)

     @cython.boundscheck(False)
     def __call__(self, object data):
|
@ -8,11 +8,12 @@ from obitools3.utils cimport bytes2str, str2bytes, tobytes
 # TODO quality offset option?
 cdef class FastqFormat:

-    def __init__(self, list tags=[], bint printNAKeys=False, bytes NAString=b"NA"):
+    def __init__(self, list tags=[], bint printNAKeys=False, bytes NAString=b"NA", bint NAIntTo0=False):
         self.headerFormatter = HeaderFormat("fastq",
                                             tags=tags,
                                             printNAKeys=printNAKeys,
-                                            NAString=NAString)
+                                            NAString=NAString,
+                                            NAIntTo0=NAIntTo0)

     @cython.boundscheck(False)
     def __call__(self, object data):
|
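FastaFormat and FastqFormat now accept tags and an NAIntTo0 switch, both forwarded to HeaderFormat, so header annotations can be restricted to chosen keys and missing integer values written as 0 rather than the NA string. A hedged sketch (the key names are assumptions and the surrounding writer wiring is omitted):

    fasta_fmt = FastaFormat(tags=[b"COUNT", b"MERGED_sample"],
                            printNAKeys=False,
                            NAString=b"NA",
                            NAIntTo0=True)
    record = fasta_fmt(seq_line)   # seq_line assumed to be a sequence Line from an open view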
@ -4,5 +4,6 @@ cdef class HeaderFormat:
     cdef set tags
     cdef bint printNAKeys
     cdef bytes NAString
+    cdef bint NAIntTo0
     cdef size_t headerBufferLength
|
|
@ -8,13 +8,14 @@ from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
|
|||||||
|
|
||||||
from obitools3.utils cimport str2bytes, bytes2str_object
|
from obitools3.utils cimport str2bytes, bytes2str_object
|
||||||
from obitools3.dms.column.column cimport Column_line
|
from obitools3.dms.column.column cimport Column_line
|
||||||
|
from obitools3.dms.column.typed_column.int cimport Column_int, Column_multi_elts_int
|
||||||
|
|
||||||
|
|
||||||
cdef class HeaderFormat:
|
cdef class HeaderFormat:
|
||||||
|
|
||||||
SPECIAL_KEYS = [NUC_SEQUENCE_COLUMN, ID_COLUMN, DEFINITION_COLUMN, QUALITY_COLUMN]
|
SPECIAL_KEYS = [NUC_SEQUENCE_COLUMN, ID_COLUMN, DEFINITION_COLUMN, QUALITY_COLUMN]
|
||||||
|
|
||||||
def __init__(self, str format="fasta", list tags=[], bint printNAKeys=False, bytes NAString=b"NA"):
|
def __init__(self, str format="fasta", list tags=[], bint printNAKeys=False, bytes NAString=b"NA", bint NAIntTo0=False):
|
||||||
'''
|
'''
|
||||||
@param format:
|
@param format:
|
||||||
@type format: `str`
|
@type format: `str`
|
||||||
@ -32,6 +33,7 @@ cdef class HeaderFormat:
|
|||||||
self.tags = set(tags)
|
self.tags = set(tags)
|
||||||
self.printNAKeys = printNAKeys
|
self.printNAKeys = printNAKeys
|
||||||
self.NAString = NAString
|
self.NAString = NAString
|
||||||
|
self.NAIntTo0 = NAIntTo0
|
||||||
|
|
||||||
if format=="fasta":
|
if format=="fasta":
|
||||||
self.start=b">"
|
self.start=b">"
|
||||||
@ -57,17 +59,25 @@ cdef class HeaderFormat:
|
|||||||
if k in tags:
|
if k in tags:
|
||||||
value = data[k]
|
value = data[k]
|
||||||
if value is None or (isinstance(value, Column_line) and value.is_NA()):
|
if value is None or (isinstance(value, Column_line) and value.is_NA()):
|
||||||
if self.printNAKeys:
|
if isinstance(data.view[k], Column_int) and self.NAIntTo0: # people want missing int values to be 0
|
||||||
|
value = b'0'
|
||||||
|
elif self.printNAKeys:
|
||||||
value = self.NAString
|
value = self.NAString
|
||||||
else:
|
else:
|
||||||
value = None
|
value = None
|
||||||
else:
|
else:
|
||||||
if type(value) == Column_line:
|
if type(value) == Column_line:
|
||||||
value = value.bytes()
|
if isinstance(data.view[k], Column_multi_elts_int) and self.NAIntTo0:
|
||||||
|
value = dict(value)
|
||||||
|
for key in data.view[k].keys():
|
||||||
|
if key not in value or value[key]:
|
||||||
|
value[key] = 0
|
||||||
|
else:
|
||||||
|
value = value.bytes()
|
||||||
else:
|
else:
|
||||||
if type(value) == tuple:
|
if type(value) == tuple:
|
||||||
value=list(value)
|
value=list(value)
|
||||||
value = str2bytes(str(bytes2str_object(value))) # genius programming
|
value = str2bytes(str(bytes2str_object(value))) # genius programming
|
||||||
if value is not None:
|
if value is not None:
|
||||||
lines.append(k + b"=" + value + b";")
|
lines.append(k + b"=" + value + b";")
|
||||||
|
|
||||||
|
@ -4,5 +4,8 @@ cdef class TabFormat:
     cdef bint header
     cdef bint first_line
     cdef bytes NAString
-    cdef list tags
+    cdef set tags
     cdef bytes sep
+    cdef bint NAIntTo0
+    cdef bint metabaR
+    cdef bint ngsfilter
|
@ -4,36 +4,59 @@ cimport cython
|
|||||||
from obitools3.dms.view.view cimport Line
|
from obitools3.dms.view.view cimport Line
|
||||||
from obitools3.utils cimport bytes2str_object, str2bytes, tobytes
|
from obitools3.utils cimport bytes2str_object, str2bytes, tobytes
|
||||||
from obitools3.dms.column.column cimport Column_line, Column_multi_elts
|
from obitools3.dms.column.column cimport Column_line, Column_multi_elts
|
||||||
|
from obitools3.dms.column.typed_column.int cimport Column_int, Column_multi_elts_int
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
cdef class TabFormat:
|
cdef class TabFormat:
|
||||||
|
|
||||||
def __init__(self, header=True, bytes NAString=b"NA", bytes sep=b"\t"):
|
def __init__(self, list tags=[], header=True, bytes NAString=b"NA", bytes sep=b"\t", bint NAIntTo0=True, metabaR=False, ngsfilter=False):
|
||||||
|
self.tags = set(tags)
|
||||||
self.header = header
|
self.header = header
|
||||||
self.first_line = True
|
self.first_line = True
|
||||||
self.NAString = NAString
|
self.NAString = NAString
|
||||||
self.sep = sep
|
self.sep = sep
|
||||||
|
self.NAIntTo0 = NAIntTo0
|
||||||
|
self.metabaR = metabaR
|
||||||
|
self.ngsfilter = ngsfilter
|
||||||
|
|
||||||
@cython.boundscheck(False)
|
@cython.boundscheck(False)
|
||||||
def __call__(self, object data):
|
def __call__(self, object data):
|
||||||
|
|
||||||
|
cdef set ktags
|
||||||
|
cdef list tags = [key for key in data]
|
||||||
|
|
||||||
line = []
|
line = []
|
||||||
|
|
||||||
if self.first_line:
|
if self.tags is not None and self.tags:
|
||||||
self.tags = [k for k in data.keys()]
|
ktags = self.tags
|
||||||
|
else:
|
||||||
for k in self.tags:
|
ktags = set(tags)
|
||||||
|
|
||||||
if self.header and self.first_line:
|
if self.header and self.first_line:
|
||||||
if isinstance(data.view[k], Column_multi_elts):
|
for k in ktags:
|
||||||
keys = data.view[k].keys()
|
if k in tags:
|
||||||
keys.sort()
|
if self.metabaR:
|
||||||
for k2 in keys:
|
if k == b'NUC_SEQ':
|
||||||
line.append(tobytes(k)+b':'+tobytes(k2))
|
ktoprint = b'sequence'
|
||||||
else:
|
else:
|
||||||
line.append(tobytes(k))
|
ktoprint = k.lower()
|
||||||
else:
|
ktoprint = ktoprint.replace(b'merged_', b'')
|
||||||
|
else:
|
||||||
|
ktoprint = k
|
||||||
|
if isinstance(data.view[k], Column_multi_elts):
|
||||||
|
keys = data.view[k].keys()
|
||||||
|
keys.sort()
|
||||||
|
for k2 in keys:
|
||||||
|
line.append(tobytes(ktoprint)+b':'+tobytes(k2))
|
||||||
|
else:
|
||||||
|
line.append(tobytes(ktoprint))
|
||||||
|
r = self.sep.join(value for value in line)
|
||||||
|
r += b'\n'
|
||||||
|
line = []
|
||||||
|
|
||||||
|
for k in ktags:
|
||||||
|
if k in tags:
|
||||||
value = data[k]
|
value = data[k]
|
||||||
if isinstance(data.view[k], Column_multi_elts):
|
if isinstance(data.view[k], Column_multi_elts):
|
||||||
keys = data.view[k].keys()
|
keys = data.view[k].keys()
|
||||||
@ -46,14 +69,22 @@ cdef class TabFormat:
|
|||||||
if value[k2] is not None:
|
if value[k2] is not None:
|
||||||
line.append(str2bytes(str(bytes2str_object(value[k2])))) # genius programming
|
line.append(str2bytes(str(bytes2str_object(value[k2])))) # genius programming
|
||||||
else:
|
else:
|
||||||
line.append(self.NAString)
|
if self.NAIntTo0 and isinstance(data.view[k], Column_multi_elts_int):
|
||||||
|
line.append(b"0")
|
||||||
|
else:
|
||||||
|
line.append(self.NAString)
|
||||||
else:
|
else:
|
||||||
if value is not None:
|
if value is not None or (self.NAIntTo0 and isinstance(data.view[k], Column_int)):
|
||||||
line.append(str2bytes(str(bytes2str_object(value))))
|
line.append(str2bytes(str(bytes2str_object(value))))
|
||||||
else:
|
else:
|
||||||
line.append(self.NAString)
|
line.append(self.NAString)
|
||||||
|
|
||||||
|
if self.header and self.first_line:
|
||||||
|
r += self.sep.join(value for value in line)
|
||||||
|
else:
|
||||||
|
r = self.sep.join(value for value in line)
|
||||||
|
|
||||||
if self.first_line:
|
if self.first_line:
|
||||||
self.first_line = False
|
self.first_line = False
|
||||||
|
|
||||||
return self.sep.join(value for value in line)
|
return r
|
||||||
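TabFormat now restricts output to the requested tags, can zero-fill missing integer cells, and has metabaR and ngsfilter header modes; in metabaR mode the NUC_SEQ key is printed as sequence, other keys are lower-cased and merged_ prefixes are dropped. A hedged sketch with illustrative key names:

    tab_fmt = TabFormat(tags=[b"ID", b"COUNT", b"MERGED_sample"],
                        header=True,
                        NAString=b"NA",
                        sep=b"\t",
                        NAIntTo0=True,   # missing int cells become 0
                        metabaR=True)    # metabaR-friendly column headers
    first = tab_fmt(view_line)           # on the first call: header line plus the first record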
|
@ -22,11 +22,11 @@ from libc.stdlib cimport free, malloc, realloc
|
|||||||
from libc.string cimport strcpy, strlen
|
from libc.string cimport strcpy, strlen
|
||||||
|
|
||||||
|
|
||||||
_featureMatcher = re.compile(b'^FEATURES.+\n(?=ORIGIN)',re.DOTALL + re.M)
|
_featureMatcher = re.compile(b'^FEATURES.+\n(?=ORIGIN(\s*))',re.DOTALL + re.M)
|
||||||
|
|
||||||
_headerMatcher = re.compile(b'^LOCUS.+(?=\nFEATURES)', re.DOTALL + re.M)
|
_headerMatcher = re.compile(b'^LOCUS.+(?=\nFEATURES)', re.DOTALL + re.M)
|
||||||
_seqMatcher = re.compile(b'ORIGIN.+(?=//\n)', re.DOTALL + re.M)
|
_seqMatcher = re.compile(b'^ORIGIN.+(?=//\n)', re.DOTALL + re.M)
|
||||||
_cleanSeq1 = re.compile(b'ORIGIN.+\n')
|
_cleanSeq1 = re.compile(b'ORIGIN(\s*)\n')
|
||||||
_cleanSeq2 = re.compile(b'[ \n0-9]+')
|
_cleanSeq2 = re.compile(b'[ \n0-9]+')
|
||||||
_acMatcher = re.compile(b'(?<=^ACCESSION ).+',re.M)
|
_acMatcher = re.compile(b'(?<=^ACCESSION ).+',re.M)
|
||||||
_deMatcher = re.compile(b'(?<=^DEFINITION ).+\n( .+\n)*',re.M)
|
_deMatcher = re.compile(b'(?<=^DEFINITION ).+\n( .+\n)*',re.M)
|
||||||
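The tightened GenBank regular expressions anchor ORIGIN at the start of a line and accept an ORIGIN line followed only by whitespace. A quick check of the _cleanSeq1 change:

    import re
    old = re.compile(b'ORIGIN.+\n')      # required at least one character after ORIGIN
    new = re.compile(b'ORIGIN(\s*)\n')   # also matches a bare b'ORIGIN\n' line
    bool(old.search(b'ORIGIN\n'))        # False
    bool(new.search(b'ORIGIN\n'))        # True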
@ -155,10 +155,10 @@ def genbankIterator_file(lineiterator,
|
|||||||
yield seq
|
yield seq
|
||||||
read+=1
|
read+=1
|
||||||
|
|
||||||
# Last sequence
|
# Last sequence if not empty lines
|
||||||
seq = genbankParser(entry)
|
if entry.strip():
|
||||||
|
seq = genbankParser(entry)
|
||||||
yield seq
|
yield seq
|
||||||
|
|
||||||
free(entry)
|
free(entry)
|
||||||
|
|
||||||
|
@ -48,24 +48,25 @@ def ngsfilterIterator(lineiterator,
|
|||||||
all_lines.insert(0, firstline)
|
all_lines.insert(0, firstline)
|
||||||
|
|
||||||
# Insert header for column names
|
# Insert header for column names
|
||||||
column_names = [b"experiment", b"sample", b"forward_tag", b"reverse_tag", b"forward_primer", b"reverse_primer"]
|
column_names = [b"experiment", b"sample", b"forward_tag", b"reverse_tag", b"forward_primer", b"reverse_primer"] #,b"additional_info"]
|
||||||
header = out_sep.join(column_names)
|
header = out_sep.join(column_names)
|
||||||
|
|
||||||
new_lines.append(header)
|
new_lines.append(header)
|
||||||
|
|
||||||
for line in all_lines:
|
for line in all_lines:
|
||||||
split_line = line.split()
|
split_line = line.split(maxsplit=5)
|
||||||
tags = split_line.pop(2)
|
if split_line:
|
||||||
tags = tags.split(b":")
|
tags = split_line.pop(2)
|
||||||
for t_idx in range(len(tags)):
|
tags = tags.split(b":")
|
||||||
if tags[t_idx]==b"-" or tags[t_idx]==b"None" or tags[t_idx]==b"":
|
for t_idx in range(len(tags)):
|
||||||
tags[t_idx] = nastring
|
if tags[t_idx]==b"-" or tags[t_idx]==b"None" or tags[t_idx]==b"":
|
||||||
if len(tags) == 1: # Forward and reverse tags are the same
|
tags[t_idx] = nastring
|
||||||
tags.append(tags[0])
|
if len(tags) == 1: # Forward and reverse tags are the same
|
||||||
split_line.insert(2, tags[0])
|
tags.append(tags[0])
|
||||||
split_line.insert(3, tags[1])
|
split_line.insert(2, tags[0])
|
||||||
new_lines.append(out_sep.join(split_line[0:6]))
|
split_line.insert(3, tags[1])
|
||||||
|
new_lines.append(out_sep.join(split_line[0:6]))
|
||||||
|
|
||||||
return tabIterator(iter(new_lines),
|
return tabIterator(iter(new_lines),
|
||||||
header = True,
|
header = True,
|
||||||
sep = out_sep,
|
sep = out_sep,
|
||||||
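Two behaviour changes worth noting in this hunk: split(maxsplit=5) keeps any free-text annotation after the sixth column in one field, and the new emptiness test skips blank lines instead of failing on pop(). An illustrative line (all field values invented):

    line = b"exp1 sample1 aattcc:ggttaa ACACACACAC TGTGTGTGTG F @ extra info with spaces"
    len(line.split())            # old behaviour: the trailing comment shatters into extra fields
    len(line.split(maxsplit=5))  # new behaviour: exactly 6 fields, the tail stays whole in [5]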
|
@ -8,7 +8,7 @@ Created on feb 20th 2018
|
|||||||
|
|
||||||
import types
|
import types
|
||||||
from obitools3.utils cimport __etag__
|
from obitools3.utils cimport __etag__
|
||||||
|
from obitools3.utils cimport str2bytes
|
||||||
|
|
||||||
def tabIterator(lineiterator,
|
def tabIterator(lineiterator,
|
||||||
bint header = False,
|
bint header = False,
|
||||||
@ -75,7 +75,7 @@ def tabIterator(lineiterator,
|
|||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
# TODO ??? default column names? like R?
|
# TODO ??? default column names? like R?
|
||||||
keys = [i for i in range(len(line.split(sep)))]
|
keys = [str2bytes(str(i)) for i in range(len(line.split(sep)))]
|
||||||
|
|
||||||
while skipped < skip :
|
while skipped < skip :
|
||||||
line = next(iterator)
|
line = next(iterator)
|
||||||
|
@ -53,7 +53,11 @@ def entryIteratorFactory(lineiterator,
|
|||||||
|
|
||||||
i = iterator
|
i = iterator
|
||||||
|
|
||||||
first=next(i)
|
try:
|
||||||
|
first=next(i)
|
||||||
|
except StopIteration:
|
||||||
|
first=""
|
||||||
|
pass
|
||||||
|
|
||||||
format=b"tabular"
|
format=b"tabular"
|
||||||
|
|
||||||
|
@ -173,7 +173,10 @@ def open_uri(uri,
|
|||||||
type newviewtype=View,
|
type newviewtype=View,
|
||||||
dms_only=False,
|
dms_only=False,
|
||||||
force_file=False):
|
force_file=False):
|
||||||
|
|
||||||
|
if type(uri) == str and not uri.isascii():
|
||||||
|
raise Exception("Paths must be ASCII characters only")
|
||||||
|
|
||||||
cdef bytes urib = tobytes(uri)
|
cdef bytes urib = tobytes(uri)
|
||||||
cdef bytes scheme
|
cdef bytes scheme
|
||||||
cdef tuple dms
|
cdef tuple dms
|
||||||
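open_uri() now rejects non-ASCII paths up front with an explicit message; note that str.isascii() requires Python 3.7 or later.

    "my_dms/my_view".isascii()     # True, so the path is accepted
    "échantillons/vue".isascii()   # False, so open_uri() raises "Paths must be ASCII characters only"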
@ -192,7 +195,7 @@ def open_uri(uri,
|
|||||||
|
|
||||||
config = getConfiguration()
|
config = getConfiguration()
|
||||||
urip = urlparse(urib)
|
urip = urlparse(urib)
|
||||||
|
|
||||||
if 'obi' not in config:
|
if 'obi' not in config:
|
||||||
config['obi']={}
|
config['obi']={}
|
||||||
|
|
||||||
@ -209,13 +212,14 @@ def open_uri(uri,
|
|||||||
scheme = urip.scheme
|
scheme = urip.scheme
|
||||||
|
|
||||||
error = None
|
error = None
|
||||||
|
|
||||||
if urib != b"-" and \
|
if b'/taxonomy/' in urib or \
|
||||||
|
(urib != b"-" and \
|
||||||
(scheme==b"dms" or \
|
(scheme==b"dms" or \
|
||||||
(scheme==b"" and \
|
(scheme==b"" and \
|
||||||
(((not input) and "outputformat" not in config["obi"]) or \
|
(((not input) and "outputformat" not in config["obi"]) or \
|
||||||
(input and "inputformat" not in config["obi"])))): # TODO maybe not best way
|
(input and "inputformat" not in config["obi"]))))): # TODO maybe not best way
|
||||||
|
|
||||||
if default_dms is not None and b"/" not in urip.path: # assuming view to open in default DMS (TODO discuss)
|
if default_dms is not None and b"/" not in urip.path: # assuming view to open in default DMS (TODO discuss)
|
||||||
dms=(default_dms, urip.path)
|
dms=(default_dms, urip.path)
|
||||||
else:
|
else:
|
||||||
@ -223,7 +227,7 @@ def open_uri(uri,
|
|||||||
|
|
||||||
if dms is None and default_dms is not None:
|
if dms is None and default_dms is not None:
|
||||||
dms=(default_dms, urip.path)
|
dms=(default_dms, urip.path)
|
||||||
|
|
||||||
if dms is not None:
|
if dms is not None:
|
||||||
if dms_only:
|
if dms_only:
|
||||||
return (dms[0],
|
return (dms[0],
|
||||||
@ -248,7 +252,7 @@ def open_uri(uri,
|
|||||||
|
|
||||||
if default_dms is None:
|
if default_dms is None:
|
||||||
config["obi"]["defaultdms"]=resource[0]
|
config["obi"]["defaultdms"]=resource[0]
|
||||||
|
|
||||||
return (resource[0],
|
return (resource[0],
|
||||||
resource[1],
|
resource[1],
|
||||||
type(resource[1]),
|
type(resource[1]),
|
||||||
@ -276,7 +280,12 @@ def open_uri(uri,
|
|||||||
iseq = urib
|
iseq = urib
|
||||||
objclass = bytes
|
objclass = bytes
|
||||||
else: # TODO update uopen to be able to write?
|
else: # TODO update uopen to be able to write?
|
||||||
if not urip.path or urip.path == b'-':
|
if config['obi']['outputformat'] == b'metabaR':
|
||||||
|
if 'metabarprefix' not in config['obi']:
|
||||||
|
raise Exception("Prefix needed when exporting for metabaR (--metabaR-prefix option)")
|
||||||
|
else:
|
||||||
|
file = open(config['obi']['metabarprefix']+'.tab', 'wb')
|
||||||
|
elif not urip.path or urip.path == b'-':
|
||||||
file = sys.stdout.buffer
|
file = sys.stdout.buffer
|
||||||
else:
|
else:
|
||||||
file = open(urip.path, 'wb')
|
file = open(urip.path, 'wb')
|
||||||
@ -298,11 +307,11 @@ def open_uri(uri,
|
|||||||
format=config["obi"][formatkey]
|
format=config["obi"][formatkey]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
format=None
|
format=None
|
||||||
|
|
||||||
if b'seqtype' in qualifiers:
|
if b'seqtype' in qualifiers:
|
||||||
seqtype=qualifiers[b'seqtype'][0]
|
seqtype=qualifiers[b'seqtype'][0]
|
||||||
else:
|
else:
|
||||||
if format == b"ngsfilter" or format == b"tabular": # TODO discuss
|
if format == b"ngsfilter" or format == b"tabular" or format == b"metabaR": # TODO discuss
|
||||||
seqtype=None
|
seqtype=None
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
@ -386,10 +395,10 @@ def open_uri(uri,
|
|||||||
raise MalformedURIException('Malformed header argument in URI')
|
raise MalformedURIException('Malformed header argument in URI')
|
||||||
|
|
||||||
if b"sep" in qualifiers:
|
if b"sep" in qualifiers:
|
||||||
sep=tobytes(qualifiers[b"sep"][0][0])
|
sep = tobytes(qualifiers[b"sep"][0][0])
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
sep=tobytes(config["obi"]["sep"])
|
sep = tobytes(config["obi"]["sep"])
|
||||||
except KeyError:
|
except KeyError:
|
||||||
sep=None
|
sep=None
|
||||||
|
|
||||||
@ -426,7 +435,21 @@ def open_uri(uri,
|
|||||||
nastring=tobytes(config["obi"][nakey])
|
nastring=tobytes(config["obi"][nakey])
|
||||||
except KeyError:
|
except KeyError:
|
||||||
nastring=b'NA'
|
nastring=b'NA'
|
||||||
|
|
||||||
|
if b"na_int_to_0" in qualifiers:
|
||||||
|
try:
|
||||||
|
na_int_to_0=eval(qualifiers[b"na_int_to_0"][0])
|
||||||
|
except Exception as e:
|
||||||
|
raise MalformedURIException("Malformed 'NA_int_to_0' argument in URI")
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
na_int_to_0=config["obi"]["na_int_to_0"]
|
||||||
|
except KeyError:
|
||||||
|
if format==b"tabular" or format==b"metabaR":
|
||||||
|
na_int_to_0=True
|
||||||
|
else:
|
||||||
|
na_int_to_0=False
|
||||||
|
|
||||||
if b"stripwhite" in qualifiers:
|
if b"stripwhite" in qualifiers:
|
||||||
try:
|
try:
|
||||||
stripwhite=eval(qualifiers[b"stripwhite"][0])
|
stripwhite=eval(qualifiers[b"stripwhite"][0])
|
||||||
@ -461,17 +484,36 @@ def open_uri(uri,
|
|||||||
except KeyError:
|
except KeyError:
|
||||||
commentchar=b'#'
|
commentchar=b'#'
|
||||||
|
|
||||||
|
if b"only_keys" in qualifiers:
|
||||||
|
only_keys=qualifiers[b"only_keys"][0] # not sure that works but no one ever uses qualifiers
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
only_keys_str=config["obi"]["only_keys"]
|
||||||
|
only_keys=[]
|
||||||
|
for key in only_keys_str:
|
||||||
|
only_keys.append(tobytes(key))
|
||||||
|
except KeyError:
|
||||||
|
only_keys=[]
|
||||||
|
|
||||||
|
if b"metabaR_prefix" in qualifiers:
|
||||||
|
metabaR_prefix = tobytes(qualifiers[b"metabaR_prefix"][0][0])
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
metabaR_prefix = tobytes(config["obi"]["metabarprefix"])
|
||||||
|
except KeyError:
|
||||||
|
metabaR_prefix=None
|
||||||
|
|
||||||
if format is not None:
|
if format is not None:
|
||||||
if seqtype==b"nuc":
|
if seqtype==b"nuc":
|
||||||
objclass = Nuc_Seq # Nuc_Seq_Stored? TODO
|
objclass = Nuc_Seq # Nuc_Seq_Stored? TODO
|
||||||
if format==b"fasta":
|
if format==b"fasta" or format==b"silva" or format==b"rdp" or format == b"unite" or format == b"sintax":
|
||||||
if input:
|
if input:
|
||||||
iseq = fastaNucIterator(file,
|
iseq = fastaNucIterator(file,
|
||||||
skip=skip,
|
skip=skip,
|
||||||
only=only,
|
only=only,
|
||||||
nastring=nastring)
|
nastring=nastring)
|
||||||
else:
|
else:
|
||||||
iseq = FastaNucWriter(FastaFormat(printNAKeys=printna, NAString=nastring),
|
iseq = FastaNucWriter(FastaFormat(tags=only_keys, printNAKeys=printna, NAString=nastring),
|
||||||
file,
|
file,
|
||||||
skip=skip,
|
skip=skip,
|
||||||
only=only)
|
only=only)
|
||||||
@ -484,7 +526,7 @@ def open_uri(uri,
|
|||||||
noquality=noquality,
|
noquality=noquality,
|
||||||
nastring=nastring)
|
nastring=nastring)
|
||||||
else:
|
else:
|
||||||
iseq = FastqWriter(FastqFormat(printNAKeys=printna, NAString=nastring),
|
iseq = FastqWriter(FastqFormat(tags=only_keys, printNAKeys=printna, NAString=nastring),
|
||||||
file,
|
file,
|
||||||
skip=skip,
|
skip=skip,
|
||||||
only=only)
|
only=only)
|
||||||
@ -520,7 +562,17 @@ def open_uri(uri,
|
|||||||
skip = skip,
|
skip = skip,
|
||||||
only = only)
|
only = only)
|
||||||
else:
|
else:
|
||||||
iseq = TabWriter(TabFormat(header=header, NAString=nastring, sep=sep),
|
iseq = TabWriter(TabFormat(tags=only_keys, header=header, NAString=nastring, sep=sep, NAIntTo0=na_int_to_0),
|
||||||
|
file,
|
||||||
|
skip=skip,
|
||||||
|
only=only,
|
||||||
|
header=header)
|
||||||
|
elif format==b"metabaR":
|
||||||
|
objclass = dict
|
||||||
|
if input:
|
||||||
|
raise NotImplementedError('Input data file format not implemented')
|
||||||
|
else:
|
||||||
|
iseq = TabWriter(TabFormat(tags=only_keys, header=header, NAString=nastring, sep=sep, NAIntTo0=na_int_to_0, metabaR=True),
|
||||||
file,
|
file,
|
||||||
skip=skip,
|
skip=skip,
|
||||||
only=only,
|
only=only,
|
||||||
@ -538,7 +590,7 @@ def open_uri(uri,
|
|||||||
skip = skip,
|
skip = skip,
|
||||||
only = only)
|
only = only)
|
||||||
else:
|
else:
|
||||||
raise NotImplementedError('Output sequence file format not implemented')
|
raise NotImplementedError('Output data file format not implemented')
|
||||||
else:
|
else:
|
||||||
if input:
|
if input:
|
||||||
iseq, objclass, format = entryIteratorFactory(file,
|
iseq, objclass, format = entryIteratorFactory(file,
|
||||||
@ -556,7 +608,7 @@ def open_uri(uri,
|
|||||||
commentchar)
|
commentchar)
|
||||||
else: # default export is in fasta? or tab? TODO
|
else: # default export is in fasta? or tab? TODO
|
||||||
objclass = Nuc_Seq # Nuc_Seq_Stored? TODO
|
objclass = Nuc_Seq # Nuc_Seq_Stored? TODO
|
||||||
iseq = FastaNucWriter(FastaFormat(printNAKeys=printna, NAString=nastring),
|
iseq = FastaNucWriter(FastaFormat(tags=only_keys, printNAKeys=printna, NAString=nastring),
|
||||||
file,
|
file,
|
||||||
skip=skip,
|
skip=skip,
|
||||||
only=only)
|
only=only)
|
||||||
@ -565,6 +617,6 @@ def open_uri(uri,
|
|||||||
|
|
||||||
entry_count = -1
|
entry_count = -1
|
||||||
if input:
|
if input:
|
||||||
entry_count = count_entries(file, format)
|
entry_count = count_entries(file, format, header)
|
||||||
|
|
||||||
return (file, iseq, objclass, urib, entry_count)
|
return (file, iseq, objclass, urib, entry_count)
|
||||||
|
@ -3,7 +3,7 @@
 from obitools3.dms.capi.obitypes cimport obitype_t, index_t

 cpdef bytes format_uniq_pattern(bytes format)
-cpdef int count_entries(file, bytes format)
+cpdef int count_entries(file, bytes format, bint header)

 cdef obi_errno_to_exception(index_t line_nb=*, object elt_id=*, str error_message=*)

@ -18,7 +18,7 @@ cdef object clean_empty_values_from_object(object value, exclude=*)

 cdef obitype_t get_obitype_single_value(object value)
 cdef obitype_t update_obitype(obitype_t obitype, object new_value)
-cdef obitype_t get_obitype_iterable_value(object value)
+cdef obitype_t get_obitype_iterable_value(object value, type t)
 cdef obitype_t get_obitype(object value)

 cdef object __etag__(bytes x, bytes nastring=*)
|
@ -9,7 +9,8 @@ from obitools3.dms.capi.obitypes cimport is_a_DNA_seq, \
|
|||||||
OBI_QUAL, \
|
OBI_QUAL, \
|
||||||
OBI_SEQ, \
|
OBI_SEQ, \
|
||||||
OBI_STR, \
|
OBI_STR, \
|
||||||
index_t
|
index_t, \
|
||||||
|
OBI_INT_MAX
|
||||||
|
|
||||||
from obitools3.dms.capi.obierrno cimport OBI_LINE_IDX_ERROR, \
|
from obitools3.dms.capi.obierrno cimport OBI_LINE_IDX_ERROR, \
|
||||||
OBI_ELT_IDX_ERROR, \
|
OBI_ELT_IDX_ERROR, \
|
||||||
@ -39,7 +40,7 @@ cpdef bytes format_uniq_pattern(bytes format):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
cpdef int count_entries(file, bytes format):
|
cpdef int count_entries(file, bytes format, bint header):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
sep = format_uniq_pattern(format)
|
sep = format_uniq_pattern(format)
|
||||||
@ -74,6 +75,8 @@ cpdef int count_entries(file, bytes format):
|
|||||||
total_count += len(re.findall(sep, mmapped_file))
|
total_count += len(re.findall(sep, mmapped_file))
|
||||||
if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank" and format != b"fastq":
|
if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank" and format != b"fastq":
|
||||||
total_count += 1 # adding +1 for 1st entry because separators include \n (ngsfilter and tabular already count one more because of last \n)
|
total_count += 1 # adding +1 for 1st entry because separators include \n (ngsfilter and tabular already count one more because of last \n)
|
||||||
|
if format == b"tabular" and header: # not counting header as an entry
|
||||||
|
total_count -= 1
|
||||||
|
|
||||||
except:
|
except:
|
||||||
if len(files) > 1:
|
if len(files) > 1:
|
||||||
@ -257,38 +260,51 @@ cdef obitype_t update_obitype(obitype_t obitype, object new_value) :
|
|||||||
|
|
||||||
new_type = type(new_value)
|
new_type = type(new_value)
|
||||||
|
|
||||||
if obitype == OBI_INT :
|
#if new_type == NoneType: # doesn't work because Cython sucks
|
||||||
if new_type == float :
|
if new_value == None or new_type==list or new_type==dict or new_type==tuple:
|
||||||
return OBI_FLOAT
|
return obitype
|
||||||
# TODO BOOL vers INT/FLOAT
|
|
||||||
elif new_type == str or new_type == bytes :
|
# TODO BOOL to INT/FLOAT
|
||||||
|
if new_type == str or new_type == bytes :
|
||||||
if obitype == OBI_SEQ and is_a_DNA_seq(tobytes(new_value)) :
|
if obitype == OBI_SEQ and is_a_DNA_seq(tobytes(new_value)) :
|
||||||
pass
|
pass
|
||||||
else :
|
else :
|
||||||
return OBI_STR
|
return OBI_STR
|
||||||
|
elif obitype == OBI_INT :
|
||||||
|
if new_type == float or new_value > OBI_INT_MAX :
|
||||||
|
return OBI_FLOAT
|
||||||
|
|
||||||
return obitype
|
return obitype
|
||||||
|
|
||||||
|
|
||||||
cdef obitype_t get_obitype_iterable_value(object value) :
|
cdef obitype_t get_obitype_iterable_value(object value, type t) :
|
||||||
|
|
||||||
cdef obitype_t value_obitype
|
cdef obitype_t value_obitype
|
||||||
|
|
||||||
value_obitype = OBI_VOID
|
value_obitype = OBI_VOID
|
||||||
|
|
||||||
for k in value :
|
if t == dict:
|
||||||
if value_obitype == OBI_VOID :
|
for k in value :
|
||||||
value_obitype = get_obitype_single_value(value[k])
|
if value_obitype == OBI_VOID :
|
||||||
else :
|
value_obitype = get_obitype_single_value(value[k])
|
||||||
value_obitype = update_obitype(value_obitype, value[k])
|
else :
|
||||||
|
value_obitype = update_obitype(value_obitype, value[k])
|
||||||
|
|
||||||
|
elif t == list or t == tuple:
|
||||||
|
for v in value :
|
||||||
|
if value_obitype == OBI_VOID :
|
||||||
|
value_obitype = get_obitype_single_value(v)
|
||||||
|
else :
|
||||||
|
value_obitype = update_obitype(value_obitype, v)
|
||||||
|
|
||||||
return value_obitype
|
return value_obitype
|
||||||
|
|
||||||
|
|
||||||
cdef obitype_t get_obitype(object value) :
|
cdef obitype_t get_obitype(object value) :
|
||||||
|
|
||||||
if type(value) == dict or type(value) == list or type(value) == tuple :
|
t = type(value)
|
||||||
return get_obitype_iterable_value(value)
|
if t == dict or t == list or t == tuple :
|
||||||
|
return get_obitype_iterable_value(value, t)
|
||||||
|
|
||||||
else :
|
else :
|
||||||
return get_obitype_single_value(value)
|
return get_obitype_single_value(value)
|
||||||
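The reworked inference ignores None and nested containers, keeps OBI_SEQ only for strings that still look like DNA, and promotes OBI_INT to OBI_FLOAT on float values or on integers larger than OBI_INT_MAX. A plain-Python sketch of the promotion rule (names are invented for illustration; the 32-bit limit is an assumption about OBI_INT_MAX):

    INT_MAX_32 = 2**31 - 1

    def promote(current, new_value, looks_like_dna):
        # current is one of "SEQ", "STR", "INT", "FLOAT"; mirrors update_obitype() above
        if new_value is None or isinstance(new_value, (list, dict, tuple)):
            return current
        if isinstance(new_value, (str, bytes)):
            return current if (current == "SEQ" and looks_like_dna(new_value)) else "STR"
        if current == "INT" and (isinstance(new_value, float) or new_value > INT_MAX_32):
            return "FLOAT"
        return current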
|
@ -1,5 +1,5 @@
 major = 3
 minor = 0
-serial= '0b32'
+serial= '1b22'

 version ="%d.%d.%s" % (major,minor,serial)
|
@ -20,8 +20,6 @@ cdef class TabWriter:
             self.only = -1
         else:
            self.only = int(only)
-            if header:
-                self.only += 1

         self.formatter = formatter
         self.output = output_object
|
src/.gitignore (vendored)
@ -3,3 +3,4 @@
 /cmake_install.cmake
 /libcobitools3.dylib
 /Makefile
+/build/
|
@@ -77,6 +77,7 @@ static inline ecotx_t* get_lca_from_merged_taxids(Obiview_p view, OBIDMS_column_
 {
     ecotx_t* taxon = NULL;
     ecotx_t* lca = NULL;
+    ecotx_t* lca1 = NULL;
     int32_t taxid;
     index_t taxid_idx;
     int64_t taxid_str_idx;
@@ -108,10 +109,11 @@ static inline ecotx_t* get_lca_from_merged_taxids(Obiview_p view, OBIDMS_column_
         else
         {
             // Compute LCA
+            lca1 = lca;
             lca = obi_taxo_get_lca(taxon, lca);
             if (lca == NULL)
             {
-                obidebug(1, "\nError getting the last common ancestor of two taxa when building a reference database");
+                obidebug(1, "\nError getting the last common ancestor of two taxa when building a reference database, %d %d", taxid, lca1->taxid);
                 return NULL;
             }
         }
@@ -185,7 +187,7 @@ int build_reference_db(const char* dms_name,
     matrix_view_name = strcpy(matrix_view_name, o_view_name);
     strcat(matrix_view_name, "_matrix");
 
-    fprintf(stderr, "Aligning queries with reference database...\n");
+    fprintf(stderr, "Aligning sequences...\n");
     if (obi_lcs_align_one_column(dms_name,
                                  refs_view_name,
                                  "",
@@ -243,6 +245,7 @@ int build_reference_db(const char* dms_name,
                            false,
                            false,
                            false,
+                           false,
                            "",
                            "",
                            -1,
@@ -392,6 +395,7 @@ int build_reference_db(const char* dms_name,
                            1,
                            "",
                            false,
+                           false,
                            true,
                            false,
                            "",
@@ -415,6 +419,7 @@ int build_reference_db(const char* dms_name,
                            1,
                            "",
                            false,
+                           false,
                            true,
                            false,
                            "",
@@ -860,7 +865,8 @@ int build_reference_db(const char* dms_name,
     fprintf(stderr,"\rDone : 100 %% \n");
 
     // Add information about the threshold used to build the DB
-    snprintf(threshold_str, 5, "%f", threshold);
+    #define snprintf_nowarn(...) (snprintf(__VA_ARGS__) < 0 ? abort() : (void)0)
+    snprintf_nowarn(threshold_str, 5, "%f", threshold);
 
     new_comments = obi_add_comment((o_view->infos)->comments, DB_THRESHOLD_KEY_IN_COMMENTS, threshold_str);
     if (new_comments == NULL)
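Note on the `snprintf_nowarn` change above: wrapping `snprintf` in a macro whose return value is checked is a common way to silence GCC's `-Wformat-truncation` warning while still trapping encoding errors. Below is a minimal standalone sketch of the same idiom; the buffer size and value are illustrative, not the project's code.

```c
/* Minimal standalone sketch of the snprintf-wrapping idiom used above.
 * Buffer size and value are illustrative, not the project's. */
#include <stdio.h>
#include <stdlib.h>

/* Checking the result inside the macro keeps -Wformat-truncation quiet
 * while still aborting on an actual encoding error (rc < 0). */
#define snprintf_nowarn(...) (snprintf(__VA_ARGS__) < 0 ? abort() : (void)0)

int main(void)
{
    char threshold_str[5];            /* deliberately small: keeps "0.97" plus '\0' */
    double threshold = 0.97;

    snprintf_nowarn(threshold_str, sizeof(threshold_str), "%f", threshold);
    printf("stored threshold string: %s\n", threshold_str);   /* prints "0.97" (truncated) */
    return 0;
}
```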
@@ -36,10 +36,12 @@ bool only_ATGC(const char* seq)
     {
         if (!((*c == 'A') || \
               (*c == 'T') || \
+              (*c == 'U') || \
               (*c == 'G') || \
               (*c == 'C') || \
               (*c == 'a') || \
               (*c == 't') || \
+              (*c == 'u') || \
               (*c == 'g') || \
               (*c == 'c')))
         {
@@ -182,6 +184,8 @@ byte_t* encode_seq_on_2_bits(const char* seq, int32_t length)
             break;
         case 't':
         case 'T':
+        case 'u':
+        case 'U':
             seq_b[i/4] |= NUC_T_2b;
             break;
         default:
@@ -288,6 +292,8 @@ byte_t* encode_seq_on_4_bits(const char* seq, int32_t length)
             break;
         case 't':
         case 'T':
+        case 'u': // discussable
+        case 'U':
             seq_b[i/2] |= NUC_T_4b;
             break;
         case 'r':
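The encode.c change above accepts uracil by letting 'u'/'U' fall through to the thymine bit pattern. A standalone sketch of that fallthrough follows, using made-up 2-bit codes rather than the library's NUC_*_2b constants.

```c
/* Standalone sketch of the 'U'-as-'T' fallthrough added above; the 2-bit
 * codes here are illustrative, not the library's NUC_*_2b constants. */
#include <stdint.h>
#include <stdio.h>

static int encode_2bits(char c, uint8_t* out)
{
    switch (c)
    {
        case 'a': case 'A': *out = 0x0; return 0;
        case 'c': case 'C': *out = 0x1; return 0;
        case 't': case 'T':
        case 'u': case 'U': *out = 0x2; return 0;   /* uracil shares thymine's code */
        case 'g': case 'G': *out = 0x3; return 0;
        default:            return -1;              /* ambiguous or invalid base */
    }
}

int main(void)
{
    uint8_t t_code, u_code;
    encode_2bits('T', &t_code);
    encode_2bits('u', &u_code);
    printf("T -> %u, u -> %u (identical)\n", t_code, u_code);
    return 0;
}
```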
src/encode.h
@@ -64,7 +64,7 @@ enum
 
 
 /**
- * @brief Checks if there are only 'atgcATGC' characters in a
+ * @brief Checks if there are only 'atgcuATGCU' characters in a
  *        character string.
  *
  * @param seq The sequence to check.
@@ -129,12 +129,13 @@ byte_t get_nucleotide_from_encoded_seq(byte_t* seq, int32_t idx, uint8_t encodin
 /**
  * @brief Encodes a DNA sequence with each nucleotide coded on 2 bits.
  *
  * A or a : 00
  * C or c : 01
- * T or t : 10
+ * T or t or U or u : 10
  * G or g : 11
  *
- * @warning The DNA sequence must contain only 'atgcATGC' characters.
+ * @warning The DNA sequence must contain only 'atgcuATGCU' characters.
+ * @warning Uracil ('U') bases are encoded as Thymine ('T') bases.
  *
  * @param seq The sequence to encode.
  * @param length The length of the sequence to encode.
@@ -169,23 +170,24 @@ char* decode_seq_on_2_bits(byte_t* seq_b, int32_t length_seq);
 /**
  * @brief Encodes a DNA sequence with each nucleotide coded on 4 bits.
  *
  * A or a : 0001
  * C or c : 0010
  * G or g : 0011
- * T or t : 0100
+ * T or t or U or u : 0100
  * R or r : 0101
  * Y or y : 0110
  * S or s : 0111
  * W or w : 1000
  * K or k : 1001
  * M or m : 1010
  * B or b : 1011
  * D or d : 1100
  * H or h : 1101
  * V or v : 1110
  * N or n : 1111
  *
  * @warning The DNA sequence must contain only IUPAC characters.
+ * @warning Uracil ('U') bases are encoded as Thymine ('T') bases.
  *
  * @param seq The sequence to encode.
  * @param length The length of the sequence to encode.
@@ -77,7 +77,6 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     int32_t* shift_count_array;
     Obi_ali_p ali = NULL;
     int i, j;
-    bool switched_seqs;
     bool reversed;
     int score = 0;
     Obi_blob_p blob1 = NULL;
@@ -124,6 +123,8 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     bool keep_seq2_start;
     bool keep_seq1_end;
     bool keep_seq2_end;
+    bool left_ali;
+    bool rev_quals = false;
 
     // Check kmer size
     if ((kmer_size < 1) || (kmer_size > 4))
@@ -148,19 +149,8 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     }
 
     // Choose the shortest sequence to save kmer positions in array
-    switched_seqs = false;
     len1 = blob1->length_decoded_value;
     len2 = blob2->length_decoded_value;
-    if (len2 < len1)
-    {
-        switched_seqs = true;
-        temp_len = len1;
-        len1 = len2;
-        len2 = temp_len;
-        temp_blob = blob1;
-        blob1 = blob2;
-        blob2 = temp_blob;
-    }
 
     // Force encoding on 2 bits by replacing ambiguous nucleotides by 'a's
     if (blob1->element_size == 4)
@@ -196,7 +186,47 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     else
         reversed = false;
     if (reversed)
-        switched_seqs = !switched_seqs;
+    // unreverse to make cases simpler. Costly but rare (direct match is reverse primer match)
+    {
+        if (seq2 == NULL)
+            seq2 = obi_blob_to_seq(blob2);
+        seq2 = reverse_complement_sequence(seq2);
+        blob2 = obi_seq_to_blob(seq2);
+
+        if (seq1 == NULL)
+            seq1 = obi_blob_to_seq(blob1);
+        seq1 = reverse_complement_sequence(seq1);
+        blob1 = obi_seq_to_blob(seq1);
+        free_blob1 = true;
+
+        // Get the quality arrays
+        qual1 = obi_get_qual_int_with_elt_idx_and_col_p_in_view(view1, qual_col1, idx1, 0, &qual1_len);
+        if (qual1 == NULL)
+        {
+            obidebug(1, "\nError getting the quality of the 1st sequence when computing the kmer similarity between two sequences");
+            return NULL;
+        }
+        qual2 = obi_get_qual_int_with_elt_idx_and_col_p_in_view(view2, qual_col2, idx2, 0, &qual2_len);
+        if (qual2 == NULL)
+        {
+            obidebug(1, "\nError getting the quality of the 2nd sequence when computing the kmer similarity between two sequences");
+            return NULL;
+        }
+
+        uint8_t* newqual1 = malloc(qual1_len*sizeof(uint8_t));
+        uint8_t* newqual2 = malloc(qual2_len*sizeof(uint8_t));
+
+        for (i=0;i<qual1_len;i++)
+            newqual1[i] = qual1[qual1_len-1-i];
+
+        for (i=0;i<qual2_len;i++)
+            newqual2[i] = qual2[qual2_len-1-i];
+
+        qual1 = newqual1;
+        qual2 = newqual2;
+
+        rev_quals = true;
+    }
 
     // Save total length for the shift counts array
     total_len = len1 + len2 + 1;  // +1 for shift 0
@@ -237,7 +267,7 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
             return NULL;
         }
     }
-    else if (len1 >= shift_array_height)
+    else if (total_len >= shift_array_height)
     {
         shift_array_height = total_len;
         *shift_array_p = (int32_t*) realloc(*shift_array_p, ARRAY_LENGTH * shift_array_height * sizeof(int32_t));
@@ -291,7 +321,7 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     *shift_array_height_p = shift_array_height;
     *shift_count_array_length_p = shift_count_array_length;
 
-    // Fill array with positions of kmers in the shortest sequence
+    // Fill array with positions of kmers in the first sequence
     encoding = blob1->element_size;
     kmer_count = len1 - kmer_size + 1;
     for (kmer_idx=0; kmer_idx < kmer_count; kmer_idx++)
@@ -310,7 +340,7 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
             kmer_pos_array[(kmer*kmer_pos_array_height)+i] = kmer_idx;
     }
 
-    // Compare positions of kmers between both sequences and store shifts
+    // Compare positions of kmers between both sequences and store shifts (a shift corresponds to pos2 - pos1)
     kmer_count = blob2->length_decoded_value - kmer_size + 1;
     for (kmer_idx=0; kmer_idx < kmer_count; kmer_idx++)
     {
@@ -374,35 +404,42 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     // The 873863 cases of hell
     if (best_shift > 0)
     {
-        overlap_len = len2 - best_shift;
-        if (len1 <= overlap_len)
-        {
-            overlap_len = len1;
-            if (! switched_seqs)
-                keep_seq2_end = true;
-            else
-                keep_seq2_start = true;
-        }
-        else if (switched_seqs)
-        {
-            keep_seq2_start = true;
-            keep_seq1_end = true;
-        }
-    }
-    else if (best_shift < 0)
-    {
-        overlap_len = len1 + best_shift;
-        if (!switched_seqs)
-        {
-            keep_seq1_start = true;
-            keep_seq2_end = true;
-        }
-    }
-    else
-    {
-        overlap_len = len1;
-        if ((!switched_seqs) && (len2 > len1))
-            keep_seq2_end = true;
+        left_ali = false;
+        overlap_len = len2 - best_shift;
+        if (len1 <= overlap_len)
+        {
+            overlap_len = len1;
+            keep_seq2_end = true;
+        }
+    }
+    else if (best_shift < 0)
+    {
+        left_ali = true;
+        overlap_len = len1 + best_shift;
+        if (len2 <= overlap_len)
+        {
+            overlap_len = len2;
+            keep_seq1_start = true;
+        }
+        else
+        {
+            keep_seq1_start = true;
+            keep_seq2_end = true;
+        }
+    }
+    else // if (best_shift == 0)
+    {
+        if (len2 >= len1)
+        {
+            overlap_len = len1;
+            keep_seq2_end = true;
+            left_ali = true;
+        }
+        else
+        {
+            overlap_len = len2;
+            left_ali = false; // discussable
+        }
     }
 
     ali = (Obi_ali_p) malloc(sizeof(Obi_ali_t));
@@ -433,7 +470,7 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
         ali->direction[0] = '\0';
     else
     {
-        if (((best_shift <= 0) && (!switched_seqs)) || ((best_shift > 0) && switched_seqs))
+        if (left_ali)
             strcpy(ali->direction, "left");
         else
             strcpy(ali->direction, "right");
@@ -442,28 +479,28 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
     // Build the consensus sequence if asked
     if (build_consensus)
     {
-        // Get the quality arrays
-        qual1 = obi_get_qual_int_with_elt_idx_and_col_p_in_view(view1, qual_col1, idx1, 0, &qual1_len);
-        if (qual1 == NULL)
-        {
-            obidebug(1, "\nError getting the quality of the 1st sequence when computing the kmer similarity between two sequences");
-            return NULL;
-        }
-        qual2 = obi_get_qual_int_with_elt_idx_and_col_p_in_view(view2, qual_col2, idx2, 0, &qual2_len);
-        if (qual2 == NULL)
-        {
-            obidebug(1, "\nError getting the quality of the 2nd sequence when computing the kmer similarity between two sequences");
-            return NULL;
-        }
+        if (! rev_quals)
+        {
+            // Get the quality arrays
+            qual1 = obi_get_qual_int_with_elt_idx_and_col_p_in_view(view1, qual_col1, idx1, 0, &qual1_len);
+            if (qual1 == NULL)
+            {
+                obidebug(1, "\nError getting the quality of the 1st sequence when computing the kmer similarity between two sequences");
+                return NULL;
+            }
+            qual2 = obi_get_qual_int_with_elt_idx_and_col_p_in_view(view2, qual_col2, idx2, 0, &qual2_len);
+            if (qual2 == NULL)
+            {
+                obidebug(1, "\nError getting the quality of the 2nd sequence when computing the kmer similarity between two sequences");
+                return NULL;
+            }
+        }
 
         // Decode the first sequence if not already done
         if (seq1 == NULL)
             seq1 = obi_blob_to_seq(blob1);
 
-        if (! switched_seqs)
-            consensus_len = len2 - best_shift;
-        else
-            consensus_len = len1 + best_shift;
+        consensus_len = len2 - best_shift;
 
         // Allocate memory for consensus sequence
         consensus_seq = (char*) malloc(consensus_len + 1 * sizeof(char)); // TODO keep malloced too maybe
@@ -557,6 +594,12 @@ Obi_ali_p kmer_similarity(Obiview_p view1, OBIDMS_column_p column1, index_t idx1
         free(seq2);
         free(blob2);
 
+    if (rev_quals)
+    {
+        free(qual1);
+        free(qual2);
+    }
+
     return ali;
 }
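When the hunk above reverse-complements a read, it also has to reverse the read's quality array so that each score stays attached to its base. Below is a self-contained sketch of just that reversal; `reverse_qualities()` is a hypothetical helper name, not project code.

```c
/* Sketch of the quality reversal done above when a read is reverse-complemented;
 * reverse_qualities() is a made-up helper, values are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t* reverse_qualities(const uint8_t* qual, int len)
{
    uint8_t* rev = malloc((size_t)len * sizeof(uint8_t));
    if (rev == NULL)
        return NULL;
    for (int i = 0; i < len; i++)
        rev[i] = qual[len - 1 - i];   /* score i of the reversed read comes from the other end */
    return rev;
}

int main(void)
{
    uint8_t qual[] = {40, 38, 30, 12, 2};
    uint8_t* rev = reverse_qualities(qual, 5);
    for (int i = 0; i < 5; i++)
        printf("%u ", rev[i]);        /* prints: 2 12 30 38 40 */
    printf("\n");
    free(rev);
    return 0;
}
```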
src/obi_clean.c
@@ -88,42 +88,42 @@ static int create_output_columns(Obiview_p o_view,
                                  int sample_count)
 {
     // Status column
-    if (obi_view_add_column(o_view, CLEAN_STATUS_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, sample_count, (sample_column->header)->elements_names, true, false, false, NULL, NULL, -1, CLEAN_STATUS_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, CLEAN_STATUS_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, sample_count, (sample_column->header)->elements_names, true, true, false, false, NULL, NULL, -1, CLEAN_STATUS_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", CLEAN_STATUS_COLUMN_NAME);
         return -1;
     }
 
     // Head column
-    if (obi_view_add_column(o_view, CLEAN_HEAD_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_HEAD_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, CLEAN_HEAD_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_HEAD_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", CLEAN_HEAD_COLUMN_NAME);
         return -1;
     }
 
     // Sample count column
-    if (obi_view_add_column(o_view, CLEAN_SAMPLECOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_SAMPLECOUNT_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, CLEAN_SAMPLECOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_SAMPLECOUNT_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", CLEAN_SAMPLECOUNT_COLUMN_NAME);
         return -1;
     }
 
     // Head count column
-    if (obi_view_add_column(o_view, CLEAN_HEADCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_HEADCOUNT_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, CLEAN_HEADCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_HEADCOUNT_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", CLEAN_HEADCOUNT_COLUMN_NAME);
         return -1;
     }
 
     // Internal count column
-    if (obi_view_add_column(o_view, CLEAN_INTERNALCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_INTERNALCOUNT_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, CLEAN_INTERNALCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_INTERNALCOUNT_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", CLEAN_INTERNALCOUNT_COLUMN_NAME);
         return -1;
     }
 
     // Singleton count column
-    if (obi_view_add_column(o_view, CLEAN_SINGLETONCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_SINGLETONCOUNT_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, CLEAN_SINGLETONCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_SINGLETONCOUNT_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", CLEAN_SINGLETONCOUNT_COLUMN_NAME);
         return -1;
@@ -229,6 +229,8 @@ int obi_clean(const char* dms_name,
         return -1;
     }
 
+    seq_count = (i_view->infos)->line_count;
+
     // Open the sequence column
     if (strcmp((i_view->infos)->view_type, VIEW_TYPE_NUC_SEQS) == 0)
         iseq_column = obi_view_get_column(i_view, NUC_SEQUENCE_COLUMN);
@@ -245,7 +247,7 @@ int obi_clean(const char* dms_name,
     }
 
     // Open the sample column if there is one
-    if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL))
+    if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL) || (seq_count == 0))
     {
         fprintf(stderr, "Info: No sample information provided, assuming one sample.\n");
         sample_column = obi_view_get_column(i_view, COUNT_COLUMN);
@@ -340,66 +342,67 @@ int obi_clean(const char* dms_name,
         return -1;
     }
 
-    // Build kmer tables
-    ktable = hash_seq_column(i_view, iseq_column, 0);
-    if (ktable == NULL)
-    {
-        obi_set_errno(OBI_CLEAN_ERROR);
-        obidebug(1, "\nError building kmer tables before aligning");
-        return -1;
-    }
-
-    seq_count = (i_view->infos)->line_count;
-
-    // Allocate arrays for sample counts otherwise reading in mapped files takes longer
-    complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
-    if (complete_sample_count_array == NULL)
-    {
-        obi_set_errno(OBI_MALLOC_ERROR);
-        obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
-        return -1;
-    }
-    for (samp=0; samp < sample_count; samp++)
-    {
-        for (k=0; k<seq_count; k++)
-            complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
-    }
-
-    // Allocate arrays for blobs otherwise reading in mapped files takes longer
-    blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
-    if (blob_array == NULL)
-    {
-        obi_set_errno(OBI_MALLOC_ERROR);
-        obidebug(1, "\nError allocating memory for the array of blobs");
-        return -1;
-    }
-    for (k=0; k<seq_count; k++)
-    {
-        blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
-    }
-
-    // Allocate alignment result array (byte at 0 if not aligned yet,
-    // 1 if sequence at index has a similarity above the threshold with the current sequence,
-    // 2 if sequence at index has a similarity below the threshold with the current sequence)
-    alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
-    if (alignment_result_array == NULL)
-    {
-        obi_set_errno(OBI_MALLOC_ERROR);
-        obidebug(1, "\nError allocating memory for alignment result array");
-        return -1;
-    }
-
-    // Initialize all sequences to singletons or NA if no sequences in that sample
-    for (k=0; k<seq_count; k++)
-    {
-        for (samp=0; samp < sample_count; samp++)
-        {
-            if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA) // Only initialize samples where there are some sequences
-            {
-                if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
-                {
-                    obidebug(1, "\nError initializing all sequences to singletons");
-                    return -1;
+    if (seq_count > 0)
+    {
+        // Build kmer tables
+        ktable = hash_seq_column(i_view, iseq_column, 0);
+        if (ktable == NULL)
+        {
+            obi_set_errno(OBI_CLEAN_ERROR);
+            obidebug(1, "\nError building kmer tables before aligning");
+            return -1;
+        }
+
+        // Allocate arrays for sample counts otherwise reading in mapped files takes longer
+        complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
+        if (complete_sample_count_array == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
+            return -1;
+        }
+        for (samp=0; samp < sample_count; samp++)
+        {
+            for (k=0; k<seq_count; k++)
+                complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
+        }
+
+        // Allocate arrays for blobs otherwise reading in mapped files takes longer
+        blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
+        if (blob_array == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for the array of blobs");
+            return -1;
+        }
+        for (k=0; k<seq_count; k++)
+        {
+            blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
+        }
+
+        // Allocate alignment result array (byte at 0 if not aligned yet,
+        // 1 if sequence at index has a similarity above the threshold with the current sequence,
+        // 2 if sequence at index has a similarity below the threshold with the current sequence)
+        alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
+        if (alignment_result_array == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for alignment result array");
+            return -1;
+        }
+
+        // Initialize all sequences to singletons or NA if no sequences in that sample
+        for (k=0; k<seq_count; k++)
+        {
+            for (samp=0; samp < sample_count; samp++)
+            {
+                if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA) // Only initialize samples where there are some sequences
+                {
+                    if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
+                    {
+                        obidebug(1, "\nError initializing all sequences to singletons");
+                        return -1;
+                    }
                 }
             }
         }
     }
@@ -551,17 +554,20 @@ int obi_clean(const char* dms_name,
         }
     }
 
-    free_kmer_tables(ktable, seq_count);
-    free(complete_sample_count_array);
-    free(blob_array);
-    free(alignment_result_array);
+    if (seq_count > 0)
+    {
+        free_kmer_tables(ktable, seq_count);
+        free(complete_sample_count_array);
+        free(blob_array);
+        free(alignment_result_array);
+    }
 
     fprintf(stderr, "\n");
 
     if (stop)
         return -1;
 
-    if (heads_only)
+    if (heads_only && (seq_count > 0))
     {
         line_selection = malloc((((o_view->infos)->line_count) + 1) * sizeof(index_t));
         if (line_selection == NULL)
@@ -635,7 +641,7 @@ int obi_clean(const char* dms_name,
     }
 
     // Flag the end of the line selection
-    if (heads_only)
+    if (heads_only && (seq_count > 0))
         line_selection[l] = -1;
 
     // Create new view with line selection if heads only
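obi_clean pre-loads the per-sample counts into one flat malloc'ed array indexed as `k + samp*seq_count`, so the alignment loops read plain memory instead of going back to the mapped column each time. Below is a small sketch of that layout; sizes and values are illustrative only.

```c
/* Sketch of the flat sample-count layout used above: counts for sample samp
 * occupy one contiguous block, addressed as k + samp*seq_count. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int seq_count = 4, sample_count = 3;
    int* counts = malloc((size_t)(seq_count * sample_count) * sizeof(int));

    /* fill: sequence k gets count k + 10*samp in sample samp (made-up numbers) */
    for (int samp = 0; samp < sample_count; samp++)
        for (int k = 0; k < seq_count; k++)
            counts[k + samp * seq_count] = k + 10 * samp;

    /* read back the counts of sequence 2 across samples */
    for (int samp = 0; samp < sample_count; samp++)
        printf("sample %d, seq 2 -> %d\n", samp, counts[2 + samp * seq_count]);

    free(counts);
    return 0;
}
```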
@@ -150,49 +150,49 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
 static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
 {
     // Original length column
-    if (obi_view_add_column(o_view, ECOPCR_SEQLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SEQLEN_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_SEQLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SEQLEN_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_SEQLEN_COLUMN_NAME);
         return -1;
     }
 
     // Amplicon length column
-    if (obi_view_add_column(o_view, ECOPCR_AMPLICONLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_AMPLICONLEN_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_AMPLICONLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_AMPLICONLEN_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_AMPLICONLEN_COLUMN_NAME);
         return -1;
     }
 
     // Taxid column
-    if (obi_view_add_column(o_view, TAXID_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, TAXID_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, TAXID_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, TAXID_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", TAXID_COLUMN);
         return -1;
     }
 
     // Taxonomic rank column
-    if (obi_view_add_column(o_view, ECOPCR_RANK_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_RANK_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_RANK_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_RANK_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_RANK_COLUMN_NAME);
         return -1;
     }
 
     // Species taxid column
-    if (obi_view_add_column(o_view, ECOPCR_SPECIES_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_TAXID_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_SPECIES_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_TAXID_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_TAXID_COLUMN_NAME);
         return -1;
     }
 
     // Genus taxid column
-    if (obi_view_add_column(o_view, ECOPCR_GENUS_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_TAXID_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_GENUS_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_TAXID_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_TAXID_COLUMN_NAME);
         return -1;
     }
 
     // Family taxid column
-    if (obi_view_add_column(o_view, ECOPCR_FAMILY_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_TAXID_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_FAMILY_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_TAXID_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_TAXID_COLUMN_NAME);
         return -1;
@@ -201,7 +201,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
     if (kingdom_mode)
     {
         // Kingdom taxid column
-        if (obi_view_add_column(o_view, ECOPCR_KINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(o_view, ECOPCR_KINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_TAXID_COLUMN_NAME);
             return -1;
@@ -210,7 +210,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
     else
     {
         // Superkingdom taxid column
-        if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME);
             return -1;
@@ -218,28 +218,28 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
     }
 
     // Scientific name column
-    if (obi_view_add_column(o_view, ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SCIENTIFIC_NAME_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SCIENTIFIC_NAME_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME);
         return -1;
     }
 
     // Species name column
-    if (obi_view_add_column(o_view, ECOPCR_SPECIES_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_NAME_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_SPECIES_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_NAME_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_NAME_COLUMN_NAME);
         return -1;
     }
 
     // Genus name column
-    if (obi_view_add_column(o_view, ECOPCR_GENUS_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_NAME_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_GENUS_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_NAME_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_NAME_COLUMN_NAME);
         return -1;
     }
 
     // Family name column
-    if (obi_view_add_column(o_view, ECOPCR_FAMILY_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_NAME_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_FAMILY_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_NAME_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_NAME_COLUMN_NAME);
         return -1;
@@ -248,7 +248,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
     if (kingdom_mode)
     {
         // Kingdom name column
-        if (obi_view_add_column(o_view, ECOPCR_KINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(o_view, ECOPCR_KINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_NAME_COLUMN_NAME);
             return -1;
@@ -257,7 +257,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
     else
     {
         // Superkingdom name column
-        if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME);
             return -1;
@@ -265,49 +265,49 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
     }
 
     // Strand column
-    if (obi_view_add_column(o_view, ECOPCR_STRAND_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_STRAND_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_STRAND_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_STRAND_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_STRAND_COLUMN_NAME);
         return -1;
     }
 
     // Primer 1 column
-    if (obi_view_add_column(o_view, ECOPCR_PRIMER1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER1_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_PRIMER1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER1_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER1_COLUMN_NAME);
         return -1;
     }
 
     // Primer 2 column
-    if (obi_view_add_column(o_view, ECOPCR_PRIMER2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER2_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_PRIMER2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER2_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER2_COLUMN_NAME);
         return -1;
     }
 
     // Error 1 column
-    if (obi_view_add_column(o_view, ECOPCR_ERROR1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_ERROR1_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_ERROR1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_ERROR1_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_ERROR1_COLUMN_NAME);
         return -1;
     }
 
     // Error 2 column
-    if (obi_view_add_column(o_view, ECOPCR_ERROR2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_ERROR2_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_ERROR2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_ERROR2_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_ERROR2_COLUMN_NAME);
         return -1;
     }
 
     // Temperature 1 column
-    if (obi_view_add_column(o_view, ECOPCR_TEMP1_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_TEMP1_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_TEMP1_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_TEMP1_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_TEMP1_COLUMN_NAME);
         return -1;
     }
 
     // Temperature 2 column
-    if (obi_view_add_column(o_view, ECOPCR_TEMP2_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_TEMP2_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(o_view, ECOPCR_TEMP2_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_TEMP2_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the %s column", ECOPCR_TEMP2_COLUMN_NAME);
         return -1;
@@ -645,7 +645,8 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
 
 int obi_ecopcr(const char* i_dms_name,
                const char* i_view_name,
-               const char* taxonomy_name, // TODO discuss that input dms assumed
+               const char* tax_dms_name,
+               const char* taxonomy_name,
                const char* o_dms_name,
                const char* o_view_name,
                const char* o_view_comments,
@@ -678,6 +679,7 @@ int obi_ecopcr(const char* i_dms_name,
 
     OBIDMS_p i_dms = NULL;
     OBIDMS_p o_dms = NULL;
+    OBIDMS_p tax_dms = NULL;
     OBIDMS_taxonomy_p taxonomy = NULL;
     Obiview_p i_view = NULL;
     Obiview_p o_view = NULL;
@@ -965,8 +967,16 @@ int obi_ecopcr(const char* i_dms_name,
         return -1;
     }
 
+    // Open taxonomy DMS
+    tax_dms = obi_open_dms(tax_dms_name, false);
+    if (tax_dms == NULL)
+    {
+        obidebug(1, "\nError opening the taxonomy DMS");
+        return -1;
+    }
+
     // Open the taxonomy
-    taxonomy = obi_read_taxonomy(i_dms, taxonomy_name, false);
+    taxonomy = obi_read_taxonomy(tax_dms, taxonomy_name, false);
     if (taxonomy == NULL)
     {
         obidebug(1, "\nError opening the taxonomy");
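The ecopcr change above stops assuming the taxonomy lives in the input DMS: a dedicated taxonomy DMS is opened first, and the taxonomy is read from it. Below is a hedged sketch of that call order, reusing the obi_open_dms/obi_read_taxonomy calls visible in the hunk; the header names, the DMS and taxonomy names, and the cleanup step are assumptions, not project code.

```c
/* Hedged sketch of the new call order: open the taxonomy's own DMS before
 * reading the taxonomy, instead of reusing the input DMS. Requires the
 * OBITools3 C library; header names and the "my_*" names are assumed. */
#include <stdbool.h>
#include <stdio.h>
#include "obidms.h"            /* assumed header for OBIDMS_p / obi_open_dms   */
#include "obidms_taxonomy.h"   /* assumed header for obi_read_taxonomy          */

int open_taxonomy_example(void)
{
    OBIDMS_p tax_dms = obi_open_dms("my_tax_dms", false);   /* taxonomy DMS path (made up) */
    if (tax_dms == NULL)
    {
        fprintf(stderr, "Error opening the taxonomy DMS\n");
        return -1;
    }

    OBIDMS_taxonomy_p taxonomy = obi_read_taxonomy(tax_dms, "my_tax", false);  /* read-only */
    if (taxonomy == NULL)
    {
        fprintf(stderr, "Error opening the taxonomy\n");
        return -1;
    }

    /* ... use the taxonomy, then release it with the library's close/free calls ... */
    return 0;
}
```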
@@ -77,7 +77,8 @@
  *
  * @param i_dms_name The path to the input DMS.
  * @param i_view_name The name of the input view.
- * @param taxonomy_name The name of the taxonomy in the input DMS.
+ * @param tax_dms_name The path to the DMS containing the taxonomy.
+ * @param taxonomy_name The name of the taxonomy.
  * @param o_dms_name The path to the output DMS.
  * @param o_view_name The name of the output view.
  * @param o_view_comments The comments to associate with the output view.
@@ -106,6 +107,7 @@
  */
 int obi_ecopcr(const char* i_dms_name,
                const char* i_view_name,
+               const char* tax_dms_name,
                const char* taxonomy_name,
                const char* o_dms_name,
                const char* o_view_name,
@@ -104,42 +104,42 @@ int print_assignment_result(Obiview_p output_view, index_t line,
 static int create_output_columns(Obiview_p o_view)
 {
     // Score column
-    if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "\nError creating the column for the score in ecotag");
         return -1;
     }
 
     // Assigned taxid column
-    if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "\nError creating the column for the assigned taxid in ecotag");
         return -1;
     }
 
     // Assigned scientific name column
-    if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "\nError creating the column for the assigned scientific name in ecotag");
         return -1;
     }
 
     // Assignement status column
-    if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "\nError creating the column for the assignment status in ecotag");
         return -1;
     }
 
     // Column for array of best match ids
-    if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, true, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, true, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "\nError creating the column for the array of ids of best matches in ecotag");
         return -1;
     }
 
     // Column for array of best match taxids
-    if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, true, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, true, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "\nError creating the column for the array of taxids of best matches in ecotag");
         return -1;
@@ -218,7 +218,8 @@ int obi_ecotag(const char* dms_name,
                const char* taxonomy_name,
                const char* output_view_name,
                const char* output_view_comments,
-               double ecotag_threshold) // TODO different threshold for the similarity sphere around ref seqs
+               double ecotag_threshold,
+               double bubble_threshold)
 {
 
     // For each sequence
@@ -232,13 +233,14 @@ int obi_ecotag(const char* dms_name,
     // Write result (max score, threshold, LCA assigned, list of the ids of the best matches)
 
 
-    index_t i, j, k;
+    index_t i, j, k, t;
     ecotx_t* lca;
     ecotx_t* lca_in_array;
     ecotx_t* best_match;
     index_t query_seq_idx, ref_seq_idx;
     double score, best_score;
     double threshold;
+    double lca_threshold;
     int lcs_length;
     int ali_length;
     Kmer_table_p ktable;
@@ -257,16 +259,20 @@ int obi_ecotag(const char* dms_name,
     int32_t* best_match_taxids;
     int32_t* best_match_taxids_to_store;
     int best_match_count;
+    int best_match_taxid_count;
     int buffer_size;
     int best_match_ids_buffer_size;
     index_t best_match_idx;
     int32_t lca_array_length;
     int32_t lca_taxid;
     int32_t taxid_best_match;
+    int32_t taxid;
+    int32_t taxid_to_store;
     bool assigned;
     const char* lca_name;
     const char* id;
     int id_len;
+    bool already_in;
 
     OBIDMS_p dms = NULL;
     OBIDMS_p ref_dms = NULL;
@@ -389,10 +395,10 @@ int obi_ecotag(const char* dms_name,
         return -1;
     }
     free(db_threshold_str);
-    if (ecotag_threshold < db_threshold)
+    if (bubble_threshold < db_threshold)
     {
         fprintf(stderr, "\nError: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).\n\n",
-                ecotag_threshold, db_threshold);
+                bubble_threshold, db_threshold);
         return -1;
     }
 
@@ -486,10 +492,11 @@ int obi_ecotag(const char* dms_name,
 
     for (i=0; i < query_count; i++)
     {
-        if (i%1000 == 0)
+        if (i%10 == 0)
             fprintf(stderr,"\rDone : %f %% ", (i / (float) query_count)*100);
 
         best_match_count = 0;
+        best_match_taxid_count = 0;
         best_match_ids_length = 0;
         threshold = ecotag_threshold;
         best_score = 0.0;
@@ -541,6 +548,7 @@ int obi_ecotag(const char* dms_name,
                     // Reset the array with that match
                     best_match_ids_length = 0;
                     best_match_count = 0;
+                    best_match_taxid_count = 0;
                 }
 
                 // Store in best match array
@@ -583,8 +591,27 @@ int obi_ecotag(const char* dms_name,
 
                 // Save match
                 best_match_array[best_match_count] = j;
-                best_match_taxids[best_match_count] = obi_get_int_with_elt_idx_and_col_p_in_view(ref_view, ref_taxid_column, j, 0);
                 best_match_count++;
+
+                // Save best match taxid only if not already in array
+                taxid_to_store = obi_get_int_with_elt_idx_and_col_p_in_view(ref_view, ref_taxid_column, j, 0);
+                already_in = false;
+                for (t=0; t<best_match_taxid_count; t++)
+                {
+                    taxid = best_match_taxids[t];
+                    //fprintf(stderr, "\ntaxid %d, taxid_to_store %d\n", taxid, taxid_to_store);
+                    if (taxid == taxid_to_store)
+                    {
+                        already_in = true;
+                        break;
+                    }
+                }
+                if (! already_in)
+                {
+                    best_match_taxids[best_match_taxid_count] = taxid_to_store;
+                    best_match_taxid_count++;
+                }
+
                 strcpy(best_match_ids+best_match_ids_length, id);
                 best_match_ids_length = best_match_ids_length + id_len + 1;
             }
@ -597,11 +624,16 @@ int obi_ecotag(const char* dms_name,
|
|||||||
{
|
{
|
||||||
best_match_idx = best_match_array[j];
|
best_match_idx = best_match_array[j];
|
||||||
|
|
||||||
// Find the LCA for the chosen threshold
|
// Find the LCA for the highest threshold between best_score and the chosen bubble threshold
|
||||||
score_array = obi_get_array_with_col_p_in_view(ref_view, score_a_column, best_match_idx, &lca_array_length);
|
score_array = obi_get_array_with_col_p_in_view(ref_view, score_a_column, best_match_idx, &lca_array_length);
|
||||||
|
|
||||||
|
if (bubble_threshold < best_score)
|
||||||
|
lca_threshold = best_score;
|
||||||
|
else
|
||||||
|
lca_threshold = bubble_threshold;
|
||||||
|
|
||||||
k = 0;
|
k = 0;
|
||||||
while ((k < lca_array_length) && (score_array[k] >= best_score))
|
while ((k < lca_array_length) && (score_array[k] >= lca_threshold))
|
||||||
k++;
|
k++;
|
||||||
|
|
||||||
if (k>0)
|
if (k>0)
|
||||||
@ -686,7 +718,7 @@ int obi_ecotag(const char* dms_name,
|
|||||||
assigned_name_column, lca_name,
|
assigned_name_column, lca_name,
|
||||||
assigned_status_column, assigned,
|
assigned_status_column, assigned,
|
||||||
best_match_ids_column, best_match_ids_to_store, best_match_ids_length,
|
best_match_ids_column, best_match_ids_to_store, best_match_ids_length,
|
||||||
best_match_taxids_column, best_match_taxids_to_store, best_match_count,
|
best_match_taxids_column, best_match_taxids_to_store, best_match_taxid_count,
|
||||||
score_column, best_score
|
score_column, best_score
|
||||||
) < 0)
|
) < 0)
|
||||||
return -1;
|
return -1;
|
||||||
@@ -42,12 +42,14 @@
  * @param output_view_name The name to give to the output view.
  * @param output_view_comments The comments to associate to the output view.
  * @param ecotag_threshold The threshold at which to assign.
+ * @param bubble_threshold The threshold at which to look for an LCA (i.e. minimum identity considered for the assignment circle);
+ *                         the threshold actually used will be the highest between this value and the best assignment score found.
  *
  * The algorithm works like this:
  * For each query sequence:
  *     Align with reference database
  *     Keep the indices of all the best matches
- *     For each kept index, get the LCA at that threshold as stored in the reference database, then the LCA of those LCAs
+ *     For each kept index, get the LCA at the highest threshold between bubble_threshold and the best assignment score found (as stored in the reference database), then the LCA of those LCAs
  *     Write result (max score, threshold, taxid and scientific name of the LCA assigned, list of the ids of the best matches)
  *
  * @returns A value indicating the success of the operation.
@@ -65,7 +67,8 @@ int obi_ecotag(const char* dms_name,
                const char* taxonomy_name,
                const char* output_view_name,
                const char* output_view_comments,
-               double ecotag_threshold);
+               double ecotag_threshold,
+               double bubble_threshold);
 
 
 #endif /* OBI_ECOTAG_H_ */
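
The header documentation above describes how the assignment "bubble" is chosen: the LCA is looked for at the highest of the bubble threshold and the best alignment score found for the query. The sketch below isolates that selection as a standalone helper. The helper name and its argument types are illustrative only (in the library the logic lives inline in obi_ecotag(), as the hunks above show), and it assumes, like that inline loop, that the per-reference score array is ordered so that the scores inside the circle come first and that the scores are stored as floats.

#include <stdint.h>

/* Hypothetical helper, not part of the library API: counts how many entries
 * of score_array fall inside the assignment circle described above. */
static int32_t count_in_assignment_circle(const float* score_array,
                                          int32_t lca_array_length,
                                          double bubble_threshold,
                                          double best_score)
{
    double  lca_threshold;
    int32_t k = 0;

    /* Use the bubble threshold unless the best score found is even higher */
    if (bubble_threshold < best_score)
        lca_threshold = best_score;
    else
        lca_threshold = bubble_threshold;

    /* Count the leading scores that are at least equal to that threshold */
    while ((k < lca_array_length) && (score_array[k] >= lca_threshold))
        k++;

    return k;
}
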
@@ -155,35 +155,35 @@ static int create_alignment_output_columns(Obiview_p output_view,
                                            bool normalize, int reference, bool similarity_mode)
 {
     // Create the column for the ids of the 1st sequence aligned
-    if (obi_view_add_column(output_view, ID1_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, id1_indexer_name, NULL, -1, ID1_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(output_view, ID1_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, id1_indexer_name, NULL, -1, ID1_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the first column for the sequence ids when aligning");
         return -1;
     }
 
     // Create the column for the ids of the 2nd sequence aligned
-    if (obi_view_add_column(output_view, ID2_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, id2_indexer_name, NULL, -1, ID2_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(output_view, ID2_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, id2_indexer_name, NULL, -1, ID2_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the second column for the sequence ids when aligning");
         return -1;
     }
 
     // Create the column for the index (in the input view) of the first sequences aligned
-    if (obi_view_add_column(output_view, IDX1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, IDX1_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(output_view, IDX1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, IDX1_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the first column for the sequence indices when aligning");
         return -1;
     }
 
     // Create the column for the index (in the input view) of the second sequences aligned
-    if (obi_view_add_column(output_view, IDX2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, IDX2_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(output_view, IDX2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, IDX2_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the second column for the sequence indices when aligning");
         return -1;
     }
 
     // Create the column for the LCS length
-    if (obi_view_add_column(output_view, LCS_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, LCS_LENGTH_COLUMN_COMMENTS, true) < 0)
+    if (obi_view_add_column(output_view, LCS_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, LCS_LENGTH_COLUMN_COMMENTS, true) < 0)
     {
         obidebug(1, "\nError creating the column for the LCS length when aligning");
         return -1;
@@ -192,7 +192,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
     // Create the column for the alignment length if it is computed
     if ((reference == ALILEN) && (normalize || !similarity_mode))
     {
-        if (obi_view_add_column(output_view, ALI_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ALI_LENGTH_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(output_view, ALI_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ALI_LENGTH_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the column for the alignment length when aligning");
             return -1;
@@ -201,7 +201,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
     // Create the column for the alignment score
     if (normalize)
     {
-        if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
+        if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
         {
             obidebug(1, "\nError creating the column for the score when aligning");
             return -1;
@@ -209,7 +209,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
     }
     else
     {
-        if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
+        if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
         {
             obidebug(1, "\nError creating the column for the score when aligning");
             return -1;
@@ -219,14 +219,14 @@ static int create_alignment_output_columns(Obiview_p output_view,
     if (print_seq)
     {
         // Create the column for the first sequences aligned
-        if (obi_view_add_column(output_view, SEQ1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, seq1_indexer_name, NULL, -1, SEQ1_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(output_view, SEQ1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, seq1_indexer_name, NULL, -1, SEQ1_COLUMN_COMMENTS, true) < 0)
        {
             obidebug(1, "\nError creating the first column for the sequences when aligning");
             return -1;
         }
 
         // Create the column for the second sequences aligned
-        if (obi_view_add_column(output_view, SEQ2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, seq2_indexer_name, NULL, -1, SEQ2_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(output_view, SEQ2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, seq2_indexer_name, NULL, -1, SEQ2_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the second column for the sequences when aligning");
             return -1;
@@ -235,14 +235,14 @@ static int create_alignment_output_columns(Obiview_p output_view,
     if (print_count)
     {
         // Create the column for the count of the first sequences aligned
-        if (obi_view_add_column(output_view, COUNT1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, COUNT1_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(output_view, COUNT1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, COUNT1_COLUMN_COMMENTS, true) < 0)
        {
             obidebug(1, "\nError creating the first column for the sequence counts when aligning");
             return -1;
         }
 
         // Create the column for the count of the second sequences aligned
-        if (obi_view_add_column(output_view, COUNT2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, COUNT2_COLUMN_COMMENTS, true) < 0)
+        if (obi_view_add_column(output_view, COUNT2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, COUNT2_COLUMN_COMMENTS, true) < 0)
         {
             obidebug(1, "\nError creating the second column for the sequence counts when aligning");
             return -1;
src/obiavl.c | 117
@@ -582,6 +582,7 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
 {
     size_t file_size;
     size_t new_data_size;
+    size_t header_size;
     double multiple;
     int file_descriptor;
 
@@ -589,6 +590,8 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
     multiple = ceil((double) (ONE_IF_ZERO((avl->header)->nb_items * sizeof(AVL_node_t))) / (double) getpagesize());
     new_data_size = ((size_t) multiple) * getpagesize();
 
+    header_size = (avl->header)->header_size;
+
     // Check that it is actually greater than the current size of the file, otherwise no need to truncate
     if ((avl->header)->avl_size == new_data_size)
         return 0;
@@ -596,16 +599,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
     // Get the file descriptor
     file_descriptor = avl->avl_fd;
 
-    // Unmap the tree before truncating the file
+    // Unmap the entire file before truncating it (WSL requirement)
     if (munmap(avl->tree, (avl->header)->avl_size) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);
         obidebug(1, "\nError munmapping the tree of an AVL before truncating");
         return -1;
     }
+    if (munmap(avl->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the tree of an AVL before truncating");
+        return -1;
+    }
 
     // Truncate the file
-    file_size = (avl->header)->header_size + new_data_size;
+    file_size = header_size + new_data_size;
     if (ftruncate(file_descriptor, file_size) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);
@@ -613,7 +622,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
         return -1;
     }
 
-    // Remap the data
+    // Remap the header and the data
+
+    avl->header = mmap(NULL,
+                       header_size,
+                       PROT_READ | PROT_WRITE,
+                       MAP_SHARED,
+                       file_descriptor,
+                       0
+                       );
+    if (avl->header == MAP_FAILED)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
+        return -1;
+    }
+
     avl->tree = mmap(NULL,
                      new_data_size,
                      PROT_READ | PROT_WRITE,
@@ -640,6 +664,7 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
 {
     size_t file_size;
     index_t new_data_size;
+    size_t header_size;
     double multiple;
     int file_descriptor;
 
@@ -647,6 +672,8 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
     multiple = ceil((double) (ONE_IF_ZERO((avl_data->header)->data_size_used)) / (double) getpagesize());
     new_data_size = ((index_t) multiple) * getpagesize();
 
+    header_size = (avl_data->header)->header_size;
+
     // Check that it is actually greater than the current size of the file, otherwise no need to truncate
     if ((avl_data->header)->data_size_max >= new_data_size)
         return 0;
@@ -654,7 +681,8 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
     // Get the file descriptor
     file_descriptor = avl_data->data_fd;
 
-    // Unmap the data before truncating the file
+    // Unmap the entire file before truncating it (WSL requirement)
+
     if (munmap(avl_data->data, (avl_data->header)->data_size_max) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);
@@ -662,8 +690,15 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
         return -1;
     }
 
+    if (munmap(avl_data->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the header of an AVL before truncating");
+        return -1;
+    }
+
     // Truncate the file
-    file_size = (avl_data->header)->header_size + new_data_size;
+    file_size = header_size + new_data_size;
     if (ftruncate(file_descriptor, file_size) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);
@@ -672,6 +707,22 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
     }
 
     // Remap the data
+
+    avl_data->header = mmap(NULL,
+                            header_size,
+                            PROT_READ | PROT_WRITE,
+                            MAP_SHARED,
+                            file_descriptor,
+                            0
+                            );
+
+    if (avl_data->header == MAP_FAILED)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
+        return -1;
+    }
+
     avl_data->data = mmap(NULL,
                           new_data_size,
                           PROT_READ | PROT_WRITE,
@@ -710,6 +761,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
     header_size = (avl->header)->header_size;
     file_size = header_size + new_data_size;
 
+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(avl->tree, old_data_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
+        return -1;
+    }
+    if (munmap(avl->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the header of an AVL tree file before enlarging");
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(avl_file_descriptor, file_size) < 0)
     {
@@ -718,12 +783,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
         return -1;
     }
 
-    // Unmap and re-map the data
-    if (munmap(avl->tree, old_data_size) < 0)
+    // Re-map
+    avl->header = mmap(NULL,
+                       header_size,
+                       PROT_READ | PROT_WRITE,
+                       MAP_SHARED,
+                       avl_file_descriptor,
+                       0
+                       );
+
+    if (avl->header == MAP_FAILED)
     {
         obi_set_errno(OBI_AVL_ERROR);
-        obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
+        obidebug(1, "\nError re-mmapping the header of an AVL tree file after enlarging the file");
         return -1;
     }
 
@@ -768,6 +841,20 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
     header_size = (avl_data->header)->header_size;
     file_size = header_size + new_data_size;
 
+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(avl_data->data, old_data_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
+        return -1;
+    }
+    if (munmap(avl_data->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the header of an AVL tree data file before enlarging");
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(avl_data_file_descriptor, file_size) < 0)
     {
@@ -776,12 +863,19 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
         return -1;
     }
 
-    // Unmap and re-map the data
-    if (munmap(avl_data->data, old_data_size) < 0)
+    // Re-map
+    avl_data->header = mmap(NULL,
+                            header_size,
+                            PROT_READ | PROT_WRITE,
+                            MAP_SHARED,
+                            avl_data_file_descriptor,
+                            0
+                            );
+    if (avl_data->header == MAP_FAILED)
     {
         obi_set_errno(OBI_AVL_ERROR);
-        obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
+        obidebug(1, "\nError re-mmapping the header of an AVL tree data file after enlarging the file");
         return -1;
     }
 
@@ -792,7 +886,6 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
                           avl_data_file_descriptor,
                           header_size
                           );
 
     if (avl_data->data == MAP_FAILED)
     {
         obi_set_errno(OBI_AVL_ERROR);
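
All of the obiavl.c hunks above apply the same pattern, motivated by the "(WSL requirement)" comments: under Windows Subsystem for Linux the file must not be truncated or enlarged while any part of it is still mapped, so both the header and the data mappings are torn down before ftruncate() and rebuilt afterwards. Below is a condensed, hypothetical sketch of that pattern; mapped_region_t and resize_mapped_region() are not OBITools types, and the header size is assumed to be page-aligned (as it is for the library's files) so that the data can be mapped at that offset.

#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical container for a file mapped as header + data, as in obiavl.c */
typedef struct {
    void*  header;
    void*  data;
    size_t header_size;   /* assumed page-aligned */
    size_t data_size;
    int    fd;
} mapped_region_t;

static int resize_mapped_region(mapped_region_t* m, size_t new_data_size)
{
    /* 1. Unmap the entire file (data and header) before resizing it */
    if (munmap(m->data, m->data_size) < 0)
        return -1;
    if (munmap(m->header, m->header_size) < 0)
        return -1;

    /* 2. Resize the backing file */
    if (ftruncate(m->fd, (off_t) (m->header_size + new_data_size)) < 0)
        return -1;

    /* 3. Remap the header at offset 0 and the data right after it */
    m->header = mmap(NULL, m->header_size, PROT_READ | PROT_WRITE, MAP_SHARED, m->fd, 0);
    if (m->header == MAP_FAILED)
        return -1;
    m->data = mmap(NULL, new_data_size, PROT_READ | PROT_WRITE, MAP_SHARED, m->fd,
                   (off_t) m->header_size);
    if (m->data == MAP_FAILED)
        return -1;

    m->data_size = new_data_size;
    return 0;
}
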
src/obidms.c | 129
@@ -316,6 +316,15 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
     multiple = ceil((double) new_size / (double) getpagesize());
     rounded_new_size = multiple * getpagesize();
 
+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(dms->infos, (dms->infos)->file_size) < 0)
+    {
+        obi_set_errno(OBIDMS_UNKNOWN_ERROR);
+        obidebug(1, "\nError munmapping a DMS information file when enlarging");
+        close(infos_file_descriptor);
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(infos_file_descriptor, rounded_new_size) < 0)
     {
@@ -325,15 +334,7 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
         return -1;
     }
 
-    // Unmap and remap the file
-    if (munmap(dms->infos, (dms->infos)->file_size) < 0)
-    {
-        obi_set_errno(OBIDMS_UNKNOWN_ERROR);
-        obidebug(1, "\nError munmapping a DMS information file when enlarging");
-        close(infos_file_descriptor);
-        return -1;
-    }
-
+    // Remap the file
     dms->infos = mmap(NULL,
                       rounded_new_size,
                       PROT_READ | PROT_WRITE,
@@ -1409,6 +1410,111 @@ DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name)
 }
 
 
+char* obi_dms_formatted_infos(OBIDMS_p dms, bool detailed)
+{
+    char* dms_infos = NULL;
+    char* view_infos = NULL;
+    char* view_name = NULL;
+    char* tax_name = NULL;
+    char* all_tax_dir_path = NULL;
+    int i, last_dot_pos;
+    struct dirent* dp;
+    Obiview_p view;
+
+    // DMS name
+    dms_infos = (char*) malloc((strlen("# DMS name: ")+strlen(dms->dms_name)+strlen("\n# Views:\n")+1) * sizeof(char));
+    if (dms_infos == NULL)
+    {
+        obidebug(1, "\nError allocating memory for DMS formatted infos");
+        return NULL;
+    }
+    strcpy(dms_infos, "# DMS name: ");
+    strcat(dms_infos, dms->dms_name);
+    strcat(dms_infos, "\n# Views:\n");
+
+    // Go through views and get their infos
+    rewinddir(dms->view_directory);
+    while ((dp = readdir(dms->view_directory)) != NULL)
+    {
+        if ((dp->d_name)[0] == '.')
+            continue;
+        i=0;
+        while (i < strlen(dp->d_name))
+        {
+            if ((dp->d_name)[i] == '.')
+                last_dot_pos = i;
+            i++;
+        }
+        view_name = (char*) malloc((last_dot_pos+1) * sizeof(char));
+        if (view_name == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for a view name when getting formatted DMS infos: file %s", dp->d_name);
+            return NULL;
+        }
+        strncpy(view_name, dp->d_name, last_dot_pos);
+        view_name[last_dot_pos] = '\0';
+        view = obi_open_view(dms, view_name);
+        if (view == NULL)
+        {
+            obidebug(1, "\nError opening a view to get DMS formatted infos");
+            return NULL;
+        }
+        if (detailed)
+            view_infos = obi_view_formatted_infos(view, detailed);
+        else
+            view_infos = obi_view_formatted_infos_one_line(view);
+        if (view_infos == NULL)
+        {
+            obidebug(1, "\nError getting a view infos to get DMS formatted infos");
+            return NULL;
+        }
+        dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen(view_infos)+1) * sizeof(char));
+        if (dms_infos == NULL)
+        {
+            obidebug(1, "\nError reallocating memory for DMS formatted infos");
+            return NULL;
+        }
+        strcat(dms_infos, view_infos);
+        if (obi_save_and_close_view(view) < 0)
+        {
+            obidebug(1, "\nError closing view while getting DMS formatted infos");
+            return NULL;
+        }
+        if (detailed)
+        {
+            dms_infos = realloc(dms_infos, (strlen(dms_infos)+2) * sizeof(char));
+            strcat(dms_infos, "\n");
+        }
+    }
+
+    // Add taxonomies
+    dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen("\n# Taxonomies:\n")+1) * sizeof(char));
+    if (dms_infos == NULL)
+    {
+        obidebug(1, "\nError reallocating memory for DMS formatted infos");
+        return NULL;
+    }
+    strcat(dms_infos, "# Taxonomies:\n");
+    rewinddir(dms->tax_directory);
+    while ((dp = readdir(dms->tax_directory)) != NULL)
+    {
+        if ((dp->d_name)[0] == '.')
+            continue;
+        tax_name = dp->d_name;
+        dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen(" # ")+strlen(view_infos)+1) * sizeof(char));
+        if (dms_infos == NULL)
+        {
+            obidebug(1, "\nError reallocating memory for DMS formatted infos");
+            return NULL;
+        }
+        strcat(dms_infos, " # ");
+        strcat(dms_infos, tax_name);
+    }
+    return dms_infos;
+}
+
+
 // TODO move somewhere else maybe
 // TODO discuss arguments
 obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number)
@@ -1474,8 +1580,8 @@ obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, c
 
     // Create new column
     column_2 = obi_create_column(dms_2, column_name, header_1->returned_data_type, header_1->line_count,
-                                 header_1->nb_elements_per_line, header_1->elements_names, true, header_1->tuples,
-                                 header_1->to_eval, new_avl_name, (header_1->associated_column).column_name,
+                                 header_1->nb_elements_per_line, header_1->elements_names, true, header_1->dict_column,
+                                 header_1->tuples, header_1->to_eval, new_avl_name, (header_1->associated_column).column_name,
                                  (header_1->associated_column).version, header_1->comments);
 
     if (column_2 == NULL)
@@ -1712,6 +1818,7 @@ int obi_import_view(const char* dms_path_1, const char* dms_path_2, const char*
                                  false,
                                  false,
                                  false,
+                                 false,
                                  NULL,
                                  NULL,
                                  -1,
src/obidms.h | 19
@@ -40,7 +40,7 @@
                                              */
 #define MAX_NB_OPENED_INDEXERS (1000)       /**< The maximum number of indexers open at the same time.
                                              */
-#define MAX_PATH_LEN (1024)                 /**< Maximum length for the character string defining a
+#define MAX_PATH_LEN (2048)                 /**< Maximum length for the character string defining a
                                              *   file or directory path.
                                              */
 
@@ -459,6 +459,23 @@ char* obi_dms_get_full_path(OBIDMS_p dms, const char* path_name);
 DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name);
 
 
+/**
+ * @brief Returns the informations of a DMS with a human readable format (dms name, taxonomies and view infos).
+ *
+ * @warning The returned pointer has to be freed by the caller.
+ *
+ * @param column A pointer on a DMS.
+ * @param detailed Whether the informations should contain detailed view infos.
+ *
+ * @returns A pointer on a character array where the formatted DMS informations are stored.
+ * @retval NULL if an error occurred.
+ *
+ * @since September 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+char* obi_dms_formatted_infos(OBIDMS_p dms, bool detailed);
+
+
 /**
  * @brief Imports a column, copying it from a DMS to another DMS, and returns the version of the column in the destination DMS.
  *
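
As the documentation block added above notes, obi_dms_formatted_infos() hands ownership of the returned buffer to the caller. A minimal usage sketch follows; the wrapper function is hypothetical, and only the declaration above and the standard library calls are assumed.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper printing a one-line-per-view summary of a DMS */
void print_dms_summary(OBIDMS_p dms)
{
    char* infos = obi_dms_formatted_infos(dms, false);   /* false: not detailed */
    if (infos == NULL)
        return;            /* the error is already reported through obidebug */
    fprintf(stderr, "%s\n", infos);
    free(infos);           /* the caller owns the returned buffer */
}
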
@@ -873,7 +873,7 @@ static ecotxidx_t* read_taxonomy_idx(const char* taxa_file_name, const char* loc
     taxa_index->buffer_size = taxa_index->count;
 
     taxa_index->max_taxid = 0;
-    printf("Reading %d taxa...\n", count_taxa);
+    fprintf(stderr, "Reading %d taxa...\n", count_taxa);
     for (i=0; i<count_taxa; i++)
     {
         readnext_ecotaxon(f_taxa, &(taxa_index->taxon[i]));
@@ -886,9 +886,9 @@ static ecotxidx_t* read_taxonomy_idx(const char* taxa_file_name, const char* loc
     }
 
     if (count_local_taxa > 0)
-        printf("Reading %d local taxa...\n", count_local_taxa);
+        fprintf(stderr, "Reading %d local taxa...\n", count_local_taxa);
     else
-        printf("No local taxa\n");
+        fprintf(stderr, "No local taxa\n");
 
     count_taxa = taxa_index->count;
 
@@ -1092,7 +1092,7 @@ static int write_ranks_idx(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* taxo
     free(taxonomy_path);
 
     // Create file
-    file_descriptor = open(file_name, O_RDWR | O_CREAT | O_EXCL, 0777);
+    file_descriptor = open(file_name, O_RDWR | O_CREAT, 0777);
     if (file_descriptor < 0)
     {
         obi_set_errno(OBI_TAXONOMY_ERROR);
@@ -1196,7 +1196,7 @@ static int write_taxonomy_idx(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* t
     free(taxonomy_path);
 
     // Create file
-    file_descriptor = open(file_name, O_RDWR | O_CREAT | O_EXCL, 0777);
+    file_descriptor = open(file_name, O_RDWR | O_CREAT, 0777);
     if (file_descriptor < 0)
     {
         obi_set_errno(OBI_TAXONOMY_ERROR);
@@ -1472,7 +1472,7 @@ static int write_names_idx(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* taxo
     free(taxonomy_path);
 
     // Create file
-    file_descriptor = open(file_name, O_RDWR | O_CREAT | O_EXCL, 0777);
+    file_descriptor = open(file_name, O_RDWR | O_CREAT, 0777);
     if (file_descriptor < 0)
     {
         obi_set_errno(OBI_TAXONOMY_ERROR);
@@ -1760,7 +1760,7 @@ static int write_merged_idx(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* tax
     free(taxonomy_path);
 
     // Create file
-    file_descriptor = open(file_name, O_RDWR | O_CREAT | O_EXCL, 0777);
+    file_descriptor = open(file_name, O_RDWR | O_CREAT, 0777);
     if (file_descriptor < 0)
     {
         obi_set_errno(OBI_TAXONOMY_ERROR);
@@ -2463,6 +2463,32 @@ int read_merged_dmp(const char* taxdump, OBIDMS_taxonomy_p tax, int32_t* delnode
         return -1;
     }
 
+    // Write the rest of the taxa from the current taxa list
+    while (nT < (tax->taxa)->count)
+    {
+        // Add element from taxa list
+        // Enlarge structure if needed
+        if (n == buffer_size)
+        {
+            buffer_size = buffer_size * 2;
+            tax->merged_idx = (ecomergedidx_t*) realloc(tax->merged_idx, sizeof(ecomergedidx_t) + sizeof(ecomerged_t) * buffer_size);
+            if (tax->merged_idx == NULL)
+            {
+                obi_set_errno(OBI_MALLOC_ERROR);
+                obidebug(1, "\nError reallocating memory for a taxonomy structure");
+                closedir(tax_dir);
+                fclose(file);
+                return -1;
+            }
+        }
+
+        (tax->merged_idx)->merged[n].taxid = (tax->taxa)->taxon[nT].taxid;
+        (tax->merged_idx)->merged[n].idx = nT;
+
+        nT++;
+        n++;
+    }
+
     // Store count
     (tax->merged_idx)->count = n;
 
@@ -3224,47 +3250,48 @@ OBIDMS_taxonomy_p obi_read_taxonomy(OBIDMS_p dms, const char* taxonomy_name, boo
 }
 
 
-int obi_write_taxonomy(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* tax_name)
+int obi_write_taxonomy(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* tax_name, bool update)
 {
     char* taxonomy_path;
 
-    // Build the taxonomy directory path
-    taxonomy_path = get_taxonomy_path(dms, tax_name);
-    if (taxonomy_path == NULL)
-        return -1;
-
-    // Try to create the directory
-    if (mkdir(taxonomy_path, 00777) < 0)
-    {
-        if (errno == EEXIST)
-            obidebug(1, "\nA taxonomy already exists with this name.");
-        obidebug(1, "\nProblem creating a new taxonomy directory");
-        free(taxonomy_path);
-        return -1;
-    }
-
-    free(taxonomy_path);
+    if (!update) {
+        // Build the taxonomy directory path
+        taxonomy_path = get_taxonomy_path(dms, tax_name);
+        if (taxonomy_path == NULL)
+            return -1;
+        // Try to create the directory
+        if (mkdir(taxonomy_path, 00777) < 0)
+        {
+            if (errno == EEXIST)
+                obidebug(1, "\nA taxonomy already exists with this name.");
+            obidebug(1, "\nProblem creating a new taxonomy directory");
+            free(taxonomy_path);
+            return -1;
+        }
+        free(taxonomy_path);
+    }
 
     if (write_ranks_idx(dms, tax, tax_name) < 0)
         return -1;
     if (write_taxonomy_idx(dms, tax, tax_name) < 0)
         return -1;
     if (write_names_idx(dms, tax, tax_name) < 0)
         return -1;
     if (write_merged_idx(dms, tax, tax_name) < 0)
         return -1;
-    // Check if there are local taxa (if so last taxon is local)
-    if ((tax->taxa)->local_count > 0)
-    {
-        if (write_local_taxonomy_idx(dms, tax, tax_name) < 0)
-            return -1;
-    }
     // Write preferred names if there are some
     if (tax->preferred_names != NULL)
     {
         if (write_preferred_names_idx(dms, tax, tax_name) < 0)
             return -1;
     }
+    // Write local taxa if there are some
+    if ((tax->taxa)->local_count > 0)
+    {
+        if (write_local_taxonomy_idx(dms, tax, tax_name) < 0)
+            return -1;
+    }
     return 0;
 }
 
@@ -3276,16 +3303,17 @@ int obi_close_taxonomy(OBIDMS_taxonomy_p taxonomy)
     if (taxonomy)
     {
         // Update local informations (local taxa and preferred names) if there are any
-        if ((taxonomy->taxa)->local_count > 0)
-        {
-            if (taxonomy->dms == NULL)
-            {
-                obi_set_errno(OBI_TAXONOMY_ERROR);
-                obidebug(1, "\nError closing a taxonomy with local files but no DMS associated (probably read directly from taxdump)"); // TODO discuss
-            }
-            if (write_local_taxonomy_idx(taxonomy->dms, taxonomy, taxonomy->tax_name) < 0)
-                return -1;
-        }
+        // Done with write_taxo, edits all needed files. Only ldx file was edited in OBI1 but it led to issues. Discussable
+        // if ((taxonomy->taxa)->local_count > 0)
+        // {
+        //     if (taxonomy->dms == NULL)
+        //     {
+        //         obi_set_errno(OBI_TAXONOMY_ERROR);
+        //         obidebug(1, "\nError closing a taxonomy with local files but no DMS associated (probably read directly from taxdump)"); // TODO discuss
+        //     }
+        //     if (write_local_taxonomy_idx(taxonomy->dms, taxonomy, taxonomy->tax_name) < 0)
+        //         return -1;
+        // }
 
         // Write preferred names if there are some
         if (taxonomy->preferred_names)
@@ -3351,9 +3379,10 @@ int obi_close_taxonomy(OBIDMS_taxonomy_p taxonomy)
 int obi_taxo_add_local_taxon(OBIDMS_taxonomy_p tax, const char* name, const char* rank_name, int32_t parent_taxid, int32_t min_taxid)
 {
     int32_t taxid;
+    int32_t count;
     ecotx_t* taxon;
     int i;
-    // econame_t* name_struct;
+    econame_t* name_struct;
 
     // Enlarge the structure memory for a new taxon
     tax->taxa = (ecotxidx_t*) realloc(tax->taxa, sizeof(ecotxidx_t) + sizeof(ecotx_t) * (((tax->taxa)->count) + 1));
@@ -3415,42 +3444,65 @@ int obi_taxo_add_local_taxon(OBIDMS_taxonomy_p tax, const char* name, const char
     ((tax->taxa)->local_count)++;
     (tax->taxa)->buffer_size = (tax->taxa)->count;
 
-    // // Add new name in names structure // Commented because the new name was not added in the .ndx file in the OBITools1
-    // // Allocate memory for new name
-    // tax->names = (econameidx_t*) realloc(tax->names, sizeof(econameidx_t) + sizeof(econame_t) * ((tax->names)->count + 1));
-    // if (tax->names == NULL)
-    // {
-    //     obi_set_errno(OBI_MALLOC_ERROR);
-    //     obidebug(1, "\nError reallocating memory for a taxonomy structure to add a new taxon");
-    //     return -1;
-    // }
-    //
-    // // Add new name
-    // name_struct = (tax->names)->names + ((tax->names)->count);
-    // name_struct->name = (char*) malloc((strlen(name) + 1) * sizeof(char));
-    // if (name_struct->name == NULL)
-    // {
-    //     obi_set_errno(OBI_MALLOC_ERROR);
-    //     obidebug(1, "\nError allocating memory for a taxon name to add a new taxon");
-    //     return -1;
-    // }
-    // strcpy(name_struct->name, name);
-    // name_struct->class_name = (char*) malloc((strlen("scientific name") + 1) * sizeof(char));
-    // if (name_struct->class_name == NULL)
-    // {
-    //     obi_set_errno(OBI_MALLOC_ERROR);
-    //     obidebug(1, "\nError allocating memory for a taxon class name to add a new taxon");
-    //     return -1;
-    // }
-    // strcpy(name_struct->class_name, "scientific name");
-    // name_struct->is_scientific_name = true;
-    // name_struct->taxon = ((tax->taxa)->taxon) + ((tax->taxa)->count) - 1;
-    //
-    // // Sort names in alphabetical order
-    // qsort((tax->names)->names, (tax->names)->count, sizeof(econame_t), cmp_names);
-    //
-    // // Update name count
-    // ((tax->names)->count)++;
+    // Add new name in names structure // On the OBI1, the new name was not added in the .ndx file but it could create issues
+    // Allocate memory for new name
+    tax->names = (econameidx_t*) realloc(tax->names, sizeof(econameidx_t) + sizeof(econame_t) * ((tax->names)->count + 1));
+    if (tax->names == NULL)
+    {
+        obi_set_errno(OBI_MALLOC_ERROR);
+        obidebug(1, "\nError reallocating memory for a taxonomy structure to add a new taxon");
+        return -1;
+    }
+
+    // Add new name
+    name_struct = (tax->names)->names + ((tax->names)->count);
+    name_struct->name = (char*) malloc((strlen(name) + 1) * sizeof(char));
+    if (name_struct->name == NULL)
+    {
+        obi_set_errno(OBI_MALLOC_ERROR);
+        obidebug(1, "\nError allocating memory for a taxon name to add a new taxon");
+        return -1;
+    }
+    strcpy(name_struct->name, name);
+    name_struct->class_name = (char*) malloc((strlen("scientific name") + 1) * sizeof(char));
+    if (name_struct->class_name == NULL)
+    {
+        obi_set_errno(OBI_MALLOC_ERROR);
+        obidebug(1, "\nError allocating memory for a taxon class name to add a new taxon");
+        return -1;
+    }
+    strcpy(name_struct->class_name, "scientific name");
+    name_struct->is_scientific_name = true;
+    name_struct->taxon = ((tax->taxa)->taxon) + ((tax->taxa)->count) - 1;
+
+    // Update name count
+    ((tax->names)->count)++;
+
+    // Sort names in alphabetical order
+    qsort((tax->names)->names, (tax->names)->count, sizeof(econame_t), cmp_names);
+
+    // Add to merged index
+    tax->merged_idx = (ecomergedidx_t*) realloc(tax->merged_idx, sizeof(ecomergedidx_t) + sizeof(ecomerged_t) * ((tax->merged_idx)->count + 1));
+    if (tax->merged_idx == NULL)
+    {
+        obi_set_errno(OBI_MALLOC_ERROR);
+        obidebug(1, "\nError reallocating memory for a taxonomy structure");
+        return -1;
+    }
+
+    count = (tax->merged_idx)->count;
+    (tax->merged_idx)->count = count + 1;
+    (tax->merged_idx)->merged[count].taxid = taxid;
+    (tax->merged_idx)->merged[count].idx = taxon->idx;
+
+    //fprintf(stderr, "\nEntered in merged taxon.idx=%d", (tax->merged_idx)->merged[(tax->merged_idx)->count -1].idx);
+    //fprintf(stderr, "\nEntered in merged taxon.taxid=%d", (tax->merged_idx)->merged[(tax->merged_idx)->count -1].taxid);
+    //fprintf(stderr, "\nEntered in merged at %d", (tax->merged_idx)->count -1);
+    //taxon = obi_taxo_get_taxon_with_taxid(tax, taxid);
+    //fprintf(stderr, "\ntaxon=%x", taxon);
+    //fprintf(stderr, "\ntaxon.taxid=%d", taxon->taxid);
+    //fprintf(stderr, "\ntaxon.name=%s", taxon->name);
+    //fprintf(stderr, "\ntaxon.idx=%d\n\n", ((tax->merged_idx)->count));
+
     return taxid;
 }
@@ -3521,11 +3573,12 @@ int obi_taxo_add_preferred_name_with_taxon(OBIDMS_taxonomy_p tax, ecotx_t* taxon
     name_struct->is_scientific_name = false;
     name_struct->taxon = taxon;
 
+    // Update preferred name count
+    ((tax->preferred_names)->count)++;
+
     // Sort preferred names in alphabetical order
     qsort((tax->preferred_names)->names, (tax->preferred_names)->count, sizeof(econame_t), cmp_names);
 
-    // Update preferred name count
-    ((tax->preferred_names)->count)++;
-
     return 0;
 }
@@ -3643,12 +3696,26 @@ ecotx_t* obi_taxo_get_taxon_with_taxid(OBIDMS_taxonomy_p taxonomy, int32_t taxid
     else if (indexed_taxon->idx == -1)
         current_taxon = NULL; // TODO discuss what to do when old deleted taxon
     else
+    {
         current_taxon = (taxonomy->taxa->taxon)+(indexed_taxon->idx);
+        //fprintf(stderr, "\n>>>idx %d, taxid %d<<<\n", indexed_taxon->idx, indexed_taxon->taxid);
+    }
     return current_taxon;
 }
 
 
+char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
+{
+    return (((taxonomy->names)->names)[idx]).name;
+}
+
+
+ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
+{
+    return (((taxonomy->names)->names)[idx]).taxon;
+}
+
+
 int obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid) // TODO discuss that this doesn't work with deprecated taxids
 {
     ecotx_t* next_parent;
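
The two taxonomy hunks above change how local additions are persisted: obi_taxo_add_local_taxon() now also registers the new scientific name and a merged-index entry, and obi_close_taxonomy() delegates the rewriting of the index files to obi_write_taxonomy(), which skips the directory creation when its new update flag is true. Below is a hypothetical sketch of that calling sequence; the wrapper, the taxon name and the numeric values are illustrative only, not taken from the library.

/* Hypothetical wrapper: add one local taxon, then rewrite the existing
 * taxonomy files in place (update = true, so no directory is created). */
int add_local_taxon_and_save(OBIDMS_p dms, OBIDMS_taxonomy_p tax)
{
    int32_t new_taxid;

    /* Illustrative values: parent taxid 9606, minimum local taxid 10000000 */
    new_taxid = obi_taxo_add_local_taxon(tax, "my_local_taxon", "species", 9606, 10000000);
    if (new_taxid < 0)
        return -1;

    return obi_write_taxonomy(dms, tax, tax->tax_name, true);
}
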
@@ -75,7 +75,7 @@ typedef struct {
                                          */
     int32_t max_taxid;                  /**< Maximum taxid existing in the taxon index.
                                          */
-    int32_t buffer_size;                /**< Number of taxa. // TODO kept this but not sure of its use
+    int32_t buffer_size;                /**< . // TODO kept this but not sure of its use
                                          */
     ecotx_t taxon[];                    /**< Taxon array.
                                          */
@@ -239,6 +239,7 @@ OBIDMS_taxonomy_p obi_read_taxonomy(OBIDMS_p dms, const char* taxonomy_name, boo
 * @param dms A pointer on the DMS to which the taxonomy belongs.
 * @param tax A pointer on the taxonomy structure.
 * @param tax_name The name (prefix) of the taxonomy.
+ * @param update Whether files should be rewritten or if it's a new taxonomy (set to true e.g. after adding local taxa).
 *
 * @returns An integer value indicating the success of the operation.
 * @retval 0 on success.
@@ -247,7 +248,7 @@ OBIDMS_taxonomy_p obi_read_taxonomy(OBIDMS_p dms, const char* taxonomy_name, boo
 * @since 2016
 * @author Celine Mercier (celine.mercier@metabarcoding.org)
 */
-int obi_write_taxonomy(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* tax_name);
+int obi_write_taxonomy(OBIDMS_p dms, OBIDMS_taxonomy_p tax, const char* tax_name, bool update);
 
 
 /**
@@ -447,8 +448,51 @@ ecotx_t* obi_taxo_get_superkingdom(ecotx_t* taxon, OBIDMS_taxonomy_p taxonomy);
 const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks);
 
 
-// TODO
+/**
+ * @brief Function checking whether a taxid is included in a subset of the taxonomy.
+ *
+ * @param taxonomy A pointer on the taxonomy structure.
+ * @param restrict_to_taxids An array of taxids. The researched taxid must be under at least one of those array taxids.
+ * @param count Number of taxids in restrict_to_taxids.
+ * @param taxid The taxid to check.
+ *
+ * @returns A value indicating whether the taxid is included in the chosen subset of the taxonomy.
+ * @retval 0 if the taxid is not included in the subset of the taxonomy.
+ * @retval 1 if the taxid is included in the subset of the taxonomy.
+ *
+ * @since October 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
 int obi_taxo_is_taxid_included(OBIDMS_taxonomy_p taxonomy,
                                int32_t* restrict_to_taxids,
                                int32_t count,
                                int32_t taxid);
+
+
+/**
+ * @brief Function returning the name of a taxon from its index in the taxonomy name index (econameidx_t).
+ *
+ * @param taxonomy A pointer on the taxonomy structure.
+ * @param idx The index at which the name is in the taxonomy name index (econameidx_t).
+ *
+ * @returns The taxon name.
+ *
+ * @since October 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx);
+
+
+/**
+ * @brief Function returning a taxon structure from its index in the taxonomy name index (econameidx_t).
+ *
+ * @param taxonomy A pointer on the taxonomy structure.
+ * @param idx The index at which the taxon is in the taxonomy name index (econameidx_t).
+ *
+ * @returns The taxon structure.
+ *
+ * @since October 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx);
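
A short, hypothetical usage sketch for the declarations documented above; the taxids, the index value and the output calls are only examples, and stdio.h is assumed for the printing.

#include <stdio.h>

/* Hypothetical example: restrict a check to two subtrees, then look up
 * the first entry of the alphabetically sorted name index. */
void taxonomy_lookup_examples(OBIDMS_taxonomy_p tax)
{
    int32_t subset[2] = {7742, 40674};   /* illustrative taxids (e.g. Vertebrata, Mammalia) */

    if (obi_taxo_is_taxid_included(tax, subset, 2, 9606))
        fprintf(stderr, "taxid 9606 is under at least one of the subset roots\n");

    fprintf(stderr, "first indexed name: %s\n",
            obi_taxo_get_name_from_name_idx(tax, 0));

    ecotx_t* taxon = obi_taxo_get_taxon_from_name_idx(tax, 0);
    if (taxon != NULL)
        fprintf(stderr, "corresponding taxid: %d\n", taxon->taxid);
}
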
@ -1024,6 +1024,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
|||||||
index_t nb_elements_per_line,
|
index_t nb_elements_per_line,
|
||||||
char* elements_names,
|
char* elements_names,
|
||||||
bool elt_names_formatted,
|
bool elt_names_formatted,
|
||||||
|
bool dict_column,
|
||||||
bool tuples,
|
bool tuples,
|
||||||
bool to_eval,
|
bool to_eval,
|
||||||
const char* indexer_name,
|
const char* indexer_name,
|
||||||
@ -1282,6 +1283,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
|
|||||||
header->nb_elements_per_line = nb_elements_per_line;
|
header->nb_elements_per_line = nb_elements_per_line;
|
||||||
header->stored_data_type = stored_data_type;
|
header->stored_data_type = stored_data_type;
|
||||||
header->returned_data_type = returned_data_type;
|
header->returned_data_type = returned_data_type;
|
||||||
|
header->dict_column = dict_column;
|
||||||
header->tuples = tuples;
|
header->tuples = tuples;
|
||||||
header->to_eval = to_eval;
|
header->to_eval = to_eval;
|
||||||
header->creation_date = time(NULL);
|
header->creation_date = time(NULL);
|
||||||
@ -1611,6 +1613,7 @@ OBIDMS_column_p obi_clone_column(OBIDMS_p dms,
|
|||||||
nb_elements_per_line,
|
nb_elements_per_line,
|
||||||
(column_to_clone->header)->elements_names,
|
(column_to_clone->header)->elements_names,
|
||||||
true,
|
true,
|
||||||
|
(column_to_clone->header)->dict_column,
|
||||||
(column_to_clone->header)->tuples,
|
(column_to_clone->header)->tuples,
|
||||||
(column_to_clone->header)->to_eval,
|
(column_to_clone->header)->to_eval,
|
||||||
(column_to_clone->header)->indexer_name,
|
(column_to_clone->header)->indexer_name,
|
||||||
@@ -1766,6 +1769,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
 {
     size_t file_size;
     size_t data_size;
+    size_t header_size;
     index_t new_line_count;
     double multiple;
     int column_file_descriptor;
@@ -1788,6 +1792,8 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap

     data_size = obi_array_sizeof((column->header)->stored_data_type, new_line_count, (column->header)->nb_elements_per_line);

+    header_size = (column->header)->header_size;
+
     // Check that it is actually greater than the current data size, otherwise no need to truncate
     if ((column->header)->data_size == data_size)
         return 0;
@@ -1852,7 +1858,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
         return -1;
     }

-    // Unmap the data before truncating the file
+    // Unmap the entire file before truncating it (WSL requirement)
     if (munmap(column->data, (column->header)->data_size) < 0)
     {
         obi_set_errno(OBICOL_UNKNOWN_ERROR);
@@ -1860,9 +1866,16 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
         close(column_file_descriptor);
         return -1;
     }
+    if (munmap(column->header, header_size) < 0)
+    {
+        obi_set_errno(OBICOL_UNKNOWN_ERROR);
+        obidebug(1, "\nError munmapping the header of a column before truncating");
+        close(column_file_descriptor);
+        return -1;
+    }

     // Truncate the column file
-    file_size = (column->header)->header_size + data_size;
+    file_size = header_size + data_size;
     if (ftruncate(column_file_descriptor, file_size) < 0)
     {
         obi_set_errno(OBICOL_UNKNOWN_ERROR);
@@ -1871,13 +1884,30 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
         return -1;
     }

-    // Remap the data
+    // Remap the header and the data
+
+    column->header = mmap(NULL,
+                          header_size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_SHARED,
+                          column_file_descriptor,
+                          0
+                         );
+
+    if (column->header == MAP_FAILED)
+    {
+        obi_set_errno(OBICOL_UNKNOWN_ERROR);
+        obidebug(1, "\nError re-mmapping the header of a column after truncating");
+        close(column_file_descriptor);
+        return -1;
+    }
+
     column->data = mmap(NULL,
                         data_size,
                         PROT_READ | PROT_WRITE,
                         MAP_SHARED,
                         column_file_descriptor,
-                        (column->header)->header_size
+                        header_size
                        );

     if (column->data == MAP_FAILED)
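The three hunks above follow a general pattern for shrinking a memory-mapped file: unmap every live mapping (data and header), shrink the file with ftruncate(), then remap the header at offset 0 and the data at offset header_size. The sketch below is illustrative only, with hypothetical names rather than the OBIDMS API, and it assumes header_size is a multiple of the page size so it can be used as an mmap() offset.

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

typedef struct {
    void*  header;      /* mapped at offset 0 */
    void*  data;        /* mapped at offset header_size */
    size_t header_size; /* assumed to be a multiple of the page size */
    size_t data_size;   /* current size of the data region */
} mapped_file_t;        /* hypothetical type, for illustration only */

static int shrink_mapped_file(int fd, mapped_file_t* f, size_t new_data_size)
{
    /* 1. Unmap data and header so that no mapping of the file remains
     *    (on WSL, ftruncate() refuses to shrink a file that is still mapped). */
    if (munmap(f->data, f->data_size) < 0)
        return -1;
    if (munmap(f->header, f->header_size) < 0)
        return -1;

    /* 2. Shrink the file to header + new data size. */
    if (ftruncate(fd, (off_t) (f->header_size + new_data_size)) < 0)
        return -1;

    /* 3. Remap: header at offset 0, data right after it. */
    f->header = mmap(NULL, f->header_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (f->header == MAP_FAILED)
        return -1;
    f->data = mmap(NULL, new_data_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                   (off_t) f->header_size);
    if (f->data == MAP_FAILED)
        return -1;

    f->data_size = new_data_size;
    return 0;
}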
@@ -2435,17 +2465,77 @@ char* obi_column_formatted_infos(OBIDMS_column_p column, bool detailed)
 {
     char* column_infos = NULL;
     char* elt_names = NULL;
-    char* column_name = NULL;
-    // should be in view.c because alias exists in the context of view
-    column_infos = malloc(2048 * sizeof(char)); // TODO
+    char* data_type_str = NULL;
+    char* comments = NULL;

+    // Get element names informations
     elt_names = obi_get_formatted_elements_names(column);
+    if (elt_names == NULL)
+    {
+        obidebug(1, "\nError getting formatted elements names for formatted columns infos");
+        return NULL;
+    }
+
+    // Get data type informations
+    data_type_str = name_data_type((column->header)->returned_data_type);
+    if (data_type_str == NULL)
+    {
+        obidebug(1, "\nError getting formatted data type for formatted columns infos");
+        return NULL;
+    }

-    // "column_name, data type: OBI_TYPE, element names: [formatted element names](, all comments)"
+    // Get commments if detailed informations required
+    if (detailed)
+        comments = (column->header)->comments;
+
+    // Build the string of formatted infos, allocating memory as needed
+
+    // Data type
+    column_infos = (char*) malloc((strlen("data type: ")+strlen(data_type_str)+1) * sizeof(char));
+    if (column_infos == NULL)
+    {
+        obi_set_errno(OBI_MALLOC_ERROR);
+        obidebug(1, "\nError allocating memory for formatted column infos");
+        return NULL;
+    }
+
+    strcpy(column_infos, "data type: ");
+    strcat(column_infos, data_type_str);
+
+    // Element names if more than 1
+    if ((column->header)->nb_elements_per_line > 1)
+    {
+        column_infos = realloc(column_infos, (strlen(column_infos)+strlen(", elements: ")+strlen(elt_names)+1) * sizeof(char));
+        if (column_infos == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for formatted column infos");
+            return NULL;
+        }
+
+        strcat(column_infos, ", elements: ");
+        strcat(column_infos, elt_names);
+    }
+
+    if (detailed && (strlen(comments)>2)) // Add all comments if required and not empty
+    {
+        column_infos = realloc(column_infos, (strlen(column_infos)+strlen("\nComments:\n")+strlen(comments)+1) * sizeof(char));
+        if (column_infos == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for formatted column infos");
+            return NULL;
+        }
+
+        strcat(column_infos, "\nComments:\n");
+        strcat(column_infos, comments);
+    }
+
+    // "data type: OBI_TYPE, element names: [formatted element names](, all comments)"

     free(elt_names);
+    free(data_type_str);
+
     return column_infos;
 }
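A side note on the realloc() chain introduced above, not part of the commit: assigning the result of realloc() back to the only pointer holding the buffer leaks the original allocation when realloc() returns NULL. A hedged alternative, with an illustrative helper name, keeps the old buffer reachable so the error path can free it.

#include <stdlib.h>
#include <string.h>

/* Appends suffix to the NUL-terminated string buf, which must come from malloc().
 * Returns the (possibly moved) buffer, or NULL after freeing buf on failure. */
static char* append_str(char* buf, const char* suffix)
{
    char* tmp = realloc(buf, strlen(buf) + strlen(suffix) + 1);
    if (tmp == NULL)
    {
        free(buf);   /* buf is still valid here and would otherwise leak */
        return NULL;
    }
    strcat(tmp, suffix);
    return tmp;
}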
@@ -2492,7 +2582,6 @@ int obi_column_prepare_to_set_value(OBIDMS_column_p column, index_t line_nb, ind
 }


-
 int obi_column_prepare_to_get_value(OBIDMS_column_p column, index_t line_nb)
 {
     if ((line_nb+1) > ((column->header)->line_count))
@@ -36,7 +36,7 @@
                                   */
 #define COLUMN_GROWTH_FACTOR (2) /**< The growth factor when a column is enlarged.
                                   */
-#define MAXIMUM_LINE_COUNT (1000000000) /**< The maximum line count for the data of a column (1E9). //TODO
+#define MAXIMUM_LINE_COUNT (1000000000000) /**< The maximum line count for the data of a column (1E12). //TODO
                                          */
 #define COMMENTS_MAX_LENGTH (4096) /**< The maximum length for comments.
                                     */
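Raising MAXIMUM_LINE_COUNT to 1E12 means the bound no longer fits in a 32-bit integer, so any variable holding a line count has to use the 64-bit index_t seen elsewhere in these files. A small compile-time guard, shown here as a standalone C11 sketch that assumes index_t is int64_t, would catch a regression of that width:

#include <stdint.h>

typedef int64_t index_t;                        /* assumption: matches the code base */
#define MAXIMUM_LINE_COUNT (1000000000000)      /* 1E12, as in the new definition */

_Static_assert(MAXIMUM_LINE_COUNT > INT32_MAX,
               "the new bound intentionally exceeds the 32-bit range");
_Static_assert(sizeof(index_t) >= 8,
               "line counts bounded by MAXIMUM_LINE_COUNT need a 64-bit index_t");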
@@ -77,6 +77,8 @@ typedef struct OBIDMS_column_header {
     OBIType_t stored_data_type; /**< Type of the data that is actually stored in the data
                                  * part of the column.
                                  */
+    bool dict_column; /**< Whether the column contains dictionary-like values.
+                       */
     bool tuples; /**< A boolean indicating whether the column contains indices referring to indexed tuples.
                   */
     bool to_eval; /**< A boolean indicating whether the column contains expressions that should be evaluated
@@ -249,6 +251,7 @@ size_t obi_calculate_header_size(index_t nb_elements_per_line, int64_t elts_name
  * @param elements_names The names of the elements with ';' as separator (no terminal ';'),
  *                       NULL or "" if the default names are to be used ("0\01\02\0...\0n").
  * @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()).
+ * @param dict_column A boolean indicating whether the column should contain dictionary-like values.
  * @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples.
  * @param to_eval A boolean indicating whether the column contains expressions that should be evaluated
  *                (typically OBI_STR columns containing character strings to be evaluated by Python).
@@ -271,6 +274,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
                                   index_t nb_elements_per_line,
                                   char* elements_names,
                                   bool elt_names_formatted,
+                                  bool dict_column,
                                   bool tuples,
                                   bool to_eval,
                                   const char* indexer_name,
@@ -505,12 +509,37 @@ index_t obi_column_get_element_index_from_name(OBIDMS_column_p column, const cha
 char* obi_get_elements_names(OBIDMS_column_p column);


-// TODO
-//char* obi_get_formatted_elements_names(OBIDMS_column_p column);
+/**
+ * @brief Recovers the elements names of the lines of a column with a human readable format ("0; 1; 2; ...; n\0").
+ *
+ * @warning The returned pointer has to be freed by the caller.
+ *
+ * @param column A pointer on an OBIDMS column.
+ *
+ * @returns A pointer on a character array where the elements names are stored.
+ * @retval NULL if an error occurred.
+ *
+ * @since September 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+char* obi_get_formatted_elements_names(OBIDMS_column_p column);


-// TODO
-//char* obi_column_formatted_infos(OBIDMS_column_p column);
+/**
+ * @brief Returns the informations of a column with a human readable format (data type, element names, comments).
+ *
+ * @warning The returned pointer has to be freed by the caller.
+ *
+ * @param column A pointer on an OBIDMS column.
+ * @param detailed Whether the informations should contain column comments or just data type and element names.
+ *
+ * @returns A pointer on a character array where the formatted column informations are stored.
+ * @retval NULL if an error occurred.
+ *
+ * @since September 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+char* obi_column_formatted_infos(OBIDMS_column_p column, bool detailed);


 /**
@@ -25,7 +25,7 @@
  * @author Celine Mercier (celine.mercier@metabarcoding.org)
  *
  */
-bool volatile keep_running;
+extern bool volatile keep_running;
 void sig_handler(int signum);

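The extern added above is the usual declaration/definition split for a global flag: the header only declares keep_running, and exactly one .c file defines it, so every translation unit shares the same object instead of each creating its own (which modern compilers reject as duplicate symbols). A minimal illustration, with the file layout assumed rather than taken from this repository:

#include <stdbool.h>

/* In the header: declaration only. */
extern bool volatile keep_running;
void sig_handler(int signum);

/* In exactly one .c file: the single definition. */
bool volatile keep_running = true;

void sig_handler(int signum)
{
    (void) signum;
    keep_running = false;   /* ask long-running loops to stop cleanly */
}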
@@ -29,6 +29,8 @@
 #define OBIQual_int_NA (NULL) /**< NA value for the type OBI_QUAL if the quality is in integer format */
 #define OBITuple_NA (NULL) /**< NA value for tuples of any type */

+#define OBI_INT_MAX (INT32_MAX) /**< Maximum value for the type OBI_INT */
+

 /**
  * @brief enum for the boolean OBIType.
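A hedged example of how a bound such as the new OBI_INT_MAX can be used: checking that a wider count fits before it is narrowed to an OBI_INT value. The helper name is illustrative, and it assumes OBI_INT is a 32-bit signed type, which is what the INT32_MAX bound suggests.

#include <stdbool.h>
#include <stdint.h>

#define OBI_INT_MAX (INT32_MAX)   /* mirrors the definition added above */

static bool fits_in_obi_int(int64_t value)
{
    /* Only the upper bound comes from the new macro; the lower bound and any
     * NA sentinel handling depend on the rest of the OBIDMS type definitions. */
    return value <= (int64_t) OBI_INT_MAX;
}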
src/obiview.c (179 changed lines)
@@ -17,6 +17,7 @@
 #include <sys/mman.h>
 #include <inttypes.h>
 #include <math.h>
+#include <time.h>
 //#include <ctype.h>

 #include "obiview.h"
@@ -637,6 +638,15 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
     multiple = ceil((double) new_size / (double) getpagesize());
     rounded_new_size = multiple * getpagesize();

+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(view->infos, (view->infos)->file_size) < 0)
+    {
+        obi_set_errno(OBIVIEW_ERROR);
+        obidebug(1, "\nError munmapping a view file when enlarging");
+        close(obiview_file_descriptor);
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(obiview_file_descriptor, rounded_new_size) < 0)
     {
@@ -646,15 +656,7 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
         return -1;
     }

-    // Unmap and remap the file
-    if (munmap(view->infos, (view->infos)->file_size) < 0)
-    {
-        obi_set_errno(OBIVIEW_ERROR);
-        obidebug(1, "\nError munmapping a view file when enlarging");
-        close(obiview_file_descriptor);
-        return -1;
-    }
-
+    // Remap the file
     view->infos = mmap(NULL,
                        rounded_new_size,
                        PROT_READ | PROT_WRITE,
@@ -1185,6 +1187,7 @@ static int close_view(Obiview_p view)
         obidebug(1, "\nError getting a column to close from the linked list of column pointers of a view");
         return -1;
     }
+
     if (obi_close_column(column) < 0)
     {
         obidebug(1, "\nError closing a column while closing a view");
@@ -1710,7 +1713,7 @@ Obiview_p obi_new_view(OBIDMS_p dms, const char* view_name, Obiview_p view_to_cl
     // If there is a new line selection, build it by combining it with the one from the view to clone if there is one
     else if (line_selection != NULL)
     {
-        view->line_selection = obi_create_column(view->dms, LINES_COLUMN_NAME, OBI_IDX, 0, 1, NULL, false, false, false, NULL, NULL, -1, NULL);
+        view->line_selection = obi_create_column(view->dms, LINES_COLUMN_NAME, OBI_IDX, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, NULL);
         if ((view->line_selection) == NULL)
         {
             obidebug(1, "\nError creating a column corresponding to a line selection");
@@ -1860,6 +1863,7 @@ Obiview_p obi_new_view(OBIDMS_p dms, const char* view_name, Obiview_p view_to_cl
                        false,
                        false,
                        false,
+                       false,
                        NULL,
                        NULL,
                        -1,
@@ -1928,19 +1932,19 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
     if ((view_to_clone == NULL) && create_default_columns)
     {
         // Adding sequence column
-        if (obi_view_add_column(view, NUC_SEQUENCE_COLUMN, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) // discuss using same indexer "NUC_SEQ_INDEXER"
+        if (obi_view_add_column(view, NUC_SEQUENCE_COLUMN, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0) // discuss using same indexer "NUC_SEQ_INDEXER"
         {
             obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
             return NULL;
         }
         // Adding id column
-        if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+        if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
         {
             obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
             return NULL;
         }
         // Adding definition column
-        if (obi_view_add_column(view, DEFINITION_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+        if (obi_view_add_column(view, DEFINITION_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
         {
             obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
             return NULL;
@@ -1949,7 +1953,7 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
         if (quality_column)
         {
             associated_nuc_column = obi_view_get_column(view, NUC_SEQUENCE_COLUMN);
-            if (obi_view_add_column(view, QUALITY_COLUMN, -1, NULL, OBI_QUAL, 0, 1, NULL, false, false, false, NULL, (associated_nuc_column->header)->name, (associated_nuc_column->header)->version, "{}", true) < 0) // TODO discuss automatic association
+            if (obi_view_add_column(view, QUALITY_COLUMN, -1, NULL, OBI_QUAL, 0, 1, NULL, false, false, false, false, NULL, (associated_nuc_column->header)->name, (associated_nuc_column->header)->version, "{}", true) < 0) // TODO discuss automatic association
             {
                 obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
                 return NULL;
@@ -2282,6 +2286,7 @@ int obi_view_add_column(Obiview_p view,
                         index_t nb_elements_per_line,
                         char* elements_names,
                         bool elt_names_formatted,
+                        bool dict_column,
                         bool tuples,
                         bool to_eval,
                         const char* indexer_name,
@@ -2364,7 +2369,7 @@ int obi_view_add_column(Obiview_p view,
     // Open or create the column
     if (create)
     { // Create column
-        column = obi_create_column(view->dms, column_name, data_type, nb_lines, nb_elements_per_line, elements_names, elt_names_formatted, tuples, to_eval, indexer_name, associated_column_name, associated_column_version, comments);
+        column = obi_create_column(view->dms, column_name, data_type, nb_lines, nb_elements_per_line, elements_names, elt_names_formatted, dict_column, tuples, to_eval, indexer_name, associated_column_name, associated_column_version, comments);
         if (column == NULL)
         {
             obidebug(1, "\nError creating a column to add to a view");
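All the call sites updated in this commit pass false for the new dict_column argument, which sits between elt_names_formatted and tuples. As a sketch of what a call for a dictionary-valued column might look like, mirroring the call sites above (the column name, data type and error message here are illustrative, not taken from this diff):

if (obi_view_add_column(view, TAXID_DIST_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL,
                        false,  /* elt_names_formatted */
                        true,   /* dict_column: values are dictionary-like */
                        false,  /* tuples */
                        false,  /* to_eval */
                        NULL, NULL, -1, "{}", true) < 0)
{
    obidebug(1, "Error adding a dictionary-valued column in a view");
    return -1;
}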
@@ -2603,6 +2608,144 @@ int obi_view_create_column_alias(Obiview_p view, const char* current_name, const
 }


+char* obi_view_formatted_infos(Obiview_p view, bool detailed)
+{
+    int i;
+    char* view_infos = NULL;
+    char* view_name = NULL;
+    time_t creation_date;
+    char* creation_date_str = NULL;
+    index_t line_count;
+    char line_count_str[256];
+    OBIDMS_column_p column;
+    char* column_alias = NULL;
+    char* column_infos = NULL;
+    char* comments = NULL;
+
+    // View name
+    view_name = (view->infos)->name;
+    view_infos = (char*) malloc((strlen("# View name:\n")+strlen(view_name)+1) * sizeof(char));
+    strcpy(view_infos, "# View name:\n");
+    strcat(view_infos, view_name);
+
+    // Date created
+    if (view->read_only) // Date not saved until view is finished writing
+    {
+        creation_date = (view->infos)->creation_date;
+        creation_date_str = ctime(&creation_date);
+        view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Date created:\n")+strlen(creation_date_str)+1) * sizeof(char));
+        strcat(view_infos, "\n# Date created:\n");
+        strcat(view_infos, creation_date_str);
+    }
+
+    // Line count
+    line_count = (view->infos)->line_count;
+    snprintf(line_count_str, sizeof line_count_str, "%lld", line_count);
+    view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Line count:\n")+strlen(line_count_str)+1) * sizeof(char));
+    strcat(view_infos, "# Line count:\n");
+    strcat(view_infos, line_count_str);
+
+    // Columns: go through each, print their alias then their infos
+    view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Columns:")+1) * sizeof(char));
+    strcat(view_infos, "\n# Columns:");
+    for (i=0; i<((view->infos)->column_count); i++)
+    {
+        column = *((OBIDMS_column_p*)ll_get(view->columns, i));
+        if (column == NULL)
+        {
+            obidebug(1, "\nError getting a column from the linked list of column pointers of a view to format view infos");
+            return NULL;
+        }
+
+        // Column alias
+        column_alias = (((view->infos)->column_references)[i]).alias;
+        view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n")+strlen(column_alias)+strlen(", ")+1) * sizeof(char));
+        strcat(view_infos, "\n");
+        strcat(view_infos, column_alias);
+        strcat(view_infos, ", ");
+
+        // Column infos
+        column_infos = obi_column_formatted_infos(column, detailed);
+        if (column_infos == NULL)
+        {
+            obidebug(1, "\nError getting column infos to format view infos");
+            return NULL;
+        }
+
+        view_infos = realloc(view_infos, (strlen(view_infos)+strlen(column_infos)+1) * sizeof(char));
+        strcat(view_infos, column_infos);
+        free(column_infos);
+    }
+
+    // Get commments if detailed informations required
+    if (detailed)
+    {
+        comments = (view->infos)->comments;
+        if (strlen(comments)>2) // Add all comments if not empty
+        {
+            view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Comments:\n")+strlen(comments)+1) * sizeof(char));
+            if (view_infos == NULL)
+            {
+                obi_set_errno(OBI_MALLOC_ERROR);
+                obidebug(1, "\nError allocating memory for formatted view infos");
+                return NULL;
+            }
+
+            strcat(view_infos, "\n# Comments:\n");
+            strcat(view_infos, comments);
+        }
+    }
+
+    view_infos = realloc(view_infos, (strlen(view_infos)+2) * sizeof(char));
+    strcat(view_infos, "\n");
+
+    return view_infos;
+}
+
+
+char* obi_view_formatted_infos_one_line(Obiview_p view)
+{
+    int i;
+    char* view_infos = NULL;
+    char* view_name = NULL;
+    time_t creation_date;
+    char* creation_date_str = NULL;
+    index_t line_count;
+    char line_count_str[256];
+
+    // View name
+    view_name = (view->infos)->name;
+    view_infos = (char*) malloc((strlen(" # ")+strlen(view_name)+2) * sizeof(char));
+    strcpy(view_infos, " # ");
+    strcat(view_infos, view_name);
+    strcat(view_infos, ":");
+
+    // Date created
+    if (view->read_only) // Date not saved until view is finished writing
+    {
+        creation_date = (view->infos)->creation_date;
+        creation_date_str = ctime(&creation_date);
+        // Delete \n added by ctime
+        creation_date_str[strlen(creation_date_str)-1] = '\0';
+        view_infos = realloc(view_infos, (strlen(view_infos)+strlen(" Date created: ")+strlen(creation_date_str)+1) * sizeof(char));
+        strcat(view_infos, " Date created: ");
+        strcat(view_infos, creation_date_str);
+    }
+
+    // Line count
+    line_count = (view->infos)->line_count;
+    snprintf(line_count_str, sizeof line_count_str, "%lld", line_count);
+    view_infos = realloc(view_infos, (strlen(view_infos)+strlen(" ; Line count: ")+strlen(line_count_str)+1) * sizeof(char));
+    strcat(view_infos, " ; Line count: ");
+    strcat(view_infos, line_count_str);
+
+    view_infos = realloc(view_infos, (strlen(view_infos)+2) * sizeof(char));
+    strcat(view_infos, "\n");
+
+    return view_infos;
+}
+
+
 int obi_view_write_comments(Obiview_p view, const char* comments)
 {
     size_t new_size;
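Both helpers added above return a heap-allocated string that the caller is expected to free. A hedged usage sketch, assuming calling code that already holds an open Obiview_p named view and includes <stdio.h> and <stdlib.h>:

char* infos = obi_view_formatted_infos(view, true);   /* detailed: includes comments */
if (infos != NULL)
{
    fprintf(stderr, "%s\n", infos);
    free(infos);
}

char* one_line = obi_view_formatted_infos_one_line(view);
if (one_line != NULL)
{
    fputs(one_line, stderr);
    free(one_line);
}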
@@ -2767,7 +2910,7 @@ int obi_clean_unfinished_views(OBIDMS_p dms)
         if ((dp->d_name)[0] == '.')
             continue;
         i=0;
-        while ((dp->d_name)[i] != '.')
+        while (strncmp((dp->d_name)+i, ".obiview", 8))
             i++;
         relative_path = (char*) malloc(strlen(VIEW_DIR_NAME) + strlen(dp->d_name) + 2);
         strcpy(relative_path, VIEW_DIR_NAME);
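The old scan stopped at the first '.', which truncates view names that themselves contain dots; the new loop looks for the literal ".obiview" extension instead. A standalone illustration with a hypothetical file name (like the committed loop, it assumes the suffix is present):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char* d_name = "my.view.2020.obiview";
    int i = 0;
    while (strncmp(d_name + i, ".obiview", 8))
        i++;
    /* Prints: view name length: 12 ("my.view.2020") */
    printf("view name length: %d (\"%.*s\")\n", i, i, d_name);
    return 0;
}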
@@ -3090,7 +3233,7 @@ int obi_create_auto_count_column(Obiview_p view)
         return -1;
     }

-    if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "Error adding an automatic count column in a view");
         return -1;
@@ -3142,7 +3285,7 @@ int obi_create_auto_id_column(Obiview_p view, const char* prefix)
     }

     // Create the new ID column
-    if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
+    if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
     {
         obidebug(1, "Error adding an automatic ID column in a view");
         return -1;
src/obiview.h (131 changed lines)
@@ -30,54 +30,56 @@
 #include "obiblob.h"


 #define OBIVIEW_NAME_MAX_LENGTH (249) /**< The maximum length of an OBIDMS view name, without the extension.
                                        */
 #define VIEW_TYPE_MAX_LENGTH (1024) /**< The maximum length of the type name of a view.
                                      */
 #define LINES_COLUMN_NAME "LINES" /**< The name of the column containing the line selections
                                    * in all views.
                                    */
 #define VIEW_TYPE_NUC_SEQS "NUC_SEQS_VIEW" /**< The type name of views based on nucleotide sequences
                                             * and their metadata.
                                             */
 #define NUC_SEQUENCE_COLUMN "NUC_SEQ" /**< The name of the column containing the nucleotide sequences
                                        * in NUC_SEQS_VIEW views.
                                        */
 #define ID_COLUMN "ID" /**< The name of the column containing the sequence identifiers
                         * in NUC_SEQS_VIEW views.
                         */
 #define DEFINITION_COLUMN "DEFINITION" /**< The name of the column containing the sequence definitions
                                         * in NUC_SEQS_VIEW views.
                                         */
 #define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities
                                   * in NUC_SEQS_VIEW views.
                                   */
 #define REVERSE_QUALITY_COLUMN "REVERSE_QUALITY" /**< The name of the column containing the sequence qualities
                                                   * of the reverse read (generated by ngsfilter, used by alignpairedend).
                                                   */
 #define REVERSE_SEQUENCE_COLUMN "REVERSE_SEQUENCE" /**< The name of the column containing the sequence
                                                     * of the reverse read (generated by ngsfilter, used by alignpairedend).
                                                     */
 #define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities
                                   * in NUC_SEQS_VIEW views.
                                   */
 #define COUNT_COLUMN "COUNT" /**< The name of the column containing the sequence counts
                               * in NUC_SEQS_VIEW views.
                               */
+#define SCIENTIFIC_NAME_COLUMN "SCIENTIFIC_NAME" /**< The name of the column containing the taxon scientific name.
+                                                  */
 #define TAXID_COLUMN "TAXID" /**< The name of the column containing the taxids. TODO subtype of INT column?
                               */
 #define MERGED_TAXID_COLUMN "MERGED_TAXID" /**< The name of the column containing the merged taxids information.
                                             */
 #define MERGED_PREFIX "MERGED_" /**< The prefix to prepend to column names when merging informations during obi uniq.
                                  */
 #define TAXID_DIST_COLUMN "TAXID_DIST" /**< The name of the column containing a dictionary of taxid:[list of ids] when merging informations during obi uniq.
                                         */
 #define MERGED_COLUMN "MERGED" /**< The name of the column containing a list of ids when merging informations during obi uniq.
                                 */
 #define ID_PREFIX "seq" /**< The default prefix of sequence identifiers in automatic ID columns.
                          */
 #define PREDICATE_KEY "predicates" /**< The key used in the json-formatted view comments to store predicates.
                                     */


 /**
@@ -398,6 +400,7 @@ Obiview_p obi_open_view(OBIDMS_p dms, const char* view_name);
  * @param elements_names The names of the elements with ';' as separator (no terminal ';'),
  *                       if the column is created; NULL or "" if the default names are to be used ("0\01\02\0...\0n").
  * @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()).
+ * @param dict_column Whether the column contains dictionary-like values.
  * @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples.
  * @param to_eval A boolean indicating whether the column contains expressions that should be evaluated
  *                (typically OBI_STR columns containing character strings to be evaluated by Python).
@@ -424,6 +427,7 @@ int obi_view_add_column(Obiview_p view,
                         index_t nb_elements_per_line,
                         char* elements_names,
                         bool elt_names_formatted,
+                        bool dict_column,
                         bool tuples,
                         bool to_eval,
                         const char* indexer_name,
@@ -519,6 +523,39 @@ OBIDMS_column_p* obi_view_get_pointer_on_column_in_view(Obiview_p view, const ch
 int obi_view_create_column_alias(Obiview_p view, const char* current_name, const char* alias);


+/**
+ * @brief Returns the informations of a view with a human readable format (view name, date created, line count, column informations, comments).
+ *
+ * @warning The returned pointer has to be freed by the caller.
+ *
+ * @param column A pointer on a view.
+ * @param detailed Whether the informations should contain view comments.
+ *
+ * @returns A pointer on a character array where the formatted view informations are stored.
+ * @retval NULL if an error occurred.
+ *
+ * @since September 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+char* obi_view_formatted_infos(Obiview_p view, bool detailed);
+
+
+/**
+ * @brief Returns the informations of a view with a human readable format on one line (view name, date created, line count).
+ *
+ * @warning The returned pointer has to be freed by the caller.
+ *
+ * @param column A pointer on a view.
+ *
+ * @returns A pointer on a character array where the formatted view informations are stored.
+ * @retval NULL if an error occurred.
+ *
+ * @since September 2020
+ * @author Celine Mercier (celine.mercier@metabarcoding.org)
+ */
+char* obi_view_formatted_infos_one_line(Obiview_p view);
+
+
 /**
  * @brief Internal function writing new comments in a view file.
  *