Compare commits

...

53 Commits

SHA1 Message Date
89b0c48141 switch to version 3.0.1b6 2021-03-29 11:18:44 +13:00
7c02782e3c import/export: workaround for issue where flake8(?) reads '\t' as
'\'+'t' when parsing an option value
2021-03-29 11:18:19 +13:00
ecc4c2c78b stats: improved the tabular display 2021-03-29 09:03:32 +13:00
f5413381fd C: taxonomy: fixed a bug where some taxa would not be stored in the
merged index
2021-03-29 09:02:18 +13:00
3e93cfff7b import: Columns are now rewritten in OBI_FLOAT if a value is > INT32_MAX 2021-03-29 09:00:52 +13:00
6d445fe3ad switch to version 3.0.1b5 2021-03-22 09:41:01 +13:00
824deb7e21 new command: obi rm: deletes any view (for now the user deleting a view
accepts that there will be missing information when running obi history
if other views came from the deleted view)
2021-03-18 09:17:06 +13:00
d579bb2749 switch to version 3.0.1b4 2021-03-16 17:40:58 +13:00
10e5ebdbc0 ngsfilter: fixed critical bug where barcodes shorter than the forward
primer would be missed
2021-03-16 15:09:28 +13:00
8833110490 import: fixed the import of tabular files with no header 2021-03-16 09:15:48 +13:00
bd38449f2d switch to version 3.0.1b3 2021-03-15 16:50:17 +13:00
904823c827 uniq: now OK to use -m option even if only one unique key in information
to merge (e.g. one sample)
2021-03-15 16:48:22 +13:00
af68a1024c Switch to version 3.0.1b2 2021-03-15 16:26:43 +13:00
425fe25bd2 Made the OBITools3 more 'empty file friendly' 2021-03-15 16:25:41 +13:00
d48aed38d4 switch to version 3.0.1b1 2021-03-11 17:11:23 +13:00
5e32f8523e Merge branch 'wsl_version' 2021-03-11 16:47:59 +13:00
8f1d94fd24 obi test: fixed bug introduced in ad1fd3c3 2021-03-11 16:31:31 +13:00
38f42cb0fb C: Made maximum file path length 2048 instead of 1024 2021-03-11 15:23:22 +13:00
7f0f63cf26 C: now completely unmapping files before truncating them to a smaller
size (#68)
2021-03-11 15:12:40 +13:00
cba78111c9 obi test: fixed bug introduced in previous version 2021-03-11 11:36:52 +13:00
41fbae7b6c Switch to version 3.0.0b43 2021-03-10 16:52:03 +13:00
ad1fd3c341 Now handling dictionaries with one key 2021-03-10 16:50:30 +13:00
fbf0f7dfb6 import: improved genbank parser and switch to version 3.0.0b42 2021-02-17 15:26:35 +13:00
fda0edd0d8 Switch to version 3.0.0b41 2021-02-10 17:29:08 +13:00
382e37a6ae Fixes #88 2021-02-10 17:28:49 +13:00
5cc3e29f75 obi test: made less heavy by default 2021-02-10 17:28:15 +13:00
a8e2aee281 Switch to version 3.0.0b40 2021-02-06 14:45:07 +13:00
13adb479d3 Adds an extern qualifier to the keep_running declaration. 2021-02-05 15:59:43 +01:00
8ba7acdfe1 export: fixed a bug where exporting to tab format with a header would
not export the first line of data and switch to version 3.0.0b39
2021-01-13 16:09:04 +01:00
38051b1e4f Removed spurious commentaries 2021-01-13 16:07:42 +01:00
52a2e21b38 grep: fixed --id-list option
and switch to version 3.0.0b38
2020-11-06 16:36:37 +01:00
d27a5b9115 Switch to version 3.0.0b37 2020-10-30 10:47:13 +01:00
20bd3350b4 New command: obi addtaxids to add NCBI taxids to sequences from their
taxon name.
2020-10-30 10:46:55 +01:00
2e191372d7 Now handling sequences with Uracil (U) nucleotides by converting to
Thymine (T)
2020-10-30 10:46:17 +01:00
112e12cab0 Taxonomy: new functions to find taxa by name 2020-10-30 10:45:20 +01:00
b9b4cec5b5 import: now can import SILVA fasta files 2020-10-30 10:43:04 +01:00
199f3772e8 Small fixes (potential compilation problems) 2020-10-30 10:41:58 +01:00
422a6450fa ecotag: clarified similarity circle documentation 2020-09-29 17:57:29 +02:00
137c109f86 obi ls: now done in C (preparing things for R packages to read DMS) and
switch to version 3.0.0b36
2020-09-29 17:51:39 +02:00
b6648ae81e Revert "Fixed version numbering mistake (should be b34 not b35)"
This reverts commit f6dffbecfe
2020-09-25 16:25:39 +02:00
f6dffbecfe Fixed version numbering mistake (should be b34 not b35) 2020-09-25 16:24:23 +02:00
c4696ac865 ecotag: added separate threshold for minimum circle identity (and switch
to version 3.0.0b35
2020-09-25 16:22:09 +02:00
11a0945a9b obi cat: fixed open file descriptor leak and switch to version 3.0.0b34 2020-08-28 10:41:22 +02:00
f23c40c905 obi cat: fixed a bug introduced in 3.0.0b28 and switch to version
3.0.0b33
2020-08-27 18:38:16 +02:00
f99fc13b75 switch to version 3.0.0b32 2020-08-13 18:17:09 +02:00
1da6aac1b8 C: patch for failed creation of AVL with errno EEXIST 2020-08-12 17:55:08 +02:00
159803b40a export: now automatically sorts dictionary keys alphabetically for
tab/csv output
2020-07-31 16:43:35 +02:00
7dcbc34017 import: fixed entry count estimation when importing fastq files 2020-07-30 16:56:36 +02:00
db2202c8b4 uniq: added a check to make sure that there is more than one element for
one tag when merging its information
2020-07-30 16:14:37 +02:00
d33ff97846 switch to version 3.0.0b31 2020-07-28 09:31:19 +02:00
1dcdf69f1f export: fixed a bug introduced in version 3.0.0b28 2020-07-28 09:31:05 +02:00
dec114eed6 Python: added "date created" information in view representation 2020-07-27 17:38:45 +02:00
f36691053b Python: added the OBITools3 version that generated the view in view
comments
2020-07-27 16:50:00 +02:00
59 changed files with 1525 additions and 410 deletions

View File

@@ -39,6 +39,12 @@ def __addImportInputOption(optionManager):
                        const=b'fastq',
                        help="Input file is in fastq format")
 
+    group.add_argument('--silva-input',
+                       action="store_const", dest="obi:inputformat",
+                       default=None,
+                       const=b'silva',
+                       help="Input file is in SILVA fasta format")
+
     group.add_argument('--embl-input',
                        action="store_const", dest="obi:inputformat",
                        default=None,
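
Usage sketch for the new input-format flag (the general "obi import <input file> <DMS>/<view>" call form and all file/view names here are illustrative, not taken from this changeset):

obi import --silva-input silva_seqs.fasta my_dms/silva_refs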

View File

@@ -0,0 +1,230 @@
#cython: language_level=3
from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
from obitools3.dms import DMS
from obitools3.dms.view.view cimport View, Line_selection
from obitools3.uri.decode import open_uri
from obitools3.apps.optiongroups import addMinimalInputOption, addTaxonomyOption, addMinimalOutputOption, addNoProgressBarOption
from obitools3.dms.view import RollbackException
from obitools3.dms.column.column cimport Column
from functools import reduce
from obitools3.apps.config import logger
from obitools3.utils cimport tobytes, str2bytes, tostr
from io import BufferedWriter
from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
ID_COLUMN, \
DEFINITION_COLUMN, \
QUALITY_COLUMN, \
COUNT_COLUMN, \
TAXID_COLUMN
from obitools3.dms.capi.obitypes cimport OBI_INT
from obitools3.dms.capi.obitaxonomy cimport MIN_LOCAL_TAXID
import time
import math
import sys
from cpython.exc cimport PyErr_CheckSignals
__title__="Annotate sequences with their corresponding NCBI taxid found from the taxon scientific name."
def addOptions(parser):
addMinimalInputOption(parser)
addTaxonomyOption(parser)
addMinimalOutputOption(parser)
addNoProgressBarOption(parser)
group=parser.add_argument_group('obi addtaxids specific options')
group.add_argument('-t', '--taxid-tag',
action="store",
dest="addtaxids:taxid_tag",
metavar="<TAXID_TAG>",
default=b"TAXID",
help="Name of the tag to store the found taxid "
"(default: 'TAXID'.")
group.add_argument('-n', '--taxon-name-tag',
action="store",
dest="addtaxids:taxon_name_tag",
metavar="<SCIENTIFIC_NAME_TAG>",
default=b"SCIENTIFIC_NAME",
help="Name of the tag giving the scientific name of the taxon "
"(default: 'SCIENTIFIC_NAME'.")
group.add_argument('-g', '--try-genus-match',
action="store_true", dest="addtaxids:try_genus_match",
default=False,
help="Try matching the first word of <SCIENTIFIC_NAME_TAG> when can't find corresponding taxid for a taxon. "
"If there is a match it is added in the 'parent_taxid' tag. (Can be used by 'obi taxonomy' to add the taxon under that taxid).")
group.add_argument('-a', '--restricting-ancestor',
action="store",
dest="addtaxids:restricting_ancestor",
metavar="<RESTRICTING_ANCESTOR>",
default=None,
help="Enables to restrict the search of taxids under an ancestor specified by its taxid.")
group.add_argument('-l', '--log-file',
action="store",
dest="addtaxids:log_file",
metavar="<LOG_FILE>",
default='',
help="Path to a log file to write informations about not found taxids.")
def run(config):
DMS.obi_atexit()
logger("info", "obi addtaxids")
# Open the input
input = open_uri(config['obi']['inputURI'])
if input is None:
raise Exception("Could not read input view")
i_dms = input[0]
i_view = input[1]
i_view_name = input[1].name
# Open the output: only the DMS, as the output view is going to be created by cloning the input view
# (could eventually be done via an open_uri() argument)
output = open_uri(config['obi']['outputURI'],
input=False,
dms_only=True)
if output is None:
raise Exception("Could not create output view")
o_dms = output[0]
output_0 = output[0]
o_view_name = output[1]
# stdout output: create temporary view
if type(output_0)==BufferedWriter:
o_dms = i_dms
i=0
o_view_name = b"temp"
while o_view_name in i_dms: # Making sure view name is unique in output DMS
o_view_name = o_view_name+b"_"+str2bytes(str(i))
i+=1
imported_view_name = o_view_name
# If the input and output DMS are not the same, import the input view in the output DMS before cloning it to modify it
# (could be the other way around: clone and modify in the input DMS then import the new view in the output DMS)
if i_dms != o_dms:
imported_view_name = i_view_name
i=0
while imported_view_name in o_dms: # Making sure view name is unique in output DMS
imported_view_name = i_view_name+b"_"+str2bytes(str(i))
i+=1
View.import_view(i_dms.full_path[:-7], o_dms.full_path[:-7], i_view_name, imported_view_name)
i_view = o_dms[imported_view_name]
# Clone output view from input view
o_view = i_view.clone(o_view_name)
if o_view is None:
raise Exception("Couldn't create output view")
i_view.close()
# Open taxonomy
taxo_uri = open_uri(config['obi']['taxoURI'])
if taxo_uri is None or taxo_uri[2] == bytes:
raise Exception("Couldn't open taxonomy")
taxo = taxo_uri[1]
# Initialize the progress bar
if config['obi']['noprogressbar'] == False:
pb = ProgressBar(len(o_view), config)
else:
pb = None
try:
if config['addtaxids']['log_file']:
logfile = open(config['addtaxids']['log_file'], 'w')
else:
logfile = None
if config['addtaxids']['try_genus_match']:
try_genus = True
else:
try_genus = False
if 'restricting_ancestor' in config['addtaxids']:
res_anc = int(config['addtaxids']['restricting_ancestor'])
else:
res_anc = None
taxid_column_name = config['addtaxids']['taxid_tag']
parent_taxid_column_name = "PARENT_TAXID" # TODO macro
taxon_name_column_name = config['addtaxids']['taxon_name_tag']
taxid_column = Column.new_column(o_view, taxid_column_name, OBI_INT)
parent_taxid_column = Column.new_column(o_view, parent_taxid_column_name, OBI_INT)
taxon_name_column = o_view[taxon_name_column_name]
found_count = 0
not_found_count = 0
parent_found_count = 0
for i in range(len(o_view)):
PyErr_CheckSignals()
if pb is not None:
pb(i)
taxon_name = taxon_name_column[i]
taxon = taxo.get_taxon_by_name(taxon_name, res_anc)
if taxon is not None:
taxid_column[i] = taxon.taxid
found_count+=1
elif try_genus: # try finding genus or other parent taxon from the first word
taxon_name_sp = taxon_name.split(b" ")
taxon = taxo.get_taxon_by_name(taxon_name_sp[0], res_anc)
if taxon is not None:
parent_taxid_column[i] = taxon.taxid
parent_found_count+=1
if logfile:
print("Found parent taxon for", tostr(taxon_name), file=logfile)
else:
not_found_count+=1
if logfile:
print("No taxid found for", tostr(taxon_name), file=logfile)
else:
not_found_count+=1
if logfile:
print("No taxid found for", tostr(taxon_name), file=logfile)
except Exception, e:
raise RollbackException("obi addtaxids error, rollbacking view: "+str(e), o_view)
if pb is not None:
pb(i, force=True)
print("", file=sys.stderr)
logger("info", "\nTaxids found: "+str(found_count)+"/"+str(len(o_view))+" ("+str(round(found_count*100.0/len(o_view), 2))+"%)")
if config['addtaxids']['try_genus_match']:
logger("info", "\nParent taxids found: "+str(parent_found_count)+"/"+str(len(o_view))+" ("+str(round(parent_found_count*100.0/len(o_view), 2))+"%)")
logger("info", "\nTaxids not found: "+str(not_found_count)+"/"+str(len(o_view))+" ("+str(round(not_found_count*100.0/len(o_view), 2))+"%)")
# Save command config in View and DMS comments
command_line = " ".join(sys.argv[1:])
input_dms_name=[input[0].name]
input_view_name=[i_view_name]
if 'taxoURI' in config['obi'] and config['obi']['taxoURI'] is not None:
input_dms_name.append(config['obi']['taxoURI'].split("/")[-3])
input_view_name.append("taxonomy/"+config['obi']['taxoURI'].split("/")[-1])
o_view.write_config(config, "addtaxids", command_line, input_dms_name=input_dms_name, input_view_name=input_view_name)
o_dms.record_command_line(command_line)
#print("\n\nOutput view:\n````````````", file=sys.stderr)
#print(repr(o_view), file=sys.stderr)
# stdout output: write to buffer
if type(output_0)==BufferedWriter:
logger("info", "Printing to output...")
o_view.print_to_output(output_0, noprogressbar=config['obi']['noprogressbar'])
o_view.close()
# If the input and the output DMS are different or if stdout output, delete the temporary imported view used to create the final view
if i_dms != o_dms or type(output_0)==BufferedWriter:
View.delete_view(o_dms, imported_view_name)
o_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@@ -205,19 +205,25 @@ def run(config):
     if type(entries) == list:
         forward = entries[0]
         reverse = entries[1]
-        aligner = Kmer_similarity(forward, \
-                                  view2=reverse, \
-                                  kmer_size=config['alignpairedend']['kmersize'], \
-                                  reversed_column=None)
+        if len(forward) == 0 or len(reverse) == 0:
+            aligner = None
+        else:
+            aligner = Kmer_similarity(forward, \
+                                      view2=reverse, \
+                                      kmer_size=config['alignpairedend']['kmersize'], \
+                                      reversed_column=None)
     else:
-        aligner = Kmer_similarity(entries, \
-                                  column2=entries[REVERSE_SEQUENCE_COLUMN], \
-                                  qual_column2=entries[REVERSE_QUALITY_COLUMN], \
-                                  kmer_size=config['alignpairedend']['kmersize'], \
-                                  reversed_column=entries[b'reversed'])  # column created by the ngsfilter tool
+        if len(entries) == 0:
+            aligner = None
+        else:
+            aligner = Kmer_similarity(entries, \
+                                      column2=entries[REVERSE_SEQUENCE_COLUMN], \
+                                      qual_column2=entries[REVERSE_QUALITY_COLUMN], \
+                                      kmer_size=config['alignpairedend']['kmersize'], \
+                                      reversed_column=entries[b'reversed'])  # column created by the ngsfilter tool
 
     ba = alignmentIterator(entries, aligner)
 
     i = 0
     for ali in ba:

@@ -251,7 +257,7 @@ def run(config):
         pb(i, force=True)
         print("", file=sys.stderr)
 
-    if kmer_ali :
+    if kmer_ali and aligner is not None:
         aligner.free()
 
     # Save command config in View and DMS comments

View File

@@ -4,7 +4,7 @@ from obitools3.apps.progress cimport ProgressBar  # @UnresolvedImport
 from obitools3.dms import DMS
 from obitools3.dms.view.view cimport View
 from obitools3.uri.decode import open_uri
-from obitools3.apps.optiongroups import addMinimalOutputOption
+from obitools3.apps.optiongroups import addMinimalOutputOption, addNoProgressBarOption
 from obitools3.dms.view import RollbackException
 from obitools3.apps.config import logger
 from obitools3.utils cimport str2bytes

@@ -28,6 +28,7 @@ __title__="Concatenate views."
 def addOptions(parser):
 
     addMinimalOutputOption(parser)
+    addNoProgressBarOption(parser)
 
     group=parser.add_argument_group('obi cat specific options')

@@ -47,9 +48,9 @@ def run(config):
     logger("info", "obi cat")
 
-    # Open the views to concatenate
+    # Check the views to concatenate
+    iview_list = []
     idms_list = []
-    iview_list = []
     total_len = 0
     remove_qual = False
     remove_rev_qual = False

@@ -67,8 +68,9 @@ def run(config):
         if REVERSE_QUALITY_COLUMN not in i_view:  # same as above for reverse quality
             remove_rev_qual = True
         total_len += len(i_view)
-        iview_list.append(i_view)
         idms_list.append(i_dms)
+        iview_list.append(i_view.name)
+        i_view.close()
 
     # Open the output: only the DMS
     output = open_uri(config['obi']['outputURI'],

@@ -97,8 +99,10 @@ def run(config):
     # Initialize multiple elements columns
     if type(output_0)==BufferedWriter:
         dict_cols = {}
-        for v in iview_list:
+        for v_uri in config["cat"]["views_to_cat"]:
+            v = open_uri(v_uri)[1]
             for coln in v.keys():
+                col = v[coln]
                 if v[coln].nb_elements_per_line > 1:
                     if coln not in dict_cols:
                         dict_cols[coln] = {}

@@ -108,9 +112,10 @@ def run(config):
                     else:
                         dict_cols[coln]['eltnames'] = set(v[coln].elements_names + list(dict_cols[coln]['eltnames']))
                         dict_cols[coln]['nbelts'] = len(dict_cols[coln]['eltnames'])
+            v.close()
         for coln in dict_cols:
             Column.new_column(o_view, coln, dict_cols[coln]['obitype'],
-                              nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']))
+                              nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']), dict_column=True)
 
     # Initialize the progress bar
     if not config['obi']['noprogressbar']:

@@ -119,7 +124,8 @@ def run(config):
         pb = None
 
     i = 0
-    for v in iview_list:
+    for v_uri in config["cat"]["views_to_cat"]:
+        v = open_uri(v_uri)[1]
         for entry in v:
             PyErr_CheckSignals()
             if pb is not None:

@@ -130,6 +136,7 @@ def run(config):
             else:
                 o_view[i] = entry
             i+=1
+        v.close()
 
     # Deletes quality columns if needed
     if type(output_0)!=BufferedWriter:

@@ -144,7 +151,7 @@ def run(config):
     # Save command config in DMS comments
     command_line = " ".join(sys.argv[1:])
-    o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[v.name for v in iview_list])
+    o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[vname for vname in iview_list])
     o_dms.record_command_line(command_line)
 
     #print("\n\nOutput view:\n````````````", file=sys.stderr)

View File

@@ -41,6 +41,17 @@ def addOptions(parser):
                             help="Minimum identity to consider for assignment, as a normalized identity, e.g. 0.95 for an identity of 95%%. "
                                  "Default: 0.00 (no threshold).")
 
+    group.add_argument('--minimum-circle','-c',
+                       action="store", dest="ecotag:bubble_threshold",
+                       metavar='<CIRCLE_THRESHOLD>',
+                       default=0.99,
+                       type=float,
+                       help="Minimum identity considered for the assignment circle "
+                            "(sequence is assigned to the LCA of all sequences within a similarity circle of the best matches; "
+                            "the threshold for this circle is the highest value between <CIRCLE_THRESHOLD> and the best assignment score found for the query sequence). "
+                            "Give value as a normalized identity, e.g. 0.95 for an identity of 95%%. "
+                            "Default: 0.99.")
+
 def run(config):
 
     DMS.obi_atexit()

@@ -66,9 +77,8 @@ def run(config):
     ref_view_name = ref[1]
 
     # Check that the threshold demanded is greater than or equal to the threshold used to build the reference database
-    if config['ecotag']['threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
-        print("Error: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).",
-               config['ecotag']['threshold'], ref_dms[ref_view_name].comments["ref_db_threshold"])
+    if config['ecotag']['bubble_threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
+        raise Exception(f"Error: The threshold demanded ({config['ecotag']['bubble_threshold']}) is lower than the threshold used to build the reference database ({float(ref_dms[ref_view_name].comments['ref_db_threshold'])}).")
 
     # Open the output: only the DMS
     output = open_uri(config['obi']['outputURI'],

@@ -113,8 +123,9 @@ def run(config):
     if obi_ecotag(i_dms.name_with_full_path, tobytes(i_view_name), \
                   ref_dms.name_with_full_path, tobytes(ref_view_name), \
                   taxo_dms.name_with_full_path, tobytes(taxonomy_name), \
-                  tobytes(o_view_name), comments,
-                  config['ecotag']['threshold']) < 0:
+                  tobytes(o_view_name), comments, \
+                  config['ecotag']['threshold'], \
+                  config['ecotag']['bubble_threshold']) < 0:
         raise Exception("Error running ecotag")
 
     # If the input and output DMS are not the same, export result view to output DMS
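
Usage sketch with the new circle threshold (view names are illustrative, and the -R reference-database and --taxonomy flags are assumed from the existing ecotag options rather than shown in this diff):

obi ecotag --taxonomy tax_dms/my_taxonomy -R ref_dms/ref_db_0.97 --minimum-circle 0.97 my_dms/my_seqs my_dms/assigned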

View File

@@ -89,7 +89,7 @@ def run(config):
     if pb is not None:
         pb(i, force=True)
         print("", file=sys.stderr)
 
     # TODO save command in input dms?

View File

@@ -184,7 +184,7 @@ def Filter_generator(options, tax_filter, i_view):
     invert_selection = options["invert_selection"]
     id_set = None
     if "id_list" in options:
-        id_set = set(x.strip() for x in open(options["id_list"]))
+        id_set = set(x.strip() for x in open(options["id_list"], 'rb'))
 
     # Initialize the regular expression patterns
     seq_pattern = None
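
Usage sketch for the fixed option (file, DMS and view names are illustrative):

obi grep --id-list ids_to_keep.txt my_dms/my_view my_dms/selected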

View File

@@ -26,13 +26,15 @@ from obitools3.dms.capi.obiview cimport VIEW_TYPE_NUC_SEQS, \
                                         QUALITY_COLUMN, \
                                         COUNT_COLUMN, \
                                         TAXID_COLUMN, \
-                                        MERGED_PREFIX
+                                        MERGED_PREFIX, \
+                                        SCIENTIFIC_NAME_COLUMN
 
 from obitools3.dms.capi.obidms cimport obi_import_view
 
 from obitools3.dms.capi.obitypes cimport obitype_t, \
                                          OBI_VOID, \
-                                         OBI_QUAL
+                                         OBI_QUAL, \
+                                         OBI_STR
 
 from obitools3.dms.capi.obierrno cimport obi_errno

@@ -94,6 +96,7 @@ def run(config):
     cdef obitype_t new_type
     cdef bint get_quality
     cdef bint NUC_SEQS_view
+    cdef bint silva
     cdef int nb_elts
     cdef object d
     cdef View view

@@ -104,6 +107,8 @@ def run(config):
     cdef Column seq_col
     cdef Column qual_col
     cdef Column old_column
+    cdef Column sci_name_col
+    cdef bytes sci_name
     cdef bint rewrite
     cdef dict dcols
     cdef int skipping

@@ -203,9 +208,16 @@ def run(config):
         id_col = view[ID_COLUMN]
         def_col = view[DEFINITION_COLUMN]
         seq_col = view[NUC_SEQUENCE_COLUMN]
 
+    # Prepare taxon scientific name if SILVA file
+    if 'inputformat' in config['obi'] and config['obi']['inputformat'] == b"silva":
+        silva = True
+        sci_name_col = Column.new_column(view, SCIENTIFIC_NAME_COLUMN, OBI_STR)
+    else:
+        silva = False
+
     dcols = {}
 
     # First read through the entries to prepare columns with dictionaries as they are very time-expensive to rewrite
     if config['import']['preread']:
         logger("info", "First readthrough...")

@@ -245,7 +257,8 @@ def run(config):
         for tag in dict_dict:
             dcols[tag] = (Column.new_column(view, tag, dict_dict[tag][1], \
                                             nb_elements_per_line=len(dict_dict[tag][0]), \
-                                            elements_names=list(dict_dict[tag][0])), \
+                                            elements_names=list(dict_dict[tag][0]), \
+                                            dict_column=True), \
                           dict_dict[tag][1])

@@ -282,7 +295,7 @@ def run(config):
         try:
             if NUC_SEQS_view:
                 id_col[i] = entry.id
                 def_col[i] = entry.definition
                 seq_col[i] = entry.seq
                 # Check if there is a sequencing quality associated by checking the first entry  # TODO haven't found a more robust solution yet

@@ -293,6 +306,11 @@ def run(config):
                     qual_col = view[QUALITY_COLUMN]
                 if get_quality:
                     qual_col[i] = entry.quality
 
+            # Parse taxon scientific name if SILVA file
+            if silva:
+                sci_name = entry.definition.split(b";")[-1]
+                sci_name_col[i] = sci_name
+
             for tag in entry :

@@ -311,10 +329,13 @@ def run(config):
                         value_type = type(value)
                         nb_elts = 1
                         value_obitype = OBI_VOID
+                        dict_col = False
 
                         if value_type == dict or value_type == list :
                             nb_elts = len(value)
                             elt_names = list(value)
+                            if value_type == dict :
+                                dict_col = True
                         else :
                             nb_elts = 1
                             elt_names = None

@@ -322,11 +343,9 @@ def run(config):
                         value_obitype = get_obitype(value)
 
                         if value_obitype != OBI_VOID :
-                            dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names), value_obitype)
+                            dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names, dict_column=dict_col), value_obitype)
 
                             # Fill value
-                            if value_type == dict and nb_elts == 1:  # special case that makes the OBI3 create a 1 elt/line column which won't read a dict value
-                                value = value[list(value.keys())[0]]  # The solution is to transform the value in a simple atomic one acceptable by the column
                             dcols[tag][0][i] = value
 
                         # TODO else log error?

@@ -352,8 +371,8 @@ def run(config):
                     # Fill value
                     dcols[tag][0][i] = value
 
-                except IndexError :
+                except (IndexError, OverflowError):
 
                     value_type = type(value)
                     old_column = dcols[tag][0]
                     old_nb_elements_per_line = old_column.nb_elements_per_line

@@ -400,7 +419,7 @@ def run(config):
                     dcols[tag][0][i] = value
 
         except Exception as e:
-            print("\nCould not import sequence id:", entry.id, "(error raised:", e, ")")
+            print("\nCould not import sequence:", entry, "(error raised:", e, ")")
             if 'skiperror' in config['obi'] and not config['obi']['skiperror']:
                 raise e
             else:

View File

@@ -31,27 +31,11 @@ def run(config):
     input = open_uri(config['obi']['inputURI'])
     if input is None:
         raise Exception("Could not read input")
-    if input[2] == DMS and not config['ls']['longformat']:
-        dms = input[0]
-        l = []
-        for viewname in input[0]:
-            view = dms[viewname]
-            l.append(tostr(viewname) + "\t(Date created: " + str(bytes2str_object(view.comments["Date created"]))+")")
-            view.close()
-        l.sort()
-        for v in l:
-            print(v)
+
+    # Print representation
+    if config['ls']['longformat']:
+        print(input[1].repr_longformat())
     else:
         print(repr(input[1]))
-        if input[2] == DMS:
-            taxolist = ["\n### Taxonomies:"]
-            for t in Taxonomy.list_taxos(input[0]):
-                taxolist.append("\t"+tostr(t))
-            if len(taxolist) > 1:
-                for t in taxolist:
-                    print(t)
-    if config['ls']['longformat'] and len(input[1].comments) > 0:
-        print("\n### Comments:")
-        print(str(input[1].comments))
 
     input[0].close(force=True)
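
Usage sketch (names are illustrative; the long format is assumed to be exposed as -l / --long-format, matching config['ls']['longformat'] above):

obi ls my_dms
obi ls -l my_dms/my_view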

View File

@@ -24,10 +24,6 @@ from cpython.exc cimport PyErr_CheckSignals
 from io import BufferedWriter
 
-#REVERSE_SEQ_COLUMN_NAME = b"REVERSE_SEQUENCE"  # used by alignpairedend tool
-#REVERSE_QUALITY_COLUMN_NAME = b"REVERSE_QUALITY"  # used by alignpairedend tool
-
 __title__="Assigns sequence records to the corresponding experiment/sample based on DNA tags and primers"

@@ -326,7 +322,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
             sequences[0] = sequences[0][directmatch[1][2]:]
         else:
             sequences[1] = sequences[1][directmatch[1][2]:]
             sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq  # used by alignpairedend tool
             sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality  # used by alignpairedend tool
 
         if directmatch[0].forward:

@@ -373,7 +369,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
                 sequences[0] = sequences[0][:r[1]]
             else:
                 sequences[1] = sequences[1][:r[1]]
                 sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq  # used by alignpairedend tool
                 sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality  # used by alignpairedend tool
 
         # do the same on the other seq
         if first_match_first_seq:

@@ -398,7 +394,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
            seq_to_match = sequences[0]
        reversematch = []
        # Compute begin
-       begin=directmatch[1][2]+1  # end of match + 1 on the same sequence
+       #begin=directmatch[1][2]+1  # end of match + 1 on the same sequence -- No, already cut out forward primer
        # Try reverse matching on the other sequence:
        new_seq = True
        pattern = 0

@@ -412,7 +408,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
            primer=p
            # Saving original primer as 4th member of the tuple to serve as correct key in infos dict even if it might have been reversed complemented
            # (3rd member already used by directmatch)
-           reversematch.append((primer, primer(seq_to_match, same_sequence=not new_seq, pattern=pattern, begin=begin), None, p))
+           reversematch.append((primer, primer(seq_to_match, same_sequence=not new_seq, pattern=pattern, begin=0), None, p))
            new_seq = False
            pattern+=1
        # Choose match closer to the end of the sequence

@@ -649,6 +645,7 @@ def run(config):
     g = 0
     u = 0
+    i = 0
     no_tags = config['ngsfilter']['notags']
     try:
         for i in range(entries_len):

View File

@@ -0,0 +1,44 @@
#cython: language_level=3
from obitools3.uri.decode import open_uri
from obitools3.apps.config import logger
from obitools3.dms import DMS
from obitools3.apps.optiongroups import addMinimalInputOption
from obitools3.dms.view.view cimport View
import os
__title__="Delete a view"
def addOptions(parser):
addMinimalInputOption(parser)
def run(config):
DMS.obi_atexit()
logger("info", "obi rm")
# Open the input
input = open_uri(config['obi']['inputURI'])
if input is None:
raise Exception("Could not read input")
# Check that it's a view
if isinstance(input[1], View) :
view = input[1]
else:
raise NotImplementedError()
# Get the path to the view file to remove
path = input[0].full_path # dms path
path+=b"/VIEWS/"
path+=view.name
path+=b".obiview"
# Close the view and the DMS
view.close()
input[0].close(force=True)
# Rm
os.remove(path)
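
Usage sketch for the new command (DMS and view names are illustrative; the command takes the view to delete as its input URI, as read above):

obi rm my_dms/obsolete_view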

View File

@@ -238,14 +238,14 @@ def run(config):
     else:
         sdvar= "%s"
 
-    hcat = "\t".join([pcat % x for x in config['stats']['categories']]) + "\t" +\
-           "\t".join([minvar % x for x in config['stats']['minimum']]) + "\t" +\
-           "\t".join([maxvar % x for x in config['stats']['maximum']]) + "\t" +\
-           "\t".join([meanvar % x for x in config['stats']['mean']]) + "\t" +\
-           "\t".join([varvar % x for x in config['stats']['var']]) + "\t" +\
+    hcat = "\t".join([pcat % x for x in config['stats']['categories']]) + \
+           "\t".join([minvar % x for x in config['stats']['minimum']]) + \
+           "\t".join([maxvar % x for x in config['stats']['maximum']]) + \
+           "\t".join([meanvar % x for x in config['stats']['mean']]) + \
+           "\t".join([varvar % x for x in config['stats']['var']]) + \
            "\t".join([sdvar % x for x in config['stats']['sd']]) + \
-           "\t count" + \
-           "\t total"
+           "count\t" + \
+           "total"
     print(hcat)
     sorted_stats = sorted(catcount.items(), key = lambda kv:(totcount[kv[0]]), reverse=True)
     for i in range(len(sorted_stats)):

View File

@@ -23,6 +23,7 @@ from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, \
 import shutil
 import string
 import random
+import sys
 
 from cpython.exc cimport PyErr_CheckSignals

@@ -300,8 +301,11 @@ def fill_column(config, infos, col) :
 def create_random_column(config, infos) :
     alias = random.choice([b'', random_unique_name(infos)])
     tuples = random.choice([True, False])
+    dict_column = False
     if not tuples :
         nb_elements_per_line=random.randint(1, config['test']['maxelts'])
+        if nb_elements_per_line > 1:
+            dict_column = True
         elements_names = []
         for i in range(nb_elements_per_line) :
             elements_names.append(random_unique_element_name(config, infos))

@@ -317,6 +321,7 @@ def create_random_column(config, infos) :
                                data_type,
                                nb_elements_per_line=nb_elements_per_line,
                                elements_names=elements_names,
+                               dict_column=dict_column,
                                tuples=tuples,
                                comments=random_comments(config),
                                alias=alias

@@ -366,7 +371,7 @@ def random_new_view(config, infos, first=False):
         infos['view'] = View_NUC_SEQS.new(infos['dms'], random_unique_name(infos), comments=random_comments(config))  # TODO quality column
     else :
         infos['view'] = View.new(infos['dms'], random_unique_name(infos), comments=random_comments(config))  # TODO quality column
-
+    infos['view'].write_config(config, "test", infos["command_line"], input_dms_name=[infos['dms'].name], input_view_name=["random"])
     print_test(config, repr(infos['view']))
     if v_to_clone is not None :
         if line_selection is None:

@@ -441,7 +446,7 @@ def addOptions(parser):
                        default=20,
                        type=int,
                        help="Maximum length of tuples. "
-                            "Default: 50")
+                            "Default: 20")
 
     group.add_argument('--max_ini_col_count','-o',
                        action="store", dest="test:maxinicolcount",

@@ -454,7 +459,7 @@ def addOptions(parser):
     group.add_argument('--max_line_nb','-l',
                        action="store", dest="test:maxlinenb",
                        metavar='<MAX_LINE_NB>',
-                       default=10000,
+                       default=1000,
                        type=int,
                        help="Maximum number of lines in a column. "
                             "Default: 1000")

@@ -497,7 +502,8 @@ def run(config):
                   (b"OBI_SEQ", False): random_seq, (b"OBI_SEQ", True): random_seq_tuples,
                   (b"OBI_STR", False): random_bytes, (b"OBI_STR", True): random_bytes_tuples
                  },
-        'tests': [test_set_and_get, test_add_col, test_delete_col, test_col_alias, test_new_view]
+        'tests': [test_set_and_get, test_add_col, test_delete_col, test_col_alias, test_new_view],
+        'command_line': " ".join(sys.argv[1:])
     }
 
     # TODO ???

View File

@@ -354,6 +354,9 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, di
             key = mergedKeys[k]
             merged_col_name = mergedKeys_m[k]
 
+#            if merged_infos[merged_col_name]['nb_elts'] == 1:
+#                raise Exception("Can't merge information from a tag with only one element (e.g. one sample ; don't use -m option)")
+
             if merged_col_name in view:
                 i_col = view[merged_col_name]
             else:

@@ -375,6 +378,7 @@
                               OBI_INT,
                               nb_elements_per_line=merged_infos[merged_col_name]['nb_elts'],
                               elements_names=list(merged_infos[merged_col_name]['elt_names']),
+                              dict_column=True,
                               comments=i_col.comments,
                               alias=merged_col_name
                              )

@@ -397,6 +401,7 @@
                               OBI_INT,
                               nb_elements_per_line=len(view),
                               elements_names=[id for id in i_id_col],
+                              dict_column=True,
                               alias=TAXID_DIST_COLUMN
                              )
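
Usage sketch for merging per-sample information, now accepted even when only one unique key (e.g. a single sample) is present; DMS and view names are illustrative:

obi uniq -m sample my_dms/reads my_dms/unique_reads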

View File

@@ -34,6 +34,7 @@ cdef extern from "obidms.h" nogil:
     int obi_close_dms(OBIDMS_p dms, bint force)
     char* obi_dms_get_dms_path(OBIDMS_p dms)
     char* obi_dms_get_full_path(OBIDMS_p dms, const_char_p path_name)
+    char* obi_dms_formatted_infos(OBIDMS_p dms, bint detailed)
     void obi_close_atexit()
 
     obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number)

View File

@@ -31,6 +31,7 @@ cdef extern from "obidmscolumn.h" nogil:
         const_char_p elements_names
         OBIType_t returned_data_type
        OBIType_t stored_data_type
+        bint dict_column
         bint tuples
         bint to_eval
         time_t creation_date

@@ -63,10 +64,11 @@ cdef extern from "obidmscolumn.h" nogil:
     char* obi_get_elements_names(OBIDMS_column_p column)
 
-    char* obi_column_formatted_infos(OBIDMS_column_p column)
-
     index_t obi_column_get_element_index_from_name(OBIDMS_column_p column, const char* element_name)
 
     int obi_column_write_comments(OBIDMS_column_p column, const char* comments)
 
     int obi_column_add_comment(OBIDMS_column_p column, const char* key, const char* value)
+
+    char* obi_column_formatted_infos(OBIDMS_column_p column, bint detailed)

View File

@@ -11,4 +11,5 @@ cdef extern from "obi_ecotag.h" nogil:
                    const char* taxonomy_name,
                    const char* output_view_name,
                    const char* output_view_comments,
-                   double ecotag_threshold)
+                   double ecotag_threshold,
+                   double bubble_threshold)

View File

@@ -7,6 +7,8 @@ from libc.stdint cimport int32_t
 cdef extern from "obidms_taxonomy.h" nogil:
 
+    extern int MIN_LOCAL_TAXID
+
     struct ecotxnode :
         int32_t taxid
         int32_t rank

@@ -18,6 +20,13 @@ cdef extern from "obidms_taxonomy.h" nogil:
     ctypedef ecotxnode ecotx_t
 
+    struct econame_t :    # can't get this struct to be accepted by Cython ('unknown size')
+        char* name
+        char* class_name
+        int32_t is_scientific_name
+        ecotxnode* taxon
+
     struct ecotxidx_t :
         int32_t count
         int32_t max_taxid

@@ -30,9 +39,14 @@ cdef extern from "obidms_taxonomy.h" nogil:
         char** label
 
+    struct econameidx_t :
+        int32_t count
+        econame_t* names
+
     struct OBIDMS_taxonomy_t :
         ecorankidx_t* ranks
-#        econameidx_t* names
+        econameidx_t* names
         ecotxidx_t* taxa
 
     ctypedef OBIDMS_taxonomy_t* OBIDMS_taxonomy_p

@@ -51,7 +65,11 @@ cdef extern from "obidms_taxonomy.h" nogil:
     ecotx_t* obi_taxo_get_parent_at_rank(ecotx_t* taxon, int32_t rankidx)
 
     ecotx_t* obi_taxo_get_taxon_with_taxid(OBIDMS_taxonomy_p taxonomy, int32_t taxid)
 
+    char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
+
+    ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
+
     bint obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid)
 
     ecotx_t* obi_taxo_get_species(ecotx_t* taxon, OBIDMS_taxonomy_p taxonomy)

@@ -71,4 +89,4 @@ cdef extern from "obidms_taxonomy.h" nogil:
     int obi_taxo_add_preferred_name_with_taxon(OBIDMS_taxonomy_p tax, ecotx_t* taxon, const char* preferred_name)
 
     const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks)

View File

@@ -53,6 +53,8 @@ cdef extern from "obitypes.h" nogil:
     extern const_char_p OBIQual_char_NA
     extern uint8_t* OBIQual_int_NA
     extern void* OBITuple_NA
+
+    extern obiint_t OBI_INT_MAX
 
     const_char_p name_data_type(int data_type)

View File

@@ -27,6 +27,7 @@ cdef extern from "obiview.h" nogil:
     extern const_char_p REVERSE_QUALITY_COLUMN
     extern const_char_p REVERSE_SEQUENCE_COLUMN
     extern const_char_p COUNT_COLUMN
+    extern const_char_p SCIENTIFIC_NAME_COLUMN
     extern const_char_p TAXID_COLUMN
     extern const_char_p MERGED_TAXID_COLUMN
     extern const_char_p MERGED_PREFIX

@@ -94,6 +95,7 @@ cdef extern from "obiview.h" nogil:
                                               index_t nb_elements_per_line,
                                               char* elements_names,
                                               bint elt_names_formatted,
+                                              bint dict_column,
                                               bint tuples,
                                               bint to_eval,
                                               const_char_p indexer_name,

@@ -103,13 +105,17 @@ cdef extern from "obiview.h" nogil:
                                               bint create)
 
     int obi_view_delete_column(Obiview_p view, const_char_p column_name, bint delete_file)
 
     OBIDMS_column_p obi_view_get_column(Obiview_p view, const_char_p column_name)
 
     OBIDMS_column_p* obi_view_get_pointer_on_column_in_view(Obiview_p view, const_char_p column_name)
 
     int obi_view_create_column_alias(Obiview_p view, const_char_p current_name, const_char_p alias)
 
+    char* obi_view_formatted_infos(Obiview_p view, bint detailed)
+
+    char* obi_view_formatted_infos_one_line(Obiview_p view)
+
     int obi_view_write_comments(Obiview_p view, const_char_p comments)
 
     int obi_view_add_comment(Obiview_p view, const_char_p key, const_char_p value)

View File

@@ -90,6 +90,7 @@ cdef class Column(OBIWrapper) :
                    obitype_t data_type,
                    index_t nb_elements_per_line=1,
                    list elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    bint to_eval=False,
                    object associated_column_name=b"",

@@ -152,6 +153,7 @@ cdef class Column(OBIWrapper) :
                                      nb_elements_per_line = nb_elements_per_line,
                                      elements_names = elements_names_p,
                                      elt_names_formatted = False,
+                                     dict_column = dict_column,
                                      tuples = tuples,
                                      to_eval = to_eval,
                                      indexer_name = NULL,

@@ -200,7 +202,7 @@ cdef class Column(OBIWrapper) :
         column_p = column_pp[0]
         column_type = column_p.header.returned_data_type
-        column_class = Column.get_column_class(column_type, (column_p.header.nb_elements_per_line > 1), column_p.header.tuples)
+        column_class = Column.get_column_class(column_type, (column_p.header.nb_elements_per_line > 1 or column_p.header.dict_column == True), column_p.header.tuples)
         column = OBIWrapper.new_wrapper(column_class, column_pp)
 
         column._view = view

@@ -236,6 +238,7 @@ cdef class Column(OBIWrapper) :
                                      nb_elements_per_line = -1,
                                      elements_names = NULL,
                                      elt_names_formatted = False,
+                                     dict_column = False,
                                      tuples = False,
                                      to_eval = False,
                                      indexer_name = NULL,

@@ -302,15 +305,24 @@ cdef class Column(OBIWrapper) :
     @OBIWrapper.checkIsActive
     def __repr__(self) :
-        cdef bytes s
-        #cdef char* s_b
-        #cdef str s_str
-        #s_b = obi_column_formatted_infos(self.pointer())
-        #s_str = bytes2str(s_b)
-        #free(s_b)
-        s = self._alias + b", data type: " + self.data_type
-        #return s_str
-        return bytes2str(s)
+        cdef str s
+        cdef char* sc
+        cdef OBIDMS_column_p pointer = self.pointer()
+        sc = obi_column_formatted_infos(pointer, False)
+        s = bytes2str(sc)
+        free(sc)
+        return s
+
+    @OBIWrapper.checkIsActive
+    def repr_longformat(self) :
+        cdef str s
+        cdef char* sc
+        cdef OBIDMS_column_p pointer = self.pointer()
+        sc = obi_column_formatted_infos(pointer, True)
+        s = bytes2str(sc)
+        free(sc)
+        return s
 
     def close(self):  # TODO discuss, can't be called bc then bug when closing view that tries to close it in C

@@ -365,6 +377,13 @@ cdef class Column(OBIWrapper) :
             raise OBIDeactivatedInstanceError()
         return self.pointer().header.nb_elements_per_line
 
+    # dict_column property getter
+    @property
+    def dict_column(self):
+        if not self.active() :
+            raise OBIDeactivatedInstanceError()
+        return self.pointer().header.dict_column
+
     # data_type property getter
     @property
     def data_type(self):

View File

@@ -38,11 +38,13 @@ cdef class Column_bool(Column):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    object comments={}):
 
         return Column.new_column(view, column_name, OBI_BOOL,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=tuples,
                                  comments=comments)

View File

@@ -36,12 +36,14 @@ cdef class Column_char(Column):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    object comments={}):
 
         return Column.new_column(view, column_name, OBI_CHAR,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=tuples,
                                  comments=comments)

View File

@@ -36,12 +36,14 @@ cdef class Column_float(Column):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    object comments={}):
 
         return Column.new_column(view, column_name, OBI_FLOAT,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=tuples,
                                  comments=comments)

View File

@@ -38,12 +38,14 @@ cdef class Column_int(Column):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    object comments={}):
 
         return Column.new_column(view, column_name, OBI_INT,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=tuples,
                                  comments=comments)

View File

@@ -38,6 +38,7 @@ cdef class Column_qual(Column_idx):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    object associated_column_name=b"",
                    int associated_column_version=-1,
                    object comments={}):

@@ -45,6 +46,7 @@ cdef class Column_qual(Column_idx):
         return Column.new_column(view, column_name, OBI_QUAL,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=False,
                                  associated_column_name=associated_column_name,
                                  associated_column_version=associated_column_name,

View File

@@ -39,12 +39,14 @@ cdef class Column_seq(Column_idx):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    object comments={}):
 
         return Column.new_column(view, column_name, OBI_SEQ,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=tuples,
                                  comments=comments)

View File

@@ -38,12 +38,14 @@ cdef class Column_str(Column_idx):
                    object column_name,
                    index_t nb_elements_per_line=1,
                    object elements_names=None,
+                   bint dict_column=False,
                    bint tuples=False,
                    object comments={}):
 
         return Column.new_column(view, column_name, OBI_STR,
                                  nb_elements_per_line=nb_elements_per_line,
                                  elements_names=elements_names,
+                                 dict_column=dict_column,
                                  tuples=tuples,
                                  comments=comments)

View File

@@ -10,7 +10,8 @@ from .capi.obidms cimport obi_open_dms, \
                           obi_dms_exists, \
                           obi_dms_get_full_path, \
                           obi_close_atexit, \
-                          obi_dms_write_comments
+                          obi_dms_write_comments, \
+                          obi_dms_formatted_infos
 
 from .capi.obitypes cimport const_char_p

@@ -32,6 +33,8 @@ from .object import OBIWrapper
 import json
 import time
 
+from libc.stdlib cimport free
+
 
 cdef class DMS(OBIWrapper):

@@ -223,13 +226,24 @@ cdef class DMS(OBIWrapper):
     @OBIWrapper.checkIsActive
-    def __repr__(self):
+    def __repr__(self) :
         cdef str s
-        s=""
-        for view_name in self.keys():
-            view = self.get_view(view_name)
-            s = s + repr(view) + "\n"
-            view.close()
+        cdef char* sc
+        cdef OBIDMS_p pointer = self.pointer()
+        sc = obi_dms_formatted_infos(pointer, False)
+        s = bytes2str(sc)
+        free(sc)
+        return s
+
+    @OBIWrapper.checkIsActive
+    def repr_longformat(self) :
+        cdef str s
+        cdef char* sc
+        cdef OBIDMS_p pointer = self.pointer()
+        sc = obi_dms_formatted_infos(pointer, True)
+        s = bytes2str(sc)
+        free(sc)
         return s

View File

@@ -11,11 +11,14 @@ cdef class Taxonomy(OBIWrapper) :
     cdef bytes _name
     cdef DMS _dms
     cdef list _ranks
+    cdef dict _name_dict
 
     cdef inline OBIDMS_taxonomy_p pointer(self)
+    cdef fill_name_dict(self)
 
     cpdef Taxon get_taxon_by_idx(self, int idx)
     cpdef Taxon get_taxon_by_taxid(self, int taxid)
+    cpdef Taxon get_taxon_by_name(self, object taxon_name, object restricting_taxid=*)
     cpdef write(self, object prefix)
     cpdef int add_taxon(self, str name, str rank_name, int parent_taxid, int min_taxid=*)
     cpdef object get_species(self, int taxid)

View File

@ -15,7 +15,11 @@ from ..capi.obitaxonomy cimport obi_taxonomy_exists, \
obi_taxo_get_species, \ obi_taxo_get_species, \
obi_taxo_get_genus, \ obi_taxo_get_genus, \
obi_taxo_get_family, \ obi_taxo_get_family, \
ecotx_t ecotx_t, \
econame_t, \
obi_taxo_get_name_from_name_idx, \
obi_taxo_get_taxon_from_name_idx
from cpython.pycapsule cimport PyCapsule_New, PyCapsule_GetPointer from cpython.pycapsule cimport PyCapsule_New, PyCapsule_GetPointer
import tarfile import tarfile
@ -24,11 +28,29 @@ from libc.stdlib cimport free
cdef class Taxonomy(OBIWrapper) : cdef class Taxonomy(OBIWrapper) :
# TODO function to import taxonomy? # TODO function to import taxonomy?
cdef inline OBIDMS_taxonomy_p pointer(self) : cdef inline OBIDMS_taxonomy_p pointer(self) :
return <OBIDMS_taxonomy_p>(self._pointer) return <OBIDMS_taxonomy_p>(self._pointer)
cdef fill_name_dict(self):
print("Indexing taxon names...")
cdef OBIDMS_taxonomy_p pointer = self.pointer()
cdef ecotx_t* taxon_p
cdef object taxon_capsule
cdef bytes name
cdef int count
cdef int n
count = (<OBIDMS_taxonomy_p>pointer).names.count
for n in range(count) :
name = obi_taxo_get_name_from_name_idx(pointer, n)
taxon_p = obi_taxo_get_taxon_from_name_idx(pointer, n)
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
self._name_dict[name] = Taxon(taxon_capsule, self)
@staticmethod @staticmethod
def exists(DMS dms, object name) : def exists(DMS dms, object name) :
@ -75,7 +97,8 @@ cdef class Taxonomy(OBIWrapper) :
taxo._dms = dms taxo._dms = dms
taxo._name = tobytes(name) taxo._name = tobytes(name)
taxo._name_dict = {}
taxo.fill_name_dict()
taxo._ranks = [] taxo._ranks = []
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) : for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks)) taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
@ -118,7 +141,8 @@ cdef class Taxonomy(OBIWrapper) :
taxo._dms = dms taxo._dms = dms
taxo._name = folder_path taxo._name = folder_path
taxo._name_dict = {}
taxo.fill_name_dict()
taxo._ranks = [] taxo._ranks = []
for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) : for r in range((<OBIDMS_taxonomy_p>pointer).ranks.count) :
taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks)) taxo._ranks.append(obi_taxo_rank_index_to_label(r, (<OBIDMS_taxonomy_p>pointer).ranks))
@ -129,8 +153,8 @@ cdef class Taxonomy(OBIWrapper) :
def __getitem__(self, object ref): def __getitem__(self, object ref):
if type(ref) == int : if type(ref) == int :
return self.get_taxon_by_taxid(ref) return self.get_taxon_by_taxid(ref)
else : elif type(ref) == str or type(ref) == bytes :
raise NotImplementedError() return self.get_taxon_by_name(ref)
cpdef Taxon get_taxon_by_taxid(self, int taxid): cpdef Taxon get_taxon_by_taxid(self, int taxid):
@ -143,6 +167,19 @@ cdef class Taxonomy(OBIWrapper) :
return Taxon(taxon_capsule, self) return Taxon(taxon_capsule, self)
cpdef Taxon get_taxon_by_name(self, object taxon_name, object restricting_taxid=None):
taxon = self._name_dict.get(tobytes(taxon_name), None)
if not taxon:
return None
elif restricting_taxid:
if self.is_ancestor(restricting_taxid, taxon.taxid):
return taxon
else:
return None
else:
return taxon
cpdef Taxon get_taxon_by_idx(self, int idx): cpdef Taxon get_taxon_by_idx(self, int idx):
cdef ecotx_t* taxa cdef ecotx_t* taxa
cdef ecotx_t* taxon_p cdef ecotx_t* taxon_p
@ -232,7 +269,7 @@ cdef class Taxonomy(OBIWrapper) :
taxa = self.pointer().taxa.taxon taxa = self.pointer().taxa.taxon
# Yield each taxid # Yield each taxon
for t in range(self.pointer().taxa.count): for t in range(self.pointer().taxa.count):
taxon_p = <ecotx_t*> (taxa+t) taxon_p = <ecotx_t*> (taxa+t)
taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL) taxon_capsule = PyCapsule_New(taxon_p, NULL, NULL)
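
Taken together, the taxonomy changes above index every taxon name into a Python dict at load time and expose it through get_taxon_by_name and through __getitem__, which now accepts str or bytes as well as int taxids. A hedged usage sketch, assuming taxo is an opened obitools3 Taxonomy and using a placeholder name and taxid:

    # Hedged usage sketch, not part of the patch: 'taxo' is assumed to be an
    # opened obitools3 Taxonomy; the name and taxid below are placeholders.
    taxon = taxo.get_taxon_by_name("Homo sapiens")       # exact-name lookup via the new name index
    same  = taxo["Homo sapiens"]                         # __getitem__ now accepts str/bytes too
    hit = taxo.get_taxon_by_name("Homo sapiens", 9443)   # None unless 9443 is an ancestor of the hit
    if hit is not None:
        print(hit.taxid)

Note that the lookup is an exact match on the stored name (the dict is keyed on bytes), not a fuzzy search.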

View File

@ -7,6 +7,7 @@ cdef dict __VIEW_CLASS__= {}
from libc.stdlib cimport malloc from libc.stdlib cimport malloc
from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
from obitools3.version import version
from ..capi.obiview cimport Alias_column_pair_p, \ from ..capi.obiview cimport Alias_column_pair_p, \
obi_new_view, \ obi_new_view, \
@ -18,7 +19,9 @@ from ..capi.obiview cimport Alias_column_pair_p, \
obi_view_delete_column, \ obi_view_delete_column, \
obi_view_create_column_alias, \ obi_view_create_column_alias, \
obi_view_write_comments, \ obi_view_write_comments, \
obi_delete_view obi_delete_view, \
obi_view_formatted_infos, \
obi_view_formatted_infos_one_line
from ..capi.obidmscolumn cimport OBIDMS_column_p from ..capi.obidmscolumn cimport OBIDMS_column_p
from ..capi.obidms cimport OBIDMS_p from ..capi.obidms cimport OBIDMS_p
@ -58,6 +61,8 @@ import pkgutil
import json import json
import sys import sys
from libc.stdlib cimport free
cdef class View(OBIWrapper) : cdef class View(OBIWrapper) :
@ -183,11 +188,24 @@ cdef class View(OBIWrapper) :
@OBIWrapper.checkIsActive @OBIWrapper.checkIsActive
def __repr__(self) : def __repr__(self) :
cdef str s = "#View name:\n{name:s}\n#Line count:\n{line_count:d}\n#Columns:\n".format(name = bytes2str(self.name), cdef str s
line_count = self.line_count) cdef char* sc
for column_name in self.keys() : cdef Obiview_p pointer = self.pointer()
s = s + repr(self[column_name]) + '\n' sc = obi_view_formatted_infos(pointer, False)
s = bytes2str(sc)
free(sc)
return s
@OBIWrapper.checkIsActive
def repr_longformat(self) :
cdef str s
cdef char* sc
cdef Obiview_p pointer = self.pointer()
sc = obi_view_formatted_infos(pointer, True)
s = bytes2str(sc)
free(sc)
return s return s
@ -325,7 +343,7 @@ cdef class View(OBIWrapper) :
new_column = Column.new_column(self, old_column.pointer().header.name, new_data_type, new_column = Column.new_column(self, old_column.pointer().header.name, new_data_type,
nb_elements_per_line=new_nb_elements_per_line, elements_names=new_elements_names, nb_elements_per_line=new_nb_elements_per_line, elements_names=new_elements_names,
comments=old_column.comments, alias=column_name_b+tobytes('___new___')) dict_column=(new_nb_elements_per_line>1), comments=old_column.comments, alias=column_name_b+tobytes('___new___'))
switch_to_dict = old_column.nb_elements_per_line == 1 and new_nb_elements_per_line > 1 switch_to_dict = old_column.nb_elements_per_line == 1 and new_nb_elements_per_line > 1
ori_key = old_column._elements_names[0] ori_key = old_column._elements_names[0]
@ -386,6 +404,7 @@ cdef class View(OBIWrapper) :
col.data_type_int, col.data_type_int,
nb_elements_per_line = col.nb_elements_per_line, nb_elements_per_line = col.nb_elements_per_line,
elements_names = col._elements_names, elements_names = col._elements_names,
dict_column = col.dict_column,
tuples = col.tuples, tuples = col.tuples,
to_eval = col.to_eval, to_eval = col.to_eval,
comments = col.comments, comments = col.comments,
@ -434,6 +453,7 @@ cdef class View(OBIWrapper) :
for i in range(len(input_view_name)): for i in range(len(input_view_name)):
input_str.append(tostr(input_dms_name[i])+"/"+tostr(input_view_name[i])) input_str.append(tostr(input_dms_name[i])+"/"+tostr(input_view_name[i]))
comments["input_str"] = input_str comments["input_str"] = input_str
comments["version"] = version
return bytes2str_object(comments) return bytes2str_object(comments)
@ -580,7 +600,8 @@ cdef class View(OBIWrapper) :
if element is not None: if element is not None:
if element.comments[b"input_dms_name"] is not None : if element.comments[b"input_dms_name"] is not None :
for i in range(len(element.comments[b"input_dms_name"])) : for i in range(len(element.comments[b"input_dms_name"])) :
if element.comments[b"input_dms_name"][i] == element.dms.name and b"/" not in element.comments[b"input_view_name"][i]: # Same DMS and not a special element like a taxonomy if b"/" not in element.comments[b"input_view_name"][i] and element.comments[b"input_view_name"][i] in element.dms \
and element.comments[b"input_dms_name"][i] == element.dms.name : # Same DMS and not a special element like a taxonomy and view was not deleted
top_level.append(element.dms[element.comments[b"input_view_name"][i]]) top_level.append(element.dms[element.comments[b"input_view_name"][i]])
else: else:
top_level.append(None) top_level.append(None)
@ -786,7 +807,7 @@ cdef class Line :
def __repr__(self): def __repr__(self):
return bytes2str(self).repr_bytes() return bytes2str(self.repr_bytes())
cpdef repr_bytes(self): cpdef repr_bytes(self):

View File

@ -5,6 +5,7 @@ from obitools3.dms.view.view cimport Line
from obitools3.utils cimport bytes2str_object, str2bytes, tobytes from obitools3.utils cimport bytes2str_object, str2bytes, tobytes
from obitools3.dms.column.column cimport Column_line, Column_multi_elts from obitools3.dms.column.column cimport Column_line, Column_multi_elts
import sys
cdef class TabFormat: cdef class TabFormat:
@ -22,33 +23,45 @@ cdef class TabFormat:
if self.first_line: if self.first_line:
self.tags = [k for k in data.keys()] self.tags = [k for k in data.keys()]
for k in self.tags: if self.header and self.first_line:
for k in self.tags:
if self.header and self.first_line:
if isinstance(data.view[k], Column_multi_elts): if isinstance(data.view[k], Column_multi_elts):
for k2 in data.view[k].keys(): keys = data.view[k].keys()
keys.sort()
for k2 in keys:
line.append(tobytes(k)+b':'+tobytes(k2)) line.append(tobytes(k)+b':'+tobytes(k2))
else: else:
line.append(tobytes(k)) line.append(tobytes(k))
else: r = self.sep.join(value for value in line)
value = data[k] r += b'\n'
if isinstance(data.view[k], Column_multi_elts): line = []
if value is None: # all keys at None
for k2 in data.view[k].keys(): # TODO could be much more efficient for k in self.tags:
line.append(self.NAString) value = data[k]
else: if isinstance(data.view[k], Column_multi_elts):
for k2 in data.view[k].keys(): # TODO could be much more efficient keys = data.view[k].keys()
if value[k2] is not None: keys.sort()
line.append(str2bytes(str(bytes2str_object(value[k2])))) # genius programming if value is None: # all keys at None
else: for k2 in keys: # TODO could be much more efficient
line.append(self.NAString)
else:
if value is not None:
line.append(str2bytes(str(bytes2str_object(value))))
else:
line.append(self.NAString) line.append(self.NAString)
else:
for k2 in keys: # TODO could be much more efficient
if value[k2] is not None:
line.append(str2bytes(str(bytes2str_object(value[k2])))) # genius programming
else:
line.append(self.NAString)
else:
if value is not None:
line.append(str2bytes(str(bytes2str_object(value))))
else:
line.append(self.NAString)
if self.header and self.first_line:
r += self.sep.join(value for value in line)
else:
r = self.sep.join(value for value in line)
if self.first_line: if self.first_line:
self.first_line = False self.first_line = False
return self.sep.join(value for value in line) return r
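
The reworked TabFormat above now writes the header row first (expanding each multi-element column into tag:subkey cells over its sorted subkeys) before emitting the first data row, and pads missing sub-values with the NA string. A standalone sketch of that flattening with made-up data, for illustration only; the real class operates on obitools3 views and Column_multi_elts:

    # Illustration of the header/row flattening performed by TabFormat
    # (toy data; not the real implementation).
    NA = b"NA"
    SEP = b"\t"

    def cell(v):
        # render one value, using the NA string for missing values
        if v is None:
            return NA
        return v if isinstance(v, bytes) else str(v).encode()

    def flatten(record, multi_keys):
        # record: dict tag -> value; tags listed in multi_keys hold sub-dicts
        header, row = [], []
        for tag, value in record.items():
            if tag in multi_keys:
                for k2 in sorted(multi_keys[tag]):       # sub-columns in sorted key order
                    header.append(tag + b":" + k2)
                    row.append(cell(None if value is None else value.get(k2)))
            else:
                header.append(tag)
                row.append(cell(value))
        return SEP.join(header) + b"\n" + SEP.join(row)

    # A per-sample count column with samples A and B, where B has no value:
    print(flatten({b"id": b"seq1", b"count": {b"A": 3}},
                  {b"count": [b"A", b"B"]}).decode())
    # prints: id <TAB> count:A <TAB> count:B, then seq1 <TAB> 3 <TAB> NA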

View File

@ -22,10 +22,10 @@ from libc.stdlib cimport free, malloc, realloc
from libc.string cimport strcpy, strlen from libc.string cimport strcpy, strlen
_featureMatcher = re.compile(b'^FEATURES.+\n(?=ORIGIN)',re.DOTALL + re.M) _featureMatcher = re.compile(b'^FEATURES.+\n(?=ORIGIN )',re.DOTALL + re.M)
_headerMatcher = re.compile(b'^LOCUS.+(?=\nFEATURES)', re.DOTALL + re.M) _headerMatcher = re.compile(b'^LOCUS.+(?=\nFEATURES)', re.DOTALL + re.M)
_seqMatcher = re.compile(b'ORIGIN.+(?=//\n)', re.DOTALL + re.M) _seqMatcher = re.compile(b'ORIGIN .+(?=//\n)', re.DOTALL + re.M)
_cleanSeq1 = re.compile(b'ORIGIN.+\n') _cleanSeq1 = re.compile(b'ORIGIN.+\n')
_cleanSeq2 = re.compile(b'[ \n0-9]+') _cleanSeq2 = re.compile(b'[ \n0-9]+')
_acMatcher = re.compile(b'(?<=^ACCESSION ).+',re.M) _acMatcher = re.compile(b'(?<=^ACCESSION ).+',re.M)

View File

@ -8,7 +8,7 @@ Created on feb 20th 2018
import types import types
from obitools3.utils cimport __etag__ from obitools3.utils cimport __etag__
from obitools3.utils cimport str2bytes
def tabIterator(lineiterator, def tabIterator(lineiterator,
bint header = False, bint header = False,
@ -75,7 +75,7 @@ def tabIterator(lineiterator,
continue continue
else: else:
# TODO ??? default column names? like R? # TODO ??? default column names? like R?
keys = [i for i in range(len(line.split(sep)))] keys = [str2bytes(str(i)) for i in range(len(line.split(sep)))]
while skipped < skip : while skipped < skip :
line = next(iterator) line = next(iterator)

View File

@ -53,7 +53,11 @@ def entryIteratorFactory(lineiterator,
i = iterator i = iterator
first=next(i) try:
first=next(i)
except StopIteration:
first=""
pass
format=b"tabular" format=b"tabular"

View File

@ -276,11 +276,11 @@ def open_uri(uri,
iseq = urib iseq = urib
objclass = bytes objclass = bytes
else: # TODO update uopen to be able to write? else: # TODO update uopen to be able to write?
if urip.path == b'-': if not urip.path or urip.path == b'-':
file = sys.stdout.buffer file = sys.stdout.buffer
elif urip.path : else:
file = open(urip.path, 'wb') file = open(urip.path, 'wb')
if file is not None: if file is not None:
qualifiers=parse_qs(urip.query) qualifiers=parse_qs(urip.query)
@ -386,10 +386,13 @@ def open_uri(uri,
raise MalformedURIException('Malformed header argument in URI') raise MalformedURIException('Malformed header argument in URI')
if b"sep" in qualifiers: if b"sep" in qualifiers:
sep=tobytes(qualifiers[b"sep"][0][0]) sep = tobytes(qualifiers[b"sep"][0][0])
else: else:
try: try:
sep=tobytes(config["obi"]["sep"]) sep = config["obi"]["sep"]
if sep == '\\t': # dirty workaround for flake8(?) issue that reads '\t' as '\'+'t' when parsing the option value
sep = '\t'
sep = tobytes(sep)
except KeyError: except KeyError:
sep=None sep=None
@ -464,7 +467,7 @@ def open_uri(uri,
if format is not None: if format is not None:
if seqtype==b"nuc": if seqtype==b"nuc":
objclass = Nuc_Seq # Nuc_Seq_Stored? TODO objclass = Nuc_Seq # Nuc_Seq_Stored? TODO
if format==b"fasta": if format==b"fasta" or format==b"silva":
if input: if input:
iseq = fastaNucIterator(file, iseq = fastaNucIterator(file,
skip=skip, skip=skip,

View File

@ -2,7 +2,7 @@
from obitools3.dms.capi.obitypes cimport obitype_t, index_t from obitools3.dms.capi.obitypes cimport obitype_t, index_t
cpdef bytes format_separator(bytes format) cpdef bytes format_uniq_pattern(bytes format)
cpdef int count_entries(file, bytes format) cpdef int count_entries(file, bytes format)
cdef obi_errno_to_exception(index_t line_nb=*, object elt_id=*, str error_message=*) cdef obi_errno_to_exception(index_t line_nb=*, object elt_id=*, str error_message=*)

View File

@ -9,7 +9,8 @@ from obitools3.dms.capi.obitypes cimport is_a_DNA_seq, \
OBI_QUAL, \ OBI_QUAL, \
OBI_SEQ, \ OBI_SEQ, \
OBI_STR, \ OBI_STR, \
index_t index_t, \
OBI_INT_MAX
from obitools3.dms.capi.obierrno cimport OBI_LINE_IDX_ERROR, \ from obitools3.dms.capi.obierrno cimport OBI_LINE_IDX_ERROR, \
OBI_ELT_IDX_ERROR, \ OBI_ELT_IDX_ERROR, \
@ -24,11 +25,11 @@ import glob
import gzip import gzip
cpdef bytes format_separator(bytes format): cpdef bytes format_uniq_pattern(bytes format):
if format == b"fasta": if format == b"fasta":
return b"\n>" return b"\n>"
elif format == b"fastq": elif format == b"fastq":
return b"\n@" return b"\n\+\n"
elif format == b"ngsfilter" or format == b"tabular": elif format == b"ngsfilter" or format == b"tabular":
return b"\n" return b"\n"
elif format == b"genbank" or format == b"embl": elif format == b"genbank" or format == b"embl":
@ -42,7 +43,7 @@ cpdef bytes format_separator(bytes format):
cpdef int count_entries(file, bytes format): cpdef int count_entries(file, bytes format):
try: try:
sep = format_separator(format) sep = format_uniq_pattern(format)
if sep is None: if sep is None:
return -1 return -1
sep = re.compile(sep) sep = re.compile(sep)
@ -72,7 +73,7 @@ cpdef int count_entries(file, bytes format):
return -1 return -1
mmapped_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) mmapped_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
total_count += len(re.findall(sep, mmapped_file)) total_count += len(re.findall(sep, mmapped_file))
if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank": if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank" and format != b"fastq":
total_count += 1 # adding +1 for 1st entry because separators include \n (ngsfilter and tabular already count one more because of last \n) total_count += 1 # adding +1 for 1st entry because separators include \n (ngsfilter and tabular already count one more because of last \n)
except: except:
@ -258,7 +259,7 @@ cdef obitype_t update_obitype(obitype_t obitype, object new_value) :
new_type = type(new_value) new_type = type(new_value)
if obitype == OBI_INT : if obitype == OBI_INT :
if new_type == float : if new_type == float or new_value > OBI_INT_MAX :
return OBI_FLOAT return OBI_FLOAT
# TODO BOOL vers INT/FLOAT # TODO BOOL vers INT/FLOAT
elif new_type == str or new_type == bytes : elif new_type == str or new_type == bytes :
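
The update_obitype change above promotes an OBI_INT column to OBI_FLOAT as soon as a value is a float or exceeds OBI_INT_MAX, so oversized integers read from text files no longer overflow the column type. A toy sketch of the same decision rule; the 2**31 - 1 value for OBI_INT_MAX is an assumption based on OBI_INT being a 32-bit type:

    # Toy sketch of the promotion rule, not the library code.
    OBI_INT, OBI_FLOAT = "OBI_INT", "OBI_FLOAT"
    OBI_INT_MAX = 2**31 - 1   # assumed: OBI_INT columns hold 32-bit signed integers

    def update_obitype(current, new_value):
        if current == OBI_INT and (isinstance(new_value, float) or new_value > OBI_INT_MAX):
            return OBI_FLOAT   # column gets rewritten as floating point
        return current

    print(update_obitype(OBI_INT, 3_000_000_000))   # -> OBI_FLOAT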

View File

@ -1,5 +1,5 @@
major = 3 major = 3
minor = 0 minor = 0
serial= '0b30' serial= '1b6'
version ="%d.%d.%s" % (major,minor,serial) version ="%d.%d.%s" % (major,minor,serial)

View File

@ -243,6 +243,7 @@ int build_reference_db(const char* dms_name,
false, false,
false, false,
false, false,
false,
"", "",
"", "",
-1, -1,
@ -392,6 +393,7 @@ int build_reference_db(const char* dms_name,
1, 1,
"", "",
false, false,
false,
true, true,
false, false,
"", "",
@ -415,6 +417,7 @@ int build_reference_db(const char* dms_name,
1, 1,
"", "",
false, false,
false,
true, true,
false, false,
"", "",

View File

@ -36,10 +36,12 @@ bool only_ATGC(const char* seq)
{ {
if (!((*c == 'A') || \ if (!((*c == 'A') || \
(*c == 'T') || \ (*c == 'T') || \
(*c == 'U') || \
(*c == 'G') || \ (*c == 'G') || \
(*c == 'C') || \ (*c == 'C') || \
(*c == 'a') || \ (*c == 'a') || \
(*c == 't') || \ (*c == 't') || \
(*c == 'u') || \
(*c == 'g') || \ (*c == 'g') || \
(*c == 'c'))) (*c == 'c')))
{ {
@ -182,6 +184,8 @@ byte_t* encode_seq_on_2_bits(const char* seq, int32_t length)
break; break;
case 't': case 't':
case 'T': case 'T':
case 'u':
case 'U':
seq_b[i/4] |= NUC_T_2b; seq_b[i/4] |= NUC_T_2b;
break; break;
default: default:
@ -288,6 +292,8 @@ byte_t* encode_seq_on_4_bits(const char* seq, int32_t length)
break; break;
case 't': case 't':
case 'T': case 'T':
case 'u': // discussable
case 'U':
seq_b[i/2] |= NUC_T_4b; seq_b[i/2] |= NUC_T_4b;
break; break;
case 'r': case 'r':

View File

@ -64,7 +64,7 @@ enum
/** /**
* @brief Checks if there are only 'atgcATGC' characters in a * @brief Checks if there are only 'atgcuATGCU' characters in a
* character string. * character string.
* *
* @param seq The sequence to check. * @param seq The sequence to check.
@ -129,12 +129,13 @@ byte_t get_nucleotide_from_encoded_seq(byte_t* seq, int32_t idx, uint8_t encodin
/** /**
* @brief Encodes a DNA sequence with each nucleotide coded on 2 bits. * @brief Encodes a DNA sequence with each nucleotide coded on 2 bits.
* *
* A or a : 00 * A or a : 00
* C or c : 01 * C or c : 01
* T or t : 10 * T or t or U or u : 10
* G or g : 11 * G or g : 11
* *
* @warning The DNA sequence must contain only 'atgcATGC' characters. * @warning The DNA sequence must contain only 'atgcuATGCU' characters.
* @warning Uracil ('U') bases are encoded as Thymine ('T') bases.
* *
* @param seq The sequence to encode. * @param seq The sequence to encode.
* @param length The length of the sequence to encode. * @param length The length of the sequence to encode.
@ -169,23 +170,24 @@ char* decode_seq_on_2_bits(byte_t* seq_b, int32_t length_seq);
/** /**
* @brief Encodes a DNA sequence with each nucleotide coded on 4 bits. * @brief Encodes a DNA sequence with each nucleotide coded on 4 bits.
* *
* A or a : 0001 * A or a : 0001
* C or c : 0010 * C or c : 0010
* G or g : 0011 * G or g : 0011
* T or t : 0100 * T or t or U or u : 0100
* R or r : 0101 * R or r : 0101
* Y or y : 0110 * Y or y : 0110
* S or s : 0111 * S or s : 0111
* W or w : 1000 * W or w : 1000
* K or k : 1001 * K or k : 1001
* M or m : 1010 * M or m : 1010
* B or b : 1011 * B or b : 1011
* D or d : 1100 * D or d : 1100
* H or h : 1101 * H or h : 1101
* V or v : 1110 * V or v : 1110
* N or n : 1111 * N or n : 1111
* *
* @warning The DNA sequence must contain only IUPAC characters. * @warning The DNA sequence must contain only IUPAC characters.
* @warning Uracil ('U') bases are encoded as Thymine ('T') bases.
* *
* @param seq The sequence to encode. * @param seq The sequence to encode.
* @param length The length of the sequence to encode. * @param length The length of the sequence to encode.
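
The updated docstrings above spell out the 2-bit mapping (A=00, C=01, T or U=10, G=11) and the fact that uracil is stored as thymine. A small standalone sketch of that packing in Python, for illustration; the C implementation uses its own NUC_*_2b constants, and the high-to-low bit order within each byte is an assumption here:

    # Standalone sketch of the documented 2-bit encoding (A=00, C=01, T/U=10, G=11),
    # packing four nucleotides per byte; not the library's C implementation.
    CODE_2B = {"A": 0b00, "C": 0b01, "T": 0b10, "U": 0b10, "G": 0b11}

    def encode_2bits(seq):
        out = bytearray((len(seq) + 3) // 4)
        for i, nuc in enumerate(seq.upper()):
            code = CODE_2B[nuc]                        # non-ATGCU input raises, as only_ATGC() guards against
            out[i // 4] |= code << (6 - 2 * (i % 4))   # assumed high-to-low packing within each byte
        return bytes(out)

    print(encode_2bits("AUGC").hex())   # the U is encoded exactly like a T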

View File

@ -88,42 +88,42 @@ static int create_output_columns(Obiview_p o_view,
int sample_count) int sample_count)
{ {
// Status column // Status column
if (obi_view_add_column(o_view, CLEAN_STATUS_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, sample_count, (sample_column->header)->elements_names, true, false, false, NULL, NULL, -1, CLEAN_STATUS_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, CLEAN_STATUS_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, sample_count, (sample_column->header)->elements_names, true, true, false, false, NULL, NULL, -1, CLEAN_STATUS_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", CLEAN_STATUS_COLUMN_NAME); obidebug(1, "\nError creating the %s column", CLEAN_STATUS_COLUMN_NAME);
return -1; return -1;
} }
// Head column // Head column
if (obi_view_add_column(o_view, CLEAN_HEAD_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_HEAD_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, CLEAN_HEAD_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_HEAD_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", CLEAN_HEAD_COLUMN_NAME); obidebug(1, "\nError creating the %s column", CLEAN_HEAD_COLUMN_NAME);
return -1; return -1;
} }
// Sample count column // Sample count column
if (obi_view_add_column(o_view, CLEAN_SAMPLECOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_SAMPLECOUNT_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, CLEAN_SAMPLECOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_SAMPLECOUNT_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", CLEAN_SAMPLECOUNT_COLUMN_NAME); obidebug(1, "\nError creating the %s column", CLEAN_SAMPLECOUNT_COLUMN_NAME);
return -1; return -1;
} }
// Head count column // Head count column
if (obi_view_add_column(o_view, CLEAN_HEADCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_HEADCOUNT_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, CLEAN_HEADCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_HEADCOUNT_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", CLEAN_HEADCOUNT_COLUMN_NAME); obidebug(1, "\nError creating the %s column", CLEAN_HEADCOUNT_COLUMN_NAME);
return -1; return -1;
} }
// Internal count column // Internal count column
if (obi_view_add_column(o_view, CLEAN_INTERNALCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_INTERNALCOUNT_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, CLEAN_INTERNALCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_INTERNALCOUNT_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", CLEAN_INTERNALCOUNT_COLUMN_NAME); obidebug(1, "\nError creating the %s column", CLEAN_INTERNALCOUNT_COLUMN_NAME);
return -1; return -1;
} }
// Singleton count column // Singleton count column
if (obi_view_add_column(o_view, CLEAN_SINGLETONCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, CLEAN_SINGLETONCOUNT_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, CLEAN_SINGLETONCOUNT_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, CLEAN_SINGLETONCOUNT_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", CLEAN_SINGLETONCOUNT_COLUMN_NAME); obidebug(1, "\nError creating the %s column", CLEAN_SINGLETONCOUNT_COLUMN_NAME);
return -1; return -1;
@ -229,6 +229,8 @@ int obi_clean(const char* dms_name,
return -1; return -1;
} }
seq_count = (i_view->infos)->line_count;
// Open the sequence column // Open the sequence column
if (strcmp((i_view->infos)->view_type, VIEW_TYPE_NUC_SEQS) == 0) if (strcmp((i_view->infos)->view_type, VIEW_TYPE_NUC_SEQS) == 0)
iseq_column = obi_view_get_column(i_view, NUC_SEQUENCE_COLUMN); iseq_column = obi_view_get_column(i_view, NUC_SEQUENCE_COLUMN);
@ -245,7 +247,7 @@ int obi_clean(const char* dms_name,
} }
// Open the sample column if there is one // Open the sample column if there is one
if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL)) if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL) || (seq_count == 0))
{ {
fprintf(stderr, "Info: No sample information provided, assuming one sample.\n"); fprintf(stderr, "Info: No sample information provided, assuming one sample.\n");
sample_column = obi_view_get_column(i_view, COUNT_COLUMN); sample_column = obi_view_get_column(i_view, COUNT_COLUMN);
@ -340,66 +342,67 @@ int obi_clean(const char* dms_name,
return -1; return -1;
} }
// Build kmer tables if (seq_count > 0)
ktable = hash_seq_column(i_view, iseq_column, 0);
if (ktable == NULL)
{ {
obi_set_errno(OBI_CLEAN_ERROR); // Build kmer tables
obidebug(1, "\nError building kmer tables before aligning"); ktable = hash_seq_column(i_view, iseq_column, 0);
return -1; if (ktable == NULL)
} {
obi_set_errno(OBI_CLEAN_ERROR);
obidebug(1, "\nError building kmer tables before aligning");
return -1;
}
seq_count = (i_view->infos)->line_count; // Allocate arrays for sample counts otherwise reading in mapped files takes longer
complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
// Allocate arrays for sample counts otherwise reading in mapped files takes longer if (complete_sample_count_array == NULL)
complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int)); {
if (complete_sample_count_array == NULL) obi_set_errno(OBI_MALLOC_ERROR);
{ obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
obi_set_errno(OBI_MALLOC_ERROR); return -1;
obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int)); }
return -1;
}
for (samp=0; samp < sample_count; samp++)
{
for (k=0; k<seq_count; k++)
complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
}
// Allocate arrays for blobs otherwise reading in mapped files takes longer
blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
if (blob_array == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for the array of blobs");
return -1;
}
for (k=0; k<seq_count; k++)
{
blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
}
// Allocate alignment result array (byte at 0 if not aligned yet,
// 1 if sequence at index has a similarity above the threshold with the current sequence,
// 2 if sequence at index has a similarity below the threshold with the current sequence)
alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
if (alignment_result_array == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for alignment result array");
return -1;
}
// Initialize all sequences to singletons or NA if no sequences in that sample
for (k=0; k<seq_count; k++)
{
for (samp=0; samp < sample_count; samp++) for (samp=0; samp < sample_count; samp++)
{ {
if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA) // Only initialize samples where there are some sequences for (k=0; k<seq_count; k++)
complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
}
// Allocate arrays for blobs otherwise reading in mapped files takes longer
blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
if (blob_array == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for the array of blobs");
return -1;
}
for (k=0; k<seq_count; k++)
{
blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
}
// Allocate alignment result array (byte at 0 if not aligned yet,
// 1 if sequence at index has a similarity above the threshold with the current sequence,
// 2 if sequence at index has a similarity below the threshold with the current sequence)
alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
if (alignment_result_array == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for alignment result array");
return -1;
}
// Initialize all sequences to singletons or NA if no sequences in that sample
for (k=0; k<seq_count; k++)
{
for (samp=0; samp < sample_count; samp++)
{ {
if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0) if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA) // Only initialize samples where there are some sequences
{ {
obidebug(1, "\nError initializing all sequences to singletons"); if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
return -1; {
obidebug(1, "\nError initializing all sequences to singletons");
return -1;
}
} }
} }
} }
@ -551,17 +554,20 @@ int obi_clean(const char* dms_name,
} }
} }
free_kmer_tables(ktable, seq_count); if (seq_count > 0)
free(complete_sample_count_array); {
free(blob_array); free_kmer_tables(ktable, seq_count);
free(alignment_result_array); free(complete_sample_count_array);
free(blob_array);
free(alignment_result_array);
}
fprintf(stderr, "\n"); fprintf(stderr, "\n");
if (stop) if (stop)
return -1; return -1;
if (heads_only) if (heads_only && (seq_count > 0))
{ {
line_selection = malloc((((o_view->infos)->line_count) + 1) * sizeof(index_t)); line_selection = malloc((((o_view->infos)->line_count) + 1) * sizeof(index_t));
if (line_selection == NULL) if (line_selection == NULL)
@ -635,7 +641,7 @@ int obi_clean(const char* dms_name,
} }
// Flag the end of the line selection // Flag the end of the line selection
if (heads_only) if (heads_only && (seq_count > 0))
line_selection[l] = -1; line_selection[l] = -1;
// Create new view with line selection if heads only // Create new view with line selection if heads only

View File

@ -150,49 +150,49 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
static int create_output_columns(Obiview_p o_view, bool kingdom_mode) static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
{ {
// Original length column // Original length column
if (obi_view_add_column(o_view, ECOPCR_SEQLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SEQLEN_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_SEQLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SEQLEN_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_SEQLEN_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_SEQLEN_COLUMN_NAME);
return -1; return -1;
} }
// Amplicon length column // Amplicon length column
if (obi_view_add_column(o_view, ECOPCR_AMPLICONLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_AMPLICONLEN_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_AMPLICONLEN_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_AMPLICONLEN_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_AMPLICONLEN_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_AMPLICONLEN_COLUMN_NAME);
return -1; return -1;
} }
// Taxid column // Taxid column
if (obi_view_add_column(o_view, TAXID_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, TAXID_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, TAXID_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, TAXID_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", TAXID_COLUMN); obidebug(1, "\nError creating the %s column", TAXID_COLUMN);
return -1; return -1;
} }
// Taxonomic rank column // Taxonomic rank column
if (obi_view_add_column(o_view, ECOPCR_RANK_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_RANK_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_RANK_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_RANK_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_RANK_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_RANK_COLUMN_NAME);
return -1; return -1;
} }
// Species taxid column // Species taxid column
if (obi_view_add_column(o_view, ECOPCR_SPECIES_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_TAXID_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_SPECIES_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_TAXID_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_TAXID_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_TAXID_COLUMN_NAME);
return -1; return -1;
} }
// Genus taxid column // Genus taxid column
if (obi_view_add_column(o_view, ECOPCR_GENUS_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_TAXID_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_GENUS_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_TAXID_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_TAXID_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_TAXID_COLUMN_NAME);
return -1; return -1;
} }
// Family taxid column // Family taxid column
if (obi_view_add_column(o_view, ECOPCR_FAMILY_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_TAXID_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_FAMILY_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_TAXID_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_TAXID_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_TAXID_COLUMN_NAME);
return -1; return -1;
@ -201,7 +201,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
if (kingdom_mode) if (kingdom_mode)
{ {
// Kingdom taxid column // Kingdom taxid column
if (obi_view_add_column(o_view, ECOPCR_KINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_TAXID_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_KINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_TAXID_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_TAXID_COLUMN_NAME);
return -1; return -1;
@ -210,7 +210,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
else else
{ {
// Superkingdom taxid column // Superkingdom taxid column
if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_TAXID_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_TAXID_COLUMN_NAME);
return -1; return -1;
@ -218,28 +218,28 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
} }
// Scientific name column // Scientific name column
if (obi_view_add_column(o_view, ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SCIENTIFIC_NAME_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SCIENTIFIC_NAME_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_SCIENTIFIC_NAME_COLUMN_NAME);
return -1; return -1;
} }
// Species name column // Species name column
if (obi_view_add_column(o_view, ECOPCR_SPECIES_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_NAME_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_SPECIES_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SPECIES_NAME_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_NAME_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_SPECIES_NAME_COLUMN_NAME);
return -1; return -1;
} }
// Genus name column // Genus name column
if (obi_view_add_column(o_view, ECOPCR_GENUS_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_NAME_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_GENUS_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_GENUS_NAME_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_NAME_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_GENUS_NAME_COLUMN_NAME);
return -1; return -1;
} }
// Family name column // Family name column
if (obi_view_add_column(o_view, ECOPCR_FAMILY_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_NAME_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_FAMILY_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_FAMILY_NAME_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_NAME_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_FAMILY_NAME_COLUMN_NAME);
return -1; return -1;
@ -248,7 +248,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
if (kingdom_mode) if (kingdom_mode)
{ {
// Kingdom name column // Kingdom name column
if (obi_view_add_column(o_view, ECOPCR_KINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_NAME_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_KINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_KINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_NAME_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_KINGDOM_NAME_COLUMN_NAME);
return -1; return -1;
@ -257,7 +257,7 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
else else
{ {
// Superkingdom name column // Superkingdom name column
if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_NAME_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_SUPERKINGDOM_NAME_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_SUPERKINGDOM_NAME_COLUMN_NAME);
return -1; return -1;
@ -265,49 +265,49 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode)
} }
// Strand column // Strand column
if (obi_view_add_column(o_view, ECOPCR_STRAND_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_STRAND_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_STRAND_COLUMN_NAME, -1, NULL, OBI_CHAR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_STRAND_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_STRAND_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_STRAND_COLUMN_NAME);
return -1; return -1;
} }
// Primer 1 column // Primer 1 column
if (obi_view_add_column(o_view, ECOPCR_PRIMER1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_PRIMER1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER1_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER1_COLUMN_NAME);
return -1; return -1;
} }
// Primer 2 column // Primer 2 column
if (obi_view_add_column(o_view, ECOPCR_PRIMER2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_PRIMER2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_PRIMER2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER2_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_PRIMER2_COLUMN_NAME);
return -1; return -1;
} }
// Error 1 column // Error 1 column
if (obi_view_add_column(o_view, ECOPCR_ERROR1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_ERROR1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_ERROR1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_ERROR1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_ERROR1_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_ERROR1_COLUMN_NAME);
return -1; return -1;
} }
// Error 2 column // Error 2 column
if (obi_view_add_column(o_view, ECOPCR_ERROR2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_ERROR2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_ERROR2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_ERROR2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_ERROR2_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_ERROR2_COLUMN_NAME);
return -1; return -1;
} }
// Temperature 1 column // Temperature 1 column
if (obi_view_add_column(o_view, ECOPCR_TEMP1_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_TEMP1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_TEMP1_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_TEMP1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_TEMP1_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_TEMP1_COLUMN_NAME);
return -1; return -1;
} }
// Temperature 2 column // Temperature 2 column
if (obi_view_add_column(o_view, ECOPCR_TEMP2_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOPCR_TEMP2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(o_view, ECOPCR_TEMP2_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ECOPCR_TEMP2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the %s column", ECOPCR_TEMP2_COLUMN_NAME); obidebug(1, "\nError creating the %s column", ECOPCR_TEMP2_COLUMN_NAME);
return -1; return -1;

View File

@ -104,42 +104,42 @@ int print_assignment_result(Obiview_p output_view, index_t line,
static int create_output_columns(Obiview_p o_view) static int create_output_columns(Obiview_p o_view)
{ {
// Score column // Score column
if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "\nError creating the column for the score in ecotag"); obidebug(1, "\nError creating the column for the score in ecotag");
return -1; return -1;
} }
// Assigned taxid column // Assigned taxid column
if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "\nError creating the column for the assigned taxid in ecotag"); obidebug(1, "\nError creating the column for the assigned taxid in ecotag");
return -1; return -1;
} }
// Assigned scientific name column // Assigned scientific name column
if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "\nError creating the column for the assigned scientific name in ecotag"); obidebug(1, "\nError creating the column for the assigned scientific name in ecotag");
return -1; return -1;
} }
// Assignment status column // Assignment status column
if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "\nError creating the column for the assignment status in ecotag"); obidebug(1, "\nError creating the column for the assignment status in ecotag");
return -1; return -1;
} }
// Column for array of best match ids // Column for array of best match ids
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, true, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, true, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "\nError creating the column for the array of ids of best matches in ecotag"); obidebug(1, "\nError creating the column for the array of ids of best matches in ecotag");
return -1; return -1;
} }
// Column for array of best match taxids // Column for array of best match taxids
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, true, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_TAXIDS_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, true, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "\nError creating the column for the array of taxids of best matches in ecotag"); obidebug(1, "\nError creating the column for the array of taxids of best matches in ecotag");
return -1; return -1;
@ -218,7 +218,8 @@ int obi_ecotag(const char* dms_name,
const char* taxonomy_name, const char* taxonomy_name,
const char* output_view_name, const char* output_view_name,
const char* output_view_comments, const char* output_view_comments,
double ecotag_threshold) // TODO different threshold for the similarity sphere around ref seqs double ecotag_threshold,
double bubble_threshold)
{ {
// For each sequence // For each sequence
@ -239,6 +240,7 @@ int obi_ecotag(const char* dms_name,
index_t query_seq_idx, ref_seq_idx; index_t query_seq_idx, ref_seq_idx;
double score, best_score; double score, best_score;
double threshold; double threshold;
double lca_threshold;
int lcs_length; int lcs_length;
int ali_length; int ali_length;
Kmer_table_p ktable; Kmer_table_p ktable;
@ -389,10 +391,10 @@ int obi_ecotag(const char* dms_name,
return -1; return -1;
} }
free(db_threshold_str); free(db_threshold_str);
if (ecotag_threshold < db_threshold) if (bubble_threshold < db_threshold)
{ {
fprintf(stderr, "\nError: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).\n\n", fprintf(stderr, "\nError: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).\n\n",
ecotag_threshold, db_threshold); bubble_threshold, db_threshold);
return -1; return -1;
} }
@ -597,11 +599,16 @@ int obi_ecotag(const char* dms_name,
{ {
best_match_idx = best_match_array[j]; best_match_idx = best_match_array[j];
// Find the LCA for the chosen threshold // Find the LCA for the highest threshold between best_score and the chosen bubble threshold
score_array = obi_get_array_with_col_p_in_view(ref_view, score_a_column, best_match_idx, &lca_array_length); score_array = obi_get_array_with_col_p_in_view(ref_view, score_a_column, best_match_idx, &lca_array_length);
if (bubble_threshold < best_score)
lca_threshold = best_score;
else
lca_threshold = bubble_threshold;
k = 0; k = 0;
while ((k < lca_array_length) && (score_array[k] >= best_score)) while ((k < lca_array_length) && (score_array[k] >= lca_threshold))
k++; k++;
if (k>0) if (k>0)

View File

@ -42,12 +42,14 @@
* @param output_view_name The name to give to the output view. * @param output_view_name The name to give to the output view.
* @param output_view_comments The comments to associate to the output view. * @param output_view_comments The comments to associate to the output view.
* @param ecotag_threshold The threshold at which to assign. * @param ecotag_threshold The threshold at which to assign.
* @param bubble_threshold The threshold at which to look for an LCA (i.e. minimum identity considered for the assignment circle);
* the threshold actually used will be the highest between this value and the best assignment score found.
* *
* The algorithm works like this: * The algorithm works like this:
* For each query sequence: * For each query sequence:
* Align with reference database * Align with reference database
* Keep the indices of all the best matches * Keep the indices of all the best matches
* For each kept index, get the LCA at that threshold as stored in the reference database, then the LCA of those LCAs * For each kept index, get the LCA at the highest threshold between bubble_threshold and the best assignment score found (as stored in the reference database), then the LCA of those LCAs
* Write result (max score, threshold, taxid and scientific name of the LCA assigned, list of the ids of the best matches) * Write result (max score, threshold, taxid and scientific name of the LCA assigned, list of the ids of the best matches)
* *
* @returns A value indicating the success of the operation. * @returns A value indicating the success of the operation.
@ -65,7 +67,8 @@ int obi_ecotag(const char* dms_name,
const char* taxonomy_name, const char* taxonomy_name,
const char* output_view_name, const char* output_view_name,
const char* output_view_comments, const char* output_view_comments,
double ecotag_threshold); double ecotag_threshold,
double bubble_threshold);
#endif /* OBI_ECOTAG_H_ */ #endif /* OBI_ECOTAG_H_ */
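
As the updated documentation above describes, the LCA is now looked up at the highest of the bubble threshold and the best score found for the query, scanning the per-reference score array stored in the database. A tiny illustrative sketch of that cutoff rule (the names mirror the C code; the score values are invented):

    # Illustrative sketch of the LCA-threshold selection in obi_ecotag
    # (not the C implementation).
    def lca_array_cutoff(score_array, bubble_threshold, best_score):
        lca_threshold = max(bubble_threshold, best_score)
        k = 0
        while k < len(score_array) and score_array[k] >= lca_threshold:
            k += 1
        return k   # number of leading LCA entries kept for this best match

    # bubble threshold 0.97, best score 0.99: the cutoff is taken at 0.99
    print(lca_array_cutoff([1.0, 0.995, 0.98, 0.97], 0.97, 0.99))   # -> 2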

View File

@ -155,35 +155,35 @@ static int create_alignment_output_columns(Obiview_p output_view,
bool normalize, int reference, bool similarity_mode) bool normalize, int reference, bool similarity_mode)
{ {
// Create the column for the ids of the 1st sequence aligned // Create the column for the ids of the 1st sequence aligned
if (obi_view_add_column(output_view, ID1_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, id1_indexer_name, NULL, -1, ID1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, ID1_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, id1_indexer_name, NULL, -1, ID1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the first column for the sequence ids when aligning"); obidebug(1, "\nError creating the first column for the sequence ids when aligning");
return -1; return -1;
} }
// Create the column for the ids of the 2nd sequence aligned // Create the column for the ids of the 2nd sequence aligned
if (obi_view_add_column(output_view, ID2_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, id2_indexer_name, NULL, -1, ID2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, ID2_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, id2_indexer_name, NULL, -1, ID2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the second column for the sequence ids when aligning"); obidebug(1, "\nError creating the second column for the sequence ids when aligning");
return -1; return -1;
} }
// Create the column for the index (in the input view) of the first sequences aligned // Create the column for the index (in the input view) of the first sequences aligned
if (obi_view_add_column(output_view, IDX1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, IDX1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, IDX1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, IDX1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the first column for the sequence indices when aligning"); obidebug(1, "\nError creating the first column for the sequence indices when aligning");
return -1; return -1;
} }
// Create the column for the index (in the input view) of the second sequences aligned // Create the column for the index (in the input view) of the second sequences aligned
if (obi_view_add_column(output_view, IDX2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, IDX2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, IDX2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, IDX2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the second column for the sequence indices when aligning"); obidebug(1, "\nError creating the second column for the sequence indices when aligning");
return -1; return -1;
} }
// Create the column for the LCS length // Create the column for the LCS length
if (obi_view_add_column(output_view, LCS_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, LCS_LENGTH_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, LCS_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, LCS_LENGTH_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the column for the LCS length when aligning"); obidebug(1, "\nError creating the column for the LCS length when aligning");
return -1; return -1;
@ -192,7 +192,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
// Create the column for the alignment length if it is computed // Create the column for the alignment length if it is computed
if ((reference == ALILEN) && (normalize || !similarity_mode)) if ((reference == ALILEN) && (normalize || !similarity_mode))
{ {
if (obi_view_add_column(output_view, ALI_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ALI_LENGTH_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, ALI_LENGTH_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, ALI_LENGTH_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the column for the alignment length when aligning"); obidebug(1, "\nError creating the column for the alignment length when aligning");
return -1; return -1;
@ -201,7 +201,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
// Create the column for the alignment score // Create the column for the alignment score
if (normalize) if (normalize)
{ {
if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0) if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
{ {
obidebug(1, "\nError creating the column for the score when aligning"); obidebug(1, "\nError creating the column for the score when aligning");
return -1; return -1;
@ -209,7 +209,7 @@ static int create_alignment_output_columns(Obiview_p output_view,
} }
else else
{ {
if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0) if (obi_view_add_column(output_view, SCORE_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, SCORE_COLUMN_NAME, true) < 0)
{ {
obidebug(1, "\nError creating the column for the score when aligning"); obidebug(1, "\nError creating the column for the score when aligning");
return -1; return -1;
@ -219,14 +219,14 @@ static int create_alignment_output_columns(Obiview_p output_view,
if (print_seq) if (print_seq)
{ {
// Create the column for the first sequences aligned // Create the column for the first sequences aligned
if (obi_view_add_column(output_view, SEQ1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, seq1_indexer_name, NULL, -1, SEQ1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, SEQ1_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, seq1_indexer_name, NULL, -1, SEQ1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the first column for the sequences when aligning"); obidebug(1, "\nError creating the first column for the sequences when aligning");
return -1; return -1;
} }
// Create the column for the second sequences aligned // Create the column for the second sequences aligned
if (obi_view_add_column(output_view, SEQ2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, seq2_indexer_name, NULL, -1, SEQ2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, SEQ2_COLUMN_NAME, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, seq2_indexer_name, NULL, -1, SEQ2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the second column for the sequences when aligning"); obidebug(1, "\nError creating the second column for the sequences when aligning");
return -1; return -1;
@ -235,14 +235,14 @@ static int create_alignment_output_columns(Obiview_p output_view,
if (print_count) if (print_count)
{ {
// Create the column for the count of the first sequences aligned // Create the column for the count of the first sequences aligned
if (obi_view_add_column(output_view, COUNT1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, COUNT1_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, COUNT1_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, COUNT1_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the first column for the sequence counts when aligning"); obidebug(1, "\nError creating the first column for the sequence counts when aligning");
return -1; return -1;
} }
// Create the column for the count of the second sequences aligned // Create the column for the count of the second sequences aligned
if (obi_view_add_column(output_view, COUNT2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, COUNT2_COLUMN_COMMENTS, true) < 0) if (obi_view_add_column(output_view, COUNT2_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, COUNT2_COLUMN_COMMENTS, true) < 0)
{ {
obidebug(1, "\nError creating the second column for the sequence counts when aligning"); obidebug(1, "\nError creating the second column for the sequence counts when aligning");
return -1; return -1;
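
Every call changed in this file (and in the obiview.c hunks further down) gains one extra boolean argument: the new dict_column flag, inserted between elt_names_formatted and tuples (see the updated obi_view_add_column() prototype in the obiview.h hunk below). A minimal sketch of the new argument order, modelled on the COUNT column call that appears later in this changeset; the inline comments naming each flag are mine:

    /* Sketch only: mirrors the COUNT_COLUMN call shown in the obiview.c hunk below.
     * The second boolean is the new dict_column flag; the comments follow the
     * updated prototype (elt_names_formatted, dict_column, tuples, to_eval). */
    if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL,
                            false,   /* elt_names_formatted */
                            false,   /* dict_column (new)   */
                            false,   /* tuples              */
                            false,   /* to_eval             */
                            NULL, NULL, -1, "{}", true) < 0)
        return -1;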

View File

@ -582,6 +582,7 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
{ {
size_t file_size; size_t file_size;
size_t new_data_size; size_t new_data_size;
size_t header_size;
double multiple; double multiple;
int file_descriptor; int file_descriptor;
@ -589,6 +590,8 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
multiple = ceil((double) (ONE_IF_ZERO((avl->header)->nb_items * sizeof(AVL_node_t))) / (double) getpagesize()); multiple = ceil((double) (ONE_IF_ZERO((avl->header)->nb_items * sizeof(AVL_node_t))) / (double) getpagesize());
new_data_size = ((size_t) multiple) * getpagesize(); new_data_size = ((size_t) multiple) * getpagesize();
header_size = (avl->header)->header_size;
// Check that it is actually greater than the current size of the file, otherwise no need to truncate // Check that it is actually greater than the current size of the file, otherwise no need to truncate
if ((avl->header)->avl_size == new_data_size) if ((avl->header)->avl_size == new_data_size)
return 0; return 0;
@ -596,16 +599,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
// Get the file descriptor // Get the file descriptor
file_descriptor = avl->avl_fd; file_descriptor = avl->avl_fd;
-	// Unmap the tree before truncating the file
+	// Unmap the entire file before truncating it (WSL requirement)
if (munmap(avl->tree, (avl->header)->avl_size) < 0) if (munmap(avl->tree, (avl->header)->avl_size) < 0)
{ {
obi_set_errno(OBI_AVL_ERROR); obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the tree of an AVL before truncating"); obidebug(1, "\nError munmapping the tree of an AVL before truncating");
return -1; return -1;
} }
if (munmap(avl->header, header_size) < 0)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the tree of an AVL before truncating");
return -1;
}
// Truncate the file // Truncate the file
-	file_size = (avl->header)->header_size + new_data_size;
+	file_size = header_size + new_data_size;
if (ftruncate(file_descriptor, file_size) < 0) if (ftruncate(file_descriptor, file_size) < 0)
{ {
obi_set_errno(OBI_AVL_ERROR); obi_set_errno(OBI_AVL_ERROR);
@ -613,7 +622,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
return -1; return -1;
} }
-	// Remap the data
+	// Remap the header and the data
avl->header = mmap(NULL,
header_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
file_descriptor,
0
);
if (avl->header == MAP_FAILED)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
return -1;
}
avl->tree = mmap(NULL, avl->tree = mmap(NULL,
new_data_size, new_data_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
@ -640,6 +664,7 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
{ {
size_t file_size; size_t file_size;
index_t new_data_size; index_t new_data_size;
size_t header_size;
double multiple; double multiple;
int file_descriptor; int file_descriptor;
@ -647,6 +672,8 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
multiple = ceil((double) (ONE_IF_ZERO((avl_data->header)->data_size_used)) / (double) getpagesize()); multiple = ceil((double) (ONE_IF_ZERO((avl_data->header)->data_size_used)) / (double) getpagesize());
new_data_size = ((index_t) multiple) * getpagesize(); new_data_size = ((index_t) multiple) * getpagesize();
header_size = (avl_data->header)->header_size;
// Check that it is actually greater than the current size of the file, otherwise no need to truncate // Check that it is actually greater than the current size of the file, otherwise no need to truncate
if ((avl_data->header)->data_size_max >= new_data_size) if ((avl_data->header)->data_size_max >= new_data_size)
return 0; return 0;
@ -654,7 +681,8 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
// Get the file descriptor // Get the file descriptor
file_descriptor = avl_data->data_fd; file_descriptor = avl_data->data_fd;
-	// Unmap the data before truncating the file
+	// Unmap the entire file before truncating it (WSL requirement)
if (munmap(avl_data->data, (avl_data->header)->data_size_max) < 0) if (munmap(avl_data->data, (avl_data->header)->data_size_max) < 0)
{ {
obi_set_errno(OBI_AVL_ERROR); obi_set_errno(OBI_AVL_ERROR);
@ -662,8 +690,15 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
return -1; return -1;
} }
if (munmap(avl_data->header, header_size) < 0)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the header of an AVL before truncating");
return -1;
}
// Truncate the file // Truncate the file
-	file_size = (avl_data->header)->header_size + new_data_size;
+	file_size = header_size + new_data_size;
if (ftruncate(file_descriptor, file_size) < 0) if (ftruncate(file_descriptor, file_size) < 0)
{ {
obi_set_errno(OBI_AVL_ERROR); obi_set_errno(OBI_AVL_ERROR);
@ -672,6 +707,22 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
} }
// Remap the data // Remap the data
avl_data->header = mmap(NULL,
header_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
file_descriptor,
0
);
if (avl_data->header == MAP_FAILED)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
return -1;
}
avl_data->data = mmap(NULL, avl_data->data = mmap(NULL,
new_data_size, new_data_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
@ -710,6 +761,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
header_size = (avl->header)->header_size; header_size = (avl->header)->header_size;
file_size = header_size + new_data_size; file_size = header_size + new_data_size;
// Unmap the entire file before truncating it (WSL requirement)
if (munmap(avl->tree, old_data_size) < 0)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
return -1;
}
if (munmap(avl->header, header_size) < 0)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the header of an AVL tree file before enlarging");
return -1;
}
// Enlarge the file // Enlarge the file
if (ftruncate(avl_file_descriptor, file_size) < 0) if (ftruncate(avl_file_descriptor, file_size) < 0)
{ {
@ -718,12 +783,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
return -1; return -1;
} }
-	// Unmap and re-map the data
-	if (munmap(avl->tree, old_data_size) < 0)
+	// Re-map
+	avl->header = mmap(NULL,
+	                   header_size,
+	                   PROT_READ | PROT_WRITE,
+	                   MAP_SHARED,
+	                   avl_file_descriptor,
+	                   0
+	                  );
+	if (avl->header == MAP_FAILED)
 	{
 		obi_set_errno(OBI_AVL_ERROR);
-		obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
+		obidebug(1, "\nError re-mmapping the header of an AVL tree file after enlarging the file");
 		return -1;
 	}
@ -768,6 +841,20 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
header_size = (avl_data->header)->header_size; header_size = (avl_data->header)->header_size;
file_size = header_size + new_data_size; file_size = header_size + new_data_size;
// Unmap the entire file before truncating it (WSL requirement)
if (munmap(avl_data->data, old_data_size) < 0)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
return -1;
}
if (munmap(avl_data->header, header_size) < 0)
{
obi_set_errno(OBI_AVL_ERROR);
obidebug(1, "\nError munmapping the header of an AVL tree data file before enlarging");
return -1;
}
// Enlarge the file // Enlarge the file
if (ftruncate(avl_data_file_descriptor, file_size) < 0) if (ftruncate(avl_data_file_descriptor, file_size) < 0)
{ {
@ -776,12 +863,19 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
return -1; return -1;
} }
-	// Unmap and re-map the data
-	if (munmap(avl_data->data, old_data_size) < 0)
+	// Re-map
+	avl_data->header = mmap(NULL,
+	                        header_size,
+	                        PROT_READ | PROT_WRITE,
+	                        MAP_SHARED,
+	                        avl_data_file_descriptor,
+	                        0
+	                       );
+	if (avl_data->header == MAP_FAILED)
 	{
 		obi_set_errno(OBI_AVL_ERROR);
-		obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
+		obidebug(1, "\nError re-mmapping the header of an AVL tree data file after enlarging the file");
 		return -1;
 	}
@ -792,7 +886,6 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
avl_data_file_descriptor, avl_data_file_descriptor,
header_size header_size
); );
if (avl_data->data == MAP_FAILED) if (avl_data->data == MAP_FAILED)
{ {
obi_set_errno(OBI_AVL_ERROR); obi_set_errno(OBI_AVL_ERROR);
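
All of the resize paths above now follow the same pattern: drop every mapping of the file (data and header) before ftruncate(), then rebuild both mappings afterwards, because WSL refuses to truncate a file that still has live mappings (hence the "(WSL requirement)" comments). A condensed sketch of that pattern with illustrative names, not the actual OBITools3 functions, assuming the header size is a multiple of the page size (mmap offsets must be page-aligned):

    #include <sys/mman.h>
    #include <unistd.h>

    /* Sketch only: generic unmap -> ftruncate -> remap sequence for a file
     * laid out as [header][data]; error reporting trimmed to return codes. */
    static int resize_mapped_file(int fd, void** header, size_t header_size,
                                  void** data, size_t old_data_size, size_t new_data_size)
    {
        /* 1. Unmap every mapping of the file first (WSL requirement). */
        if (munmap(*data, old_data_size) < 0)
            return -1;
        if (munmap(*header, header_size) < 0)
            return -1;

        /* 2. Now the file can be grown or shrunk. */
        if (ftruncate(fd, header_size + new_data_size) < 0)
            return -1;

        /* 3. Remap the header at offset 0 and the data right after it. */
        *header = mmap(NULL, header_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (*header == MAP_FAILED)
            return -1;
        *data = mmap(NULL, new_data_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, header_size);
        if (*data == MAP_FAILED)
            return -1;
        return 0;
    }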


@ -316,6 +316,15 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
multiple = ceil((double) new_size / (double) getpagesize()); multiple = ceil((double) new_size / (double) getpagesize());
rounded_new_size = multiple * getpagesize(); rounded_new_size = multiple * getpagesize();
// Unmap the entire file before truncating it (WSL requirement)
if (munmap(dms->infos, (dms->infos)->file_size) < 0)
{
obi_set_errno(OBIDMS_UNKNOWN_ERROR);
obidebug(1, "\nError munmapping a DMS information file when enlarging");
close(infos_file_descriptor);
return -1;
}
// Enlarge the file // Enlarge the file
if (ftruncate(infos_file_descriptor, rounded_new_size) < 0) if (ftruncate(infos_file_descriptor, rounded_new_size) < 0)
{ {
@ -325,15 +334,7 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
return -1; return -1;
} }
-	// Unmap and remap the file
-	if (munmap(dms->infos, (dms->infos)->file_size) < 0)
-	{
-		obi_set_errno(OBIDMS_UNKNOWN_ERROR);
-		obidebug(1, "\nError munmapping a DMS information file when enlarging");
-		close(infos_file_descriptor);
-		return -1;
-	}
+	// Remap the file
dms->infos = mmap(NULL, dms->infos = mmap(NULL,
rounded_new_size, rounded_new_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
@ -1409,6 +1410,107 @@ DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name)
} }
char* obi_dms_formatted_infos(OBIDMS_p dms, bool detailed)
{
char* dms_infos = NULL;
char* view_infos = NULL;
char* view_name = NULL;
char* tax_name = NULL;
char* all_tax_dir_path = NULL;
int i;
struct dirent* dp;
Obiview_p view;
// DMS name
dms_infos = (char*) malloc((strlen("# DMS name: ")+strlen(dms->dms_name)+strlen("\n# Views:\n")+1) * sizeof(char));
if (dms_infos == NULL)
{
obidebug(1, "\nError allocating memory for DMS formatted infos");
return NULL;
}
strcpy(dms_infos, "# DMS name: ");
strcat(dms_infos, dms->dms_name);
strcat(dms_infos, "\n# Views:\n");
// Go through views and get their infos
rewinddir(dms->view_directory);
while ((dp = readdir(dms->view_directory)) != NULL)
{
if ((dp->d_name)[0] == '.')
continue;
i=0;
while ((dp->d_name)[i] != '.')
i++;
view_name = (char*) malloc((i+1) * sizeof(char));
if (view_name == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for a view name when getting formatted DMS infos: file %s", dp->d_name);
return NULL;
}
strncpy(view_name, dp->d_name, i);
view_name[i] = '\0';
view = obi_open_view(dms, view_name);
if (view == NULL)
{
obidebug(1, "\nError opening a view to get DMS formatted infos");
return NULL;
}
if (detailed)
view_infos = obi_view_formatted_infos(view, detailed);
else
view_infos = obi_view_formatted_infos_one_line(view);
if (view_infos == NULL)
{
obidebug(1, "\nError getting a view infos to get DMS formatted infos");
return NULL;
}
dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen(view_infos)+1) * sizeof(char));
if (dms_infos == NULL)
{
obidebug(1, "\nError reallocating memory for DMS formatted infos");
return NULL;
}
strcat(dms_infos, view_infos);
if (obi_save_and_close_view(view) < 0)
{
obidebug(1, "\nError closing view while getting DMS formatted infos");
return NULL;
}
if (detailed)
{
dms_infos = realloc(dms_infos, (strlen(dms_infos)+2) * sizeof(char));
strcat(dms_infos, "\n");
}
}
// Add taxonomies
dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen("\n# Taxonomies:\n")+1) * sizeof(char));
if (dms_infos == NULL)
{
obidebug(1, "\nError reallocating memory for DMS formatted infos");
return NULL;
}
strcat(dms_infos, "# Taxonomies:\n");
rewinddir(dms->tax_directory);
while ((dp = readdir(dms->tax_directory)) != NULL)
{
if ((dp->d_name)[0] == '.')
continue;
tax_name = dp->d_name;
dms_infos = realloc(dms_infos, (strlen(dms_infos)+strlen(" # ")+strlen(tax_name)+1) * sizeof(char));
if (dms_infos == NULL)
{
obidebug(1, "\nError reallocating memory for DMS formatted infos");
return NULL;
}
strcat(dms_infos, " # ");
strcat(dms_infos, tax_name);
}
return dms_infos;
}
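
obi_dms_formatted_infos() returns a heap-allocated string that the caller must free (as the new obidms.h documentation below states). A minimal, hypothetical usage sketch; the header name in the include is an assumption:

    #include <stdio.h>
    #include <stdlib.h>
    #include "obidms.h"   /* assumed header providing OBIDMS_p and the prototype */

    /* Sketch only: print a one-line-per-view summary of a DMS, then release it. */
    static int print_dms_summary(OBIDMS_p dms)
    {
        char* infos = obi_dms_formatted_infos(dms, false);
        if (infos == NULL)
            return -1;
        fputs(infos, stderr);
        free(infos);
        return 0;
    }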
// TODO move somewhere else maybe // TODO move somewhere else maybe
// TODO discuss arguments // TODO discuss arguments
obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number) obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, const char* column_name, obiversion_t version_number)
@ -1474,8 +1576,8 @@ obiversion_t obi_import_column(const char* dms_path_1, const char* dms_path_2, c
// Create new column // Create new column
column_2 = obi_create_column(dms_2, column_name, header_1->returned_data_type, header_1->line_count, column_2 = obi_create_column(dms_2, column_name, header_1->returned_data_type, header_1->line_count,
-	                             header_1->nb_elements_per_line, header_1->elements_names, true, header_1->tuples,
-	                             header_1->to_eval, new_avl_name, (header_1->associated_column).column_name,
+	                             header_1->nb_elements_per_line, header_1->elements_names, true, header_1->dict_column,
+	                             header_1->tuples, header_1->to_eval, new_avl_name, (header_1->associated_column).column_name,
(header_1->associated_column).version, header_1->comments); (header_1->associated_column).version, header_1->comments);
if (column_2 == NULL) if (column_2 == NULL)
@ -1712,6 +1814,7 @@ int obi_import_view(const char* dms_path_1, const char* dms_path_2, const char*
false, false,
false, false,
false, false,
false,
NULL, NULL,
NULL, NULL,
-1, -1,


@ -40,7 +40,7 @@
*/ */
#define MAX_NB_OPENED_INDEXERS (1000) /**< The maximum number of indexers open at the same time. #define MAX_NB_OPENED_INDEXERS (1000) /**< The maximum number of indexers open at the same time.
*/ */
-#define MAX_PATH_LEN (1024)                    /**< Maximum length for the character string defining a
+#define MAX_PATH_LEN (2048)                    /**< Maximum length for the character string defining a
* file or directory path. * file or directory path.
*/ */
@ -459,6 +459,23 @@ char* obi_dms_get_full_path(OBIDMS_p dms, const char* path_name);
DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name); DIR* opendir_in_dms(OBIDMS_p dms, const char* path_name);
/**
* @brief Returns the informations of a DMS with a human readable format (dms name, taxonomies and view infos).
*
* @warning The returned pointer has to be freed by the caller.
*
* @param column A pointer on a DMS.
* @param detailed Whether the informations should contain detailed view infos.
*
* @returns A pointer on a character array where the formatted DMS informations are stored.
* @retval NULL if an error occurred.
*
* @since September 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
char* obi_dms_formatted_infos(OBIDMS_p dms, bool detailed);
/** /**
* @brief Imports a column, copying it from a DMS to another DMS, and returns the version of the column in the destination DMS. * @brief Imports a column, copying it from a DMS to another DMS, and returns the version of the column in the destination DMS.
* *


@ -2463,6 +2463,32 @@ int read_merged_dmp(const char* taxdump, OBIDMS_taxonomy_p tax, int32_t* delnode
return -1; return -1;
} }
// Write the rest of the taxa from the current taxa list
while (nT < (tax->taxa)->count)
{
// Add element from taxa list
// Enlarge structure if needed
if (n == buffer_size)
{
buffer_size = buffer_size * 2;
tax->merged_idx = (ecomergedidx_t*) realloc(tax->merged_idx, sizeof(ecomergedidx_t) + sizeof(ecomerged_t) * buffer_size);
if (tax->merged_idx == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError reallocating memory for a taxonomy structure");
closedir(tax_dir);
fclose(file);
return -1;
}
}
(tax->merged_idx)->merged[n].taxid = (tax->taxa)->taxon[nT].taxid;
(tax->merged_idx)->merged[n].idx = nT;
nT++;
n++;
}
// Store count // Store count
(tax->merged_idx)->count = n; (tax->merged_idx)->count = n;
@ -3649,6 +3675,18 @@ ecotx_t* obi_taxo_get_taxon_with_taxid(OBIDMS_taxonomy_p taxonomy, int32_t taxid
} }
char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
{
return (((taxonomy->names)->names)[idx]).name;
}
ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx)
{
return (((taxonomy->names)->names)[idx]).taxon;
}
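
These two accessors expose entries of the taxonomy name index (econameidx_t) directly. A hedged sketch of how they would typically be chained; "taxonomy" and "idx" are assumed to be a loaded taxonomy and a valid index into its name index, and <stdio.h> is assumed:

    /* Sketch only: resolve a name-index entry to its name and its taxon record. */
    char*    name  = obi_taxo_get_name_from_name_idx(taxonomy, idx);
    ecotx_t* taxon = obi_taxo_get_taxon_from_name_idx(taxonomy, idx);
    if ((name != NULL) && (taxon != NULL))
        printf("%s -> taxid %d\n", name, (int) (taxon->taxid));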
int obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid) // TODO discuss that this doesn't work with deprecated taxids int obi_taxo_is_taxon_under_taxid(ecotx_t* taxon, int32_t other_taxid) // TODO discuss that this doesn't work with deprecated taxids
{ {
ecotx_t* next_parent; ecotx_t* next_parent;


@ -75,7 +75,7 @@ typedef struct {
*/ */
int32_t max_taxid; /**< Maximum taxid existing in the taxon index. int32_t max_taxid; /**< Maximum taxid existing in the taxon index.
*/ */
-	int32_t buffer_size; /**< Number of taxa. // TODO kept this but not sure of its use
+	int32_t buffer_size; /**< . // TODO kept this but not sure of its use
*/ */
ecotx_t taxon[]; /**< Taxon array. ecotx_t taxon[]; /**< Taxon array.
*/ */
@ -447,8 +447,51 @@ ecotx_t* obi_taxo_get_superkingdom(ecotx_t* taxon, OBIDMS_taxonomy_p taxonomy);
const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks); const char* obi_taxo_rank_index_to_label(int32_t rank_idx, ecorankidx_t* ranks);
-// TODO
+/**
* @brief Function checking whether a taxid is included in a subset of the taxonomy.
*
* @param taxonomy A pointer on the taxonomy structure.
* @param restrict_to_taxids An array of taxids. The researched taxid must be under at least one of those array taxids.
* @param count Number of taxids in restrict_to_taxids.
* @param taxid The taxid to check.
*
* @returns A value indicating whether the taxid is included in the chosen subset of the taxonomy.
* @retval 0 if the taxid is not included in the subset of the taxonomy.
* @retval 1 if the taxid is included in the subset of the taxonomy.
*
* @since October 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
int obi_taxo_is_taxid_included(OBIDMS_taxonomy_p taxonomy, int obi_taxo_is_taxid_included(OBIDMS_taxonomy_p taxonomy,
int32_t* restrict_to_taxids, int32_t* restrict_to_taxids,
int32_t count, int32_t count,
int32_t taxid); int32_t taxid);
/**
* @brief Function returning the name of a taxon from its index in the taxonomy name index (econameidx_t).
*
* @param taxonomy A pointer on the taxonomy structure.
* @param idx The index at which the name is in the taxonomy name index (econameidx_t).
*
* @returns The taxon name.
*
* @since October 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
char* obi_taxo_get_name_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx);
/**
* @brief Function returning a taxon structure from its index in the taxonomy name index (econameidx_t).
*
* @param taxonomy A pointer on the taxonomy structure.
* @param idx The index at which the taxon is in the taxonomy name index (econameidx_t).
*
* @returns The taxon structure.
*
* @since October 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
ecotx_t* obi_taxo_get_taxon_from_name_idx(OBIDMS_taxonomy_p taxonomy, int32_t idx);


@ -1024,6 +1024,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
index_t nb_elements_per_line, index_t nb_elements_per_line,
char* elements_names, char* elements_names,
bool elt_names_formatted, bool elt_names_formatted,
bool dict_column,
bool tuples, bool tuples,
bool to_eval, bool to_eval,
const char* indexer_name, const char* indexer_name,
@ -1282,6 +1283,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
header->nb_elements_per_line = nb_elements_per_line; header->nb_elements_per_line = nb_elements_per_line;
header->stored_data_type = stored_data_type; header->stored_data_type = stored_data_type;
header->returned_data_type = returned_data_type; header->returned_data_type = returned_data_type;
header->dict_column = dict_column;
header->tuples = tuples; header->tuples = tuples;
header->to_eval = to_eval; header->to_eval = to_eval;
header->creation_date = time(NULL); header->creation_date = time(NULL);
@ -1611,6 +1613,7 @@ OBIDMS_column_p obi_clone_column(OBIDMS_p dms,
nb_elements_per_line, nb_elements_per_line,
(column_to_clone->header)->elements_names, (column_to_clone->header)->elements_names,
true, true,
(column_to_clone->header)->dict_column,
(column_to_clone->header)->tuples, (column_to_clone->header)->tuples,
(column_to_clone->header)->to_eval, (column_to_clone->header)->to_eval,
(column_to_clone->header)->indexer_name, (column_to_clone->header)->indexer_name,
@ -1725,16 +1728,32 @@ int obi_close_column(OBIDMS_column_p column)
int obi_clone_column_indexer(OBIDMS_column_p column) int obi_clone_column_indexer(OBIDMS_column_p column)
{ {
char* new_indexer_name; char* new_indexer_name;
int i;
-	new_indexer_name = obi_build_indexer_name((column->header)->name, (column->header)->version);
-	if (new_indexer_name == NULL)
-		return -1;
-	column->indexer = obi_clone_indexer(column->indexer, new_indexer_name);	// TODO Need to lock this somehow?
-	if (column->indexer == NULL)
+	i=0;
+	while (true) // find avl name not already used
 	{
-		obidebug(1, "\nError cloning a column's indexer to make it writable");
-		return -1;
+		new_indexer_name = obi_build_indexer_name((column->header)->name, ((column->header)->version)+i);
+		if (new_indexer_name == NULL)
+			return -1;
+		column->indexer = obi_clone_indexer(column->indexer, new_indexer_name);	// TODO Need to lock this somehow?
+		if (column->indexer == NULL)
+		{
+			if (errno == EEXIST)
+			{
+				free(new_indexer_name);
+				i++;
+			}
+			else
+			{
+				free(new_indexer_name);
+				obidebug(1, "\nError cloning a column's indexer to make it writable");
+				return -1;
+			}
+		}
+		else
+			break;
 	}
strcpy((column->header)->indexer_name, new_indexer_name); strcpy((column->header)->indexer_name, new_indexer_name);
@ -1750,6 +1769,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
{ {
size_t file_size; size_t file_size;
size_t data_size; size_t data_size;
size_t header_size;
index_t new_line_count; index_t new_line_count;
double multiple; double multiple;
int column_file_descriptor; int column_file_descriptor;
@ -1772,6 +1792,8 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
data_size = obi_array_sizeof((column->header)->stored_data_type, new_line_count, (column->header)->nb_elements_per_line); data_size = obi_array_sizeof((column->header)->stored_data_type, new_line_count, (column->header)->nb_elements_per_line);
header_size = (column->header)->header_size;
// Check that it is actually greater than the current data size, otherwise no need to truncate // Check that it is actually greater than the current data size, otherwise no need to truncate
if ((column->header)->data_size == data_size) if ((column->header)->data_size == data_size)
return 0; return 0;
@ -1836,7 +1858,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
return -1; return -1;
} }
-	// Unmap the data before truncating the file
+	// Unmap the entire file before truncating it (WSL requirement)
if (munmap(column->data, (column->header)->data_size) < 0) if (munmap(column->data, (column->header)->data_size) < 0)
{ {
obi_set_errno(OBICOL_UNKNOWN_ERROR); obi_set_errno(OBICOL_UNKNOWN_ERROR);
@ -1844,9 +1866,16 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
close(column_file_descriptor); close(column_file_descriptor);
return -1; return -1;
} }
if (munmap(column->header, header_size) < 0)
{
obi_set_errno(OBICOL_UNKNOWN_ERROR);
obidebug(1, "\nError munmapping the header of a column before truncating");
close(column_file_descriptor);
return -1;
}
// Truncate the column file // Truncate the column file
-	file_size = (column->header)->header_size + data_size;
+	file_size = header_size + data_size;
if (ftruncate(column_file_descriptor, file_size) < 0) if (ftruncate(column_file_descriptor, file_size) < 0)
{ {
obi_set_errno(OBICOL_UNKNOWN_ERROR); obi_set_errno(OBICOL_UNKNOWN_ERROR);
@ -1855,13 +1884,30 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
return -1; return -1;
} }
-	// Remap the data
+	// Remap the header and the data
column->header = mmap(NULL,
header_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
column_file_descriptor,
0
);
if (column->header == MAP_FAILED)
{
obi_set_errno(OBICOL_UNKNOWN_ERROR);
obidebug(1, "\nError re-mmapping the header of a column after truncating");
close(column_file_descriptor);
return -1;
}
column->data = mmap(NULL, column->data = mmap(NULL,
data_size, data_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_SHARED, MAP_SHARED,
column_file_descriptor, column_file_descriptor,
-	                    (column->header)->header_size
+	                    header_size
); );
if (column->data == MAP_FAILED) if (column->data == MAP_FAILED)
@ -2415,17 +2461,81 @@ char* obi_get_formatted_elements_names(OBIDMS_column_p column)
} }
-char* obi_column_formatted_infos(OBIDMS_column_p column)
+char* obi_column_formatted_infos(OBIDMS_column_p column, bool detailed)
 {
-	char* column_infos;
-	char* elt_names;
-	column_infos = malloc(1024 * sizeof(char));
+	char* column_infos = NULL;
+	char* elt_names = NULL;
+	char* data_type_str = NULL;
+	char* comments = NULL;
+	// Get element names informations
 	elt_names = obi_get_formatted_elements_names(column);
if (elt_names == NULL)
{
obidebug(1, "\nError getting formatted elements names for formatted columns infos");
return NULL;
}
// Get data type informations
data_type_str = name_data_type((column->header)->returned_data_type);
if (data_type_str == NULL)
{
obidebug(1, "\nError getting formatted data type for formatted columns infos");
return NULL;
}
// Get commments if detailed informations required
if (detailed)
comments = (column->header)->comments;
// Build the string of formatted infos, allocating memory as needed
// Data type
column_infos = (char*) malloc((strlen("data type: ")+strlen(data_type_str)+1) * sizeof(char));
if (column_infos == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for formatted column infos");
return NULL;
}
strcpy(column_infos, "data type: ");
strcat(column_infos, data_type_str);
// Element names if more than 1
if ((column->header)->nb_elements_per_line > 1)
{
column_infos = realloc(column_infos, (strlen(column_infos)+strlen(", elements: ")+strlen(elt_names)+1) * sizeof(char));
if (column_infos == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for formatted column infos");
return NULL;
}
strcat(column_infos, ", elements: ");
strcat(column_infos, elt_names);
}
if (detailed && (strlen(comments)>2)) // Add all comments if required and not empty
{
column_infos = realloc(column_infos, (strlen(column_infos)+strlen("\nComments:\n")+strlen(comments)+1) * sizeof(char));
if (column_infos == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for formatted column infos");
return NULL;
}
strcat(column_infos, "\nComments:\n");
strcat(column_infos, comments);
}
// "data type: OBI_TYPE, element names: [formatted element names](, all comments)"
free(elt_names); free(elt_names);
free(data_type_str);
return column_infos; return column_infos;
} }
@ -2472,7 +2582,6 @@ int obi_column_prepare_to_set_value(OBIDMS_column_p column, index_t line_nb, ind
} }
int obi_column_prepare_to_get_value(OBIDMS_column_p column, index_t line_nb) int obi_column_prepare_to_get_value(OBIDMS_column_p column, index_t line_nb)
{ {
if ((line_nb+1) > ((column->header)->line_count)) if ((line_nb+1) > ((column->header)->line_count))


@ -77,6 +77,8 @@ typedef struct OBIDMS_column_header {
OBIType_t stored_data_type; /**< Type of the data that is actually stored in the data OBIType_t stored_data_type; /**< Type of the data that is actually stored in the data
* part of the column. * part of the column.
*/ */
bool dict_column; /**< Whether the column contains dictionary-like values.
*/
bool tuples; /**< A boolean indicating whether the column contains indices referring to indexed tuples. bool tuples; /**< A boolean indicating whether the column contains indices referring to indexed tuples.
*/ */
bool to_eval; /**< A boolean indicating whether the column contains expressions that should be evaluated bool to_eval; /**< A boolean indicating whether the column contains expressions that should be evaluated
@ -249,6 +251,7 @@ size_t obi_calculate_header_size(index_t nb_elements_per_line, int64_t elts_name
* @param elements_names The names of the elements with ';' as separator (no terminal ';'), * @param elements_names The names of the elements with ';' as separator (no terminal ';'),
* NULL or "" if the default names are to be used ("0\01\02\0...\0n"). * NULL or "" if the default names are to be used ("0\01\02\0...\0n").
* @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()). * @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()).
* @param dict_column A boolean indicating whether the column should contain dictionary-like values.
* @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples. * @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples.
* @param to_eval A boolean indicating whether the column contains expressions that should be evaluated * @param to_eval A boolean indicating whether the column contains expressions that should be evaluated
* (typically OBI_STR columns containing character strings to be evaluated by Python). * (typically OBI_STR columns containing character strings to be evaluated by Python).
@ -271,6 +274,7 @@ OBIDMS_column_p obi_create_column(OBIDMS_p dms,
index_t nb_elements_per_line, index_t nb_elements_per_line,
char* elements_names, char* elements_names,
bool elt_names_formatted, bool elt_names_formatted,
bool dict_column,
bool tuples, bool tuples,
bool to_eval, bool to_eval,
const char* indexer_name, const char* indexer_name,
@ -505,12 +509,37 @@ index_t obi_column_get_element_index_from_name(OBIDMS_column_p column, const cha
char* obi_get_elements_names(OBIDMS_column_p column); char* obi_get_elements_names(OBIDMS_column_p column);
-// TODO
-//char* obi_get_formatted_elements_names(OBIDMS_column_p column);
+/**
+ * @brief Recovers the elements names of the lines of a column with a human readable format ("0; 1; 2; ...; n\0").
*
* @warning The returned pointer has to be freed by the caller.
*
* @param column A pointer on an OBIDMS column.
*
* @returns A pointer on a character array where the elements names are stored.
* @retval NULL if an error occurred.
*
* @since September 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
char* obi_get_formatted_elements_names(OBIDMS_column_p column);
-// TODO
-//char* obi_column_formatted_infos(OBIDMS_column_p column);
+/**
+ * @brief Returns the informations of a column with a human readable format (data type, element names, comments).
*
* @warning The returned pointer has to be freed by the caller.
*
* @param column A pointer on an OBIDMS column.
* @param detailed Whether the informations should contain column comments or just data type and element names.
*
* @returns A pointer on a character array where the formatted column informations are stored.
* @retval NULL if an error occurred.
*
* @since September 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
char* obi_column_formatted_infos(OBIDMS_column_p column, bool detailed);
/** /**


@ -25,7 +25,7 @@
* @author Celine Mercier (celine.mercier@metabarcoding.org) * @author Celine Mercier (celine.mercier@metabarcoding.org)
* *
*/ */
-bool volatile keep_running;
+extern bool volatile keep_running;
void sig_handler(int signum); void sig_handler(int signum);
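
With the extern qualifier this header line is only a declaration, so exactly one .c file must still provide the definition; otherwise every translation unit including the header would carry its own keep_running object (or the link would fail with compilers that default to -fno-common). A minimal sketch of the usual split, assuming <stdbool.h> is included and that the definition lives next to the signal handler; the initial value and handler body are assumptions, not taken from the patch:

    /* In the header: declaration shared by every translation unit. */
    extern bool volatile keep_running;

    /* In exactly one .c file, e.g. the one defining sig_handler() (assumption): */
    bool volatile keep_running = true;

    void sig_handler(int signum)
    {
        (void) signum;
        keep_running = false;   /* cooperative shutdown flag checked by long-running loops */
    }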


@ -29,6 +29,8 @@
#define OBIQual_int_NA (NULL) /**< NA value for the type OBI_QUAL if the quality is in integer format */ #define OBIQual_int_NA (NULL) /**< NA value for the type OBI_QUAL if the quality is in integer format */
#define OBITuple_NA (NULL) /**< NA value for tuples of any type */ #define OBITuple_NA (NULL) /**< NA value for tuples of any type */
#define OBI_INT_MAX (INT32_MAX) /**< Maximum value for the type OBI_INT */
/** /**
* @brief enum for the boolean OBIType. * @brief enum for the boolean OBIType.
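
OBI_INT_MAX simply gives a name to the upper bound of the OBI_INT type. A purely illustrative range check it makes possible, assuming <stdint.h> and <stdbool.h>; parse_integer_token() is a hypothetical helper, not a function from the codebase:

    /* Sketch only: reject (or reroute) values that do not fit an OBI_INT column. */
    int64_t value = parse_integer_token(token);
    bool fits_in_obi_int = (value <= OBI_INT_MAX);
    if (!fits_in_obi_int)
        obidebug(1, "\nValue too large for an OBI_INT column");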


@ -17,6 +17,7 @@
#include <sys/mman.h> #include <sys/mman.h>
#include <inttypes.h> #include <inttypes.h>
#include <math.h> #include <math.h>
#include <time.h>
//#include <ctype.h> //#include <ctype.h>
#include "obiview.h" #include "obiview.h"
@ -637,6 +638,15 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
multiple = ceil((double) new_size / (double) getpagesize()); multiple = ceil((double) new_size / (double) getpagesize());
rounded_new_size = multiple * getpagesize(); rounded_new_size = multiple * getpagesize();
// Unmap the entire file before truncating it (WSL requirement)
if (munmap(view->infos, (view->infos)->file_size) < 0)
{
obi_set_errno(OBIVIEW_ERROR);
obidebug(1, "\nError munmapping a view file when enlarging");
close(obiview_file_descriptor);
return -1;
}
// Enlarge the file // Enlarge the file
if (ftruncate(obiview_file_descriptor, rounded_new_size) < 0) if (ftruncate(obiview_file_descriptor, rounded_new_size) < 0)
{ {
@ -646,15 +656,7 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
return -1; return -1;
} }
-	// Unmap and remap the file
-	if (munmap(view->infos, (view->infos)->file_size) < 0)
-	{
-		obi_set_errno(OBIVIEW_ERROR);
-		obidebug(1, "\nError munmapping a view file when enlarging");
-		close(obiview_file_descriptor);
-		return -1;
-	}
+	// Remap the file
view->infos = mmap(NULL, view->infos = mmap(NULL,
rounded_new_size, rounded_new_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
@ -1185,6 +1187,7 @@ static int close_view(Obiview_p view)
obidebug(1, "\nError getting a column to close from the linked list of column pointers of a view"); obidebug(1, "\nError getting a column to close from the linked list of column pointers of a view");
return -1; return -1;
} }
if (obi_close_column(column) < 0) if (obi_close_column(column) < 0)
{ {
obidebug(1, "\nError closing a column while closing a view"); obidebug(1, "\nError closing a column while closing a view");
@ -1710,7 +1713,7 @@ Obiview_p obi_new_view(OBIDMS_p dms, const char* view_name, Obiview_p view_to_cl
// If there is a new line selection, build it by combining it with the one from the view to clone if there is one // If there is a new line selection, build it by combining it with the one from the view to clone if there is one
else if (line_selection != NULL) else if (line_selection != NULL)
{ {
view->line_selection = obi_create_column(view->dms, LINES_COLUMN_NAME, OBI_IDX, 0, 1, NULL, false, false, false, NULL, NULL, -1, NULL); view->line_selection = obi_create_column(view->dms, LINES_COLUMN_NAME, OBI_IDX, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, NULL);
if ((view->line_selection) == NULL) if ((view->line_selection) == NULL)
{ {
obidebug(1, "\nError creating a column corresponding to a line selection"); obidebug(1, "\nError creating a column corresponding to a line selection");
@ -1860,6 +1863,7 @@ Obiview_p obi_new_view(OBIDMS_p dms, const char* view_name, Obiview_p view_to_cl
false, false,
false, false,
false, false,
false,
NULL, NULL,
NULL, NULL,
-1, -1,
@ -1928,19 +1932,19 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
if ((view_to_clone == NULL) && create_default_columns) if ((view_to_clone == NULL) && create_default_columns)
{ {
// Adding sequence column // Adding sequence column
if (obi_view_add_column(view, NUC_SEQUENCE_COLUMN, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) // discuss using same indexer "NUC_SEQ_INDEXER" if (obi_view_add_column(view, NUC_SEQUENCE_COLUMN, -1, NULL, OBI_SEQ, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0) // discuss using same indexer "NUC_SEQ_INDEXER"
{ {
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view"); obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
return NULL; return NULL;
} }
// Adding id column // Adding id column
if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view"); obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
return NULL; return NULL;
} }
// Adding definition column // Adding definition column
if (obi_view_add_column(view, DEFINITION_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(view, DEFINITION_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view"); obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
return NULL; return NULL;
@ -1949,7 +1953,7 @@ Obiview_p obi_new_view_nuc_seqs(OBIDMS_p dms, const char* view_name, Obiview_p v
if (quality_column) if (quality_column)
{ {
associated_nuc_column = obi_view_get_column(view, NUC_SEQUENCE_COLUMN); associated_nuc_column = obi_view_get_column(view, NUC_SEQUENCE_COLUMN);
if (obi_view_add_column(view, QUALITY_COLUMN, -1, NULL, OBI_QUAL, 0, 1, NULL, false, false, false, NULL, (associated_nuc_column->header)->name, (associated_nuc_column->header)->version, "{}", true) < 0) // TODO discuss automatic association if (obi_view_add_column(view, QUALITY_COLUMN, -1, NULL, OBI_QUAL, 0, 1, NULL, false, false, false, false, NULL, (associated_nuc_column->header)->name, (associated_nuc_column->header)->version, "{}", true) < 0) // TODO discuss automatic association
{ {
obidebug(1, "Error adding an obligatory column in a nucleotide sequences view"); obidebug(1, "Error adding an obligatory column in a nucleotide sequences view");
return NULL; return NULL;
@ -2282,6 +2286,7 @@ int obi_view_add_column(Obiview_p view,
index_t nb_elements_per_line, index_t nb_elements_per_line,
char* elements_names, char* elements_names,
bool elt_names_formatted, bool elt_names_formatted,
bool dict_column,
bool tuples, bool tuples,
bool to_eval, bool to_eval,
const char* indexer_name, const char* indexer_name,
@ -2364,7 +2369,7 @@ int obi_view_add_column(Obiview_p view,
// Open or create the column // Open or create the column
if (create) if (create)
{ // Create column { // Create column
column = obi_create_column(view->dms, column_name, data_type, nb_lines, nb_elements_per_line, elements_names, elt_names_formatted, tuples, to_eval, indexer_name, associated_column_name, associated_column_version, comments); column = obi_create_column(view->dms, column_name, data_type, nb_lines, nb_elements_per_line, elements_names, elt_names_formatted, dict_column, tuples, to_eval, indexer_name, associated_column_name, associated_column_version, comments);
if (column == NULL) if (column == NULL)
{ {
obidebug(1, "\nError creating a column to add to a view"); obidebug(1, "\nError creating a column to add to a view");
@ -2603,6 +2608,144 @@ int obi_view_create_column_alias(Obiview_p view, const char* current_name, const
} }
char* obi_view_formatted_infos(Obiview_p view, bool detailed)
{
int i;
char* view_infos = NULL;
char* view_name = NULL;
time_t creation_date;
char* creation_date_str = NULL;
index_t line_count;
char line_count_str[256];
OBIDMS_column_p column;
char* column_alias = NULL;
char* column_infos = NULL;
char* comments = NULL;
// View name
view_name = (view->infos)->name;
view_infos = (char*) malloc((strlen("# View name:\n")+strlen(view_name)+1) * sizeof(char));
strcpy(view_infos, "# View name:\n");
strcat(view_infos, view_name);
// Date created
if (view->read_only) // Date not saved until view is finished writing
{
creation_date = (view->infos)->creation_date;
creation_date_str = ctime(&creation_date);
view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Date created:\n")+strlen(creation_date_str)+1) * sizeof(char));
strcat(view_infos, "\n# Date created:\n");
strcat(view_infos, creation_date_str);
}
// Line count
line_count = (view->infos)->line_count;
snprintf(line_count_str, sizeof line_count_str, "%lld", line_count);
view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Line count:\n")+strlen(line_count_str)+1) * sizeof(char));
strcat(view_infos, "# Line count:\n");
strcat(view_infos, line_count_str);
// Columns: go through each, print their alias then their infos
view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Columns:")+1) * sizeof(char));
strcat(view_infos, "\n# Columns:");
for (i=0; i<((view->infos)->column_count); i++)
{
column = *((OBIDMS_column_p*)ll_get(view->columns, i));
if (column == NULL)
{
obidebug(1, "\nError getting a column from the linked list of column pointers of a view to format view infos");
return NULL;
}
// Column alias
column_alias = (((view->infos)->column_references)[i]).alias;
view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n")+strlen(column_alias)+strlen(", ")+1) * sizeof(char));
strcat(view_infos, "\n");
strcat(view_infos, column_alias);
strcat(view_infos, ", ");
// Column infos
column_infos = obi_column_formatted_infos(column, detailed);
if (column_infos == NULL)
{
obidebug(1, "\nError getting column infos to format view infos");
return NULL;
}
view_infos = realloc(view_infos, (strlen(view_infos)+strlen(column_infos)+1) * sizeof(char));
strcat(view_infos, column_infos);
free(column_infos);
}
// Get commments if detailed informations required
if (detailed)
{
comments = (view->infos)->comments;
if (strlen(comments)>2) // Add all comments if not empty
{
view_infos = realloc(view_infos, (strlen(view_infos)+strlen("\n# Comments:\n")+strlen(comments)+1) * sizeof(char));
if (view_infos == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for formatted view infos");
return NULL;
}
strcat(view_infos, "\n# Comments:\n");
strcat(view_infos, comments);
}
}
view_infos = realloc(view_infos, (strlen(view_infos)+2) * sizeof(char));
strcat(view_infos, "\n");
return view_infos;
}
char* obi_view_formatted_infos_one_line(Obiview_p view)
{
int i;
char* view_infos = NULL;
char* view_name = NULL;
time_t creation_date;
char* creation_date_str = NULL;
index_t line_count;
char line_count_str[256];
// View name
view_name = (view->infos)->name;
view_infos = (char*) malloc((strlen(" # ")+strlen(view_name)+2) * sizeof(char));
strcpy(view_infos, " # ");
strcat(view_infos, view_name);
strcat(view_infos, ":");
// Date created
if (view->read_only) // Date not saved until view is finished writing
{
creation_date = (view->infos)->creation_date;
creation_date_str = ctime(&creation_date);
// Delete \n added by ctime
creation_date_str[strlen(creation_date_str)-1] = '\0';
view_infos = realloc(view_infos, (strlen(view_infos)+strlen(" Date created: ")+strlen(creation_date_str)+1) * sizeof(char));
strcat(view_infos, " Date created: ");
strcat(view_infos, creation_date_str);
}
// Line count
line_count = (view->infos)->line_count;
snprintf(line_count_str, sizeof line_count_str, "%lld", line_count);
view_infos = realloc(view_infos, (strlen(view_infos)+strlen(" ; Line count: ")+strlen(line_count_str)+1) * sizeof(char));
strcat(view_infos, " ; Line count: ");
strcat(view_infos, line_count_str);
view_infos = realloc(view_infos, (strlen(view_infos)+2) * sizeof(char));
strcat(view_infos, "\n");
return view_infos;
}
int obi_view_write_comments(Obiview_p view, const char* comments) int obi_view_write_comments(Obiview_p view, const char* comments)
{ {
size_t new_size; size_t new_size;
@ -3090,7 +3233,7 @@ int obi_create_auto_count_column(Obiview_p view)
return -1; return -1;
} }
if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(view, COUNT_COLUMN, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "Error adding an automatic count column in a view"); obidebug(1, "Error adding an automatic count column in a view");
return -1; return -1;
@ -3142,7 +3285,7 @@ int obi_create_auto_id_column(Obiview_p view, const char* prefix)
} }
// Create the new ID column // Create the new ID column
if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0) if (obi_view_add_column(view, ID_COLUMN, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{ {
obidebug(1, "Error adding an automatic ID column in a view"); obidebug(1, "Error adding an automatic ID column in a view");
return -1; return -1;


@ -30,54 +30,56 @@
#include "obiblob.h" #include "obiblob.h"
#define OBIVIEW_NAME_MAX_LENGTH (249) /**< The maximum length of an OBIDMS view name, without the extension. #define OBIVIEW_NAME_MAX_LENGTH (249) /**< The maximum length of an OBIDMS view name, without the extension.
*/ */
#define VIEW_TYPE_MAX_LENGTH (1024) /**< The maximum length of the type name of a view. #define VIEW_TYPE_MAX_LENGTH (1024) /**< The maximum length of the type name of a view.
*/ */
#define LINES_COLUMN_NAME "LINES" /**< The name of the column containing the line selections #define LINES_COLUMN_NAME "LINES" /**< The name of the column containing the line selections
* in all views. * in all views.
*/ */
#define VIEW_TYPE_NUC_SEQS "NUC_SEQS_VIEW" /**< The type name of views based on nucleotide sequences #define VIEW_TYPE_NUC_SEQS "NUC_SEQS_VIEW" /**< The type name of views based on nucleotide sequences
* and their metadata. * and their metadata.
*/ */
#define NUC_SEQUENCE_COLUMN "NUC_SEQ" /**< The name of the column containing the nucleotide sequences #define NUC_SEQUENCE_COLUMN "NUC_SEQ" /**< The name of the column containing the nucleotide sequences
* in NUC_SEQS_VIEW views. * in NUC_SEQS_VIEW views.
*/ */
#define ID_COLUMN "ID" /**< The name of the column containing the sequence identifiers #define ID_COLUMN "ID" /**< The name of the column containing the sequence identifiers
* in NUC_SEQS_VIEW views. * in NUC_SEQS_VIEW views.
*/ */
#define DEFINITION_COLUMN "DEFINITION" /**< The name of the column containing the sequence definitions #define DEFINITION_COLUMN "DEFINITION" /**< The name of the column containing the sequence definitions
* in NUC_SEQS_VIEW views. * in NUC_SEQS_VIEW views.
*/ */
#define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities #define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities
* in NUC_SEQS_VIEW views. * in NUC_SEQS_VIEW views.
*/ */
#define REVERSE_QUALITY_COLUMN "REVERSE_QUALITY" /**< The name of the column containing the sequence qualities #define REVERSE_QUALITY_COLUMN "REVERSE_QUALITY" /**< The name of the column containing the sequence qualities
* of the reverse read (generated by ngsfilter, used by alignpairedend). * of the reverse read (generated by ngsfilter, used by alignpairedend).
*/ */
#define REVERSE_SEQUENCE_COLUMN "REVERSE_SEQUENCE" /**< The name of the column containing the sequence #define REVERSE_SEQUENCE_COLUMN "REVERSE_SEQUENCE" /**< The name of the column containing the sequence
* of the reverse read (generated by ngsfilter, used by alignpairedend). * of the reverse read (generated by ngsfilter, used by alignpairedend).
*/ */
#define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities #define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities
* in NUC_SEQS_VIEW views. * in NUC_SEQS_VIEW views.
*/ */
#define COUNT_COLUMN "COUNT" /**< The name of the column containing the sequence counts #define COUNT_COLUMN "COUNT" /**< The name of the column containing the sequence counts
* in NUC_SEQS_VIEW views. * in NUC_SEQS_VIEW views.
*/ */
#define TAXID_COLUMN "TAXID" /**< The name of the column containing the taxids. TODO subtype of INT column? #define SCIENTIFIC_NAME_COLUMN "SCIENTIFIC_NAME" /**< The name of the column containing the taxon scientific name.
*/ */
#define MERGED_TAXID_COLUMN "MERGED_TAXID" /**< The name of the column containing the merged taxids information. #define TAXID_COLUMN "TAXID" /**< The name of the column containing the taxids. TODO subtype of INT column?
*/ */
#define MERGED_PREFIX "MERGED_" /**< The prefix to prepend to column names when merging informations during obi uniq. #define MERGED_TAXID_COLUMN "MERGED_TAXID" /**< The name of the column containing the merged taxids information.
*/ */
#define TAXID_DIST_COLUMN "TAXID_DIST" /**< The name of the column containing a dictionary of taxid:[list of ids] when merging informations during obi uniq. #define MERGED_PREFIX "MERGED_" /**< The prefix to prepend to column names when merging informations during obi uniq.
*/ */
#define MERGED_COLUMN "MERGED" /**< The name of the column containing a list of ids when merging informations during obi uniq. #define TAXID_DIST_COLUMN "TAXID_DIST" /**< The name of the column containing a dictionary of taxid:[list of ids] when merging informations during obi uniq.
*/ */
#define ID_PREFIX "seq" /**< The default prefix of sequence identifiers in automatic ID columns. #define MERGED_COLUMN "MERGED" /**< The name of the column containing a list of ids when merging informations during obi uniq.
*/ */
#define PREDICATE_KEY "predicates" /**< The key used in the json-formatted view comments to store predicates. #define ID_PREFIX "seq" /**< The default prefix of sequence identifiers in automatic ID columns.
*/ */
#define PREDICATE_KEY "predicates" /**< The key used in the json-formatted view comments to store predicates.
*/
/** /**
@ -398,6 +400,7 @@ Obiview_p obi_open_view(OBIDMS_p dms, const char* view_name);
* @param elements_names The names of the elements with ';' as separator (no terminal ';'), * @param elements_names The names of the elements with ';' as separator (no terminal ';'),
* if the column is created; NULL or "" if the default names are to be used ("0\01\02\0...\0n"). * if the column is created; NULL or "" if the default names are to be used ("0\01\02\0...\0n").
* @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()). * @param elt_names_formatted Whether the separator for the elements names is ';' (false), or '\0' (true, as formatted by format_elements_names()).
* @param dict_column Whether the column contains dictionary-like values.
* @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples. * @param tuples A boolean indicating whether the column should contain indices referring to indexed tuples.
* @param to_eval A boolean indicating whether the column contains expressions that should be evaluated * @param to_eval A boolean indicating whether the column contains expressions that should be evaluated
* (typically OBI_STR columns containing character strings to be evaluated by Python). * (typically OBI_STR columns containing character strings to be evaluated by Python).
@ -424,6 +427,7 @@ int obi_view_add_column(Obiview_p view,
index_t nb_elements_per_line, index_t nb_elements_per_line,
char* elements_names, char* elements_names,
bool elt_names_formatted, bool elt_names_formatted,
bool dict_column,
bool tuples, bool tuples,
bool to_eval, bool to_eval,
const char* indexer_name, const char* indexer_name,
@ -519,6 +523,39 @@ OBIDMS_column_p* obi_view_get_pointer_on_column_in_view(Obiview_p view, const ch
int obi_view_create_column_alias(Obiview_p view, const char* current_name, const char* alias); int obi_view_create_column_alias(Obiview_p view, const char* current_name, const char* alias);
/**
* @brief Returns the informations of a view with a human readable format (view name, date created, line count, column informations, comments).
*
* @warning The returned pointer has to be freed by the caller.
*
* @param column A pointer on a view.
* @param detailed Whether the informations should contain view comments.
*
* @returns A pointer on a character array where the formatted view informations are stored.
* @retval NULL if an error occurred.
*
* @since September 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
char* obi_view_formatted_infos(Obiview_p view, bool detailed);
/**
* @brief Returns the informations of a view with a human readable format on one line (view name, date created, line count).
*
* @warning The returned pointer has to be freed by the caller.
*
* @param column A pointer on a view.
*
* @returns A pointer on a character array where the formatted view informations are stored.
* @retval NULL if an error occurred.
*
* @since September 2020
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
char* obi_view_formatted_infos_one_line(Obiview_p view);
/** /**
* @brief Internal function writing new comments in a view file. * @brief Internal function writing new comments in a view file.
* *