Compare commits

..

37 Commits

Author SHA1 Message Date
8d0b17d87d Switch to version 3.0.0-beta14 2020-04-15 17:47:26 +02:00
343999a627 Taxonomy: fixed a critical memory bug when building the list of merged taxids 2020-04-15 17:46:13 +02:00
e9a40630e9 C: Columns: rounding column growth to ceil to avoid looping on small values 2020-04-13 19:02:10 +02:00
8dbcd3025a C: Columns: reduced column growth factor from 2 to 1.3 to avoid errno 28 (ENOSPC) 2020-04-13 14:47:56 +02:00
4cf635d001 Switch to version 3.0.0-beta13 2020-04-12 17:42:58 +02:00
b7e7cc232a Made completion script cleaner 2020-04-12 17:41:59 +02:00
b6ab792ceb C: made error message more detailed when checking that sequences and qualities match 2020-04-12 17:40:24 +02:00
ddea5a2964 obi import: fixed inconsequential error when precomputing number of entries in some formats 2020-04-12 17:38:42 +02:00
30852ab7d5 View bash history: removed useless shebang 2020-04-12 17:36:04 +02:00
4d0299904e all commands (almost): cleaner DMS closing at the end 2020-04-12 17:31:58 +02:00
eef5156d95 obi stats: fixed error when printing bool keys 2020-04-12 17:12:04 +02:00
e62c991bbc goes with previous commit 2020-04-10 11:22:26 +02:00
1218eed7fd ecopcr: now printing a warning instead of interrupting with an error when a taxid is not found 2020-04-10 11:22:04 +02:00
cd9cea8c97 obi import: fixed critical bug where the last entry of embl and genbank files was not imported 2020-04-09 19:26:27 +02:00
98cfb70d73 ecopcr: made some errors more informative 2020-04-09 09:15:28 +02:00
b9f68c76c8 ecopcr: added warnings and check of primer length (related to #75) 2020-04-05 18:40:56 +02:00
0b98371688 ngsfilter: added warning about primer length in -h (#75) 2020-04-05 18:39:20 +02:00
f0d152fcbd ngsfilter: now checking primer length (fixes #75) 2020-04-05 18:29:10 +02:00
8019dee68e ecotag: now closing all DMS properly 2020-04-05 13:20:49 +02:00
0b4a234671 Switch to version 3.0.0-beta11 2020-02-12 14:23:42 +01:00
d32cfdcce5 ecotag: fixed the generated column comments formatting that would generate errors 2020-02-12 14:23:17 +01:00
219c0d6fdc obi cat: Fixed the handling when concatenating views with dictionaries having different key sets 2020-02-12 14:21:39 +01:00
dc9f897917 switch to version 3.0.0-beta10 2020-02-02 21:15:27 +01:00
bb72682f7d obi import: new option --preread to do a first readthrough of the dataset if it contains huge dictionaries, for a much faster import 2020-02-02 21:12:34 +01:00
52920c3c71 URI decoding: dirty temporary fix for a bug where the default DMS gets in the way when the URI should be interpreted as a file 2020-02-02 21:11:05 +01:00
18c22cecf9 switch to version 3.0.0-beta9 2020-02-01 15:48:55 +01:00
1bfb96023c obi import: rewriting a column now deletes the old one to save disk space 2020-02-01 15:31:14 +01:00
c67d668989 obi import: fixed a bug when the first entry would contain a dictionary with one key. Switch to beta8 2020-01-29 20:23:39 +01:00
db0ac37d41 switch to version 3.0.0-beta7 2020-01-29 16:18:53 +01:00
d0c21ecd39 Removed an OpenMP clause that was not obligatory and triggered a known gcc bug involving macros 2020-01-24 16:00:53 +01:00
53212168a2 History: added 'obi' in bash history for practical reasons 2020-01-23 16:51:49 +01:00
b4b2e62195 Cleaner handling of reverse quality columns 2020-01-18 19:28:12 +01:00
ced82c4242 Switching to version 3.0-beta6 2020-01-18 17:29:23 +01:00
a524f8829e New command: obi cat to concatenate views (not optimized yet) 2020-01-18 17:28:31 +01:00
5c9091e9eb C: closing DMS after cleaning it instead of counting on upper layer 2020-01-18 17:27:35 +01:00
822000cb70 Fixes in documentation 2020-01-18 17:26:18 +01:00
b9cd9bee9a C: Changed obibool definitions because of conflict with R 2020-01-06 15:11:31 +01:00
47 changed files with 509 additions and 149 deletions

View File

@ -1,4 +1,3 @@
#/usr/bin/env bash
_obi_comp ()
{

View File

@ -222,7 +222,7 @@ def __addDMSOutputOption(optionManager):
group.add_argument('--no-create-dms',
action="store_true", dest="obi:nocreatedms",
default=False,
help="Don't create an output DMS it does not already exist")
help="Don't create an output DMS if it does not already exist")
def __addEltLimitOption(optionManager):

View File

@ -266,9 +266,9 @@ def run(config):
# If the input and the output DMS are different, delete the temporary result view in the input DMS
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
o_dms.close(force=True)
i_dms.close()
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -14,7 +14,7 @@ from obitools3.libalign._qsrassemble import QSolexaRightReverseAssemble
from obitools3.libalign._solexapairend import buildConsensus, buildJoinedSequence
from obitools3.dms.obiseq cimport Nuc_Seq
from obitools3.libalign.shifted_ali cimport Kmer_similarity, Ali_shifted
from obitools3.commands.ngsfilter import REVERSE_SEQ_COLUMN_NAME, REVERSE_QUALITY_COLUMN_NAME
from obitools3.dms.capi.obiview cimport REVERSE_SEQUENCE_COLUMN, REVERSE_QUALITY_COLUMN
import sys
import os
@ -102,7 +102,7 @@ def alignmentIterator(entries, aligner):
seqR = reverse[i]
else:
seqF = Nuc_Seq.new_from_stored(entries[i])
seqR = Nuc_Seq(seqF.id, seqF[REVERSE_SEQ_COLUMN_NAME], quality=seqF[REVERSE_QUALITY_COLUMN_NAME])
seqR = Nuc_Seq(seqF.id, seqF[REVERSE_SEQUENCE_COLUMN], quality=seqF[REVERSE_QUALITY_COLUMN])
seqR.index = i
ali = aligner(seqF, seqR)
@ -196,8 +196,8 @@ def run(config):
reversed_column=None)
else:
aligner = Kmer_similarity(entries, \
column2=entries[REVERSE_SEQ_COLUMN_NAME], \
qual_column2=entries[REVERSE_QUALITY_COLUMN_NAME], \
column2=entries[REVERSE_SEQUENCE_COLUMN], \
qual_column2=entries[REVERSE_QUALITY_COLUMN], \
kmer_size=config['alignpairedend']['kmersize'], \
reversed_column=entries[b'reversed']) # column created by the ngsfilter tool
@ -221,7 +221,7 @@ def run(config):
buildConsensus(ali, consensus, seqF)
else:
if not two_views:
seqR = Nuc_Seq(seqF.id, seqF[REVERSE_SEQ_COLUMN_NAME], quality = seqF[REVERSE_QUALITY_COLUMN_NAME])
seqR = Nuc_Seq(seqF.id, seqF[REVERSE_SEQUENCE_COLUMN], quality = seqF[REVERSE_QUALITY_COLUMN])
else:
seqR = reverse[i]
buildJoinedSequence(ali, seqR, consensus, forward=seqF)
@ -247,10 +247,10 @@ def run(config):
#print("\n\nOutput view:\n````````````", file=sys.stderr)
#print(repr(view), file=sys.stderr)
input[0].close()
input[0].close(force=True)
if two_views:
rinput[0].close()
output[0].close()
rinput[0].close(force=True)
output[0].close(force=True)
logger("info", "Done.")

View File

@ -379,7 +379,7 @@ def run(config):
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
if i_dms != o_dms:
View.delete_view(o_dms, imported_view_name)
o_dms.close()
i_dms.close()
o_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -97,9 +97,9 @@ def run(config):
# If the input and the output DMS are different, delete the temporary result view in the input DMS
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
o_dms.close(force=True)
i_dms.close()
i_dms.close(force=True)
logger("info", "Done.")

139
python/obitools3/commands/cat.pyx Executable file
View File

@ -0,0 +1,139 @@
#cython: language_level=3
from obitools3.apps.progress cimport ProgressBar # @UnresolvedImport
from obitools3.dms import DMS
from obitools3.dms.view.view cimport View
from obitools3.uri.decode import open_uri
from obitools3.apps.optiongroups import addMinimalOutputOption
from obitools3.dms.view import RollbackException
from obitools3.apps.config import logger
from obitools3.utils cimport str2bytes
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
from obitools3.dms.view.view cimport View
from obitools3.dms.capi.obiview cimport NUC_SEQUENCE_COLUMN, REVERSE_SEQUENCE_COLUMN, \
QUALITY_COLUMN, REVERSE_QUALITY_COLUMN
from obitools3.dms.capi.obitypes cimport OBI_SEQ, OBI_QUAL
from obitools3.dms.column.column cimport Column
import time
import sys
from cpython.exc cimport PyErr_CheckSignals
__title__="Concatenate views."
def addOptions(parser):
addMinimalOutputOption(parser)
group=parser.add_argument_group('obi cat specific options')
group.add_argument("-c",
action="append", dest="cat:views_to_cat",
metavar="<VIEW_NAME>",
default=[],
type=str,
help="URI of a view to concatenate. (e.g. 'my_dms/my_view'). "
"Several -c options can be used on the same "
"command line.")
def run(config):
DMS.obi_atexit()
logger("info", "obi cat")
# Open the views to concatenate
iview_list = []
idms_list = []
total_len = 0
remove_qual = False
remove_rev_qual = False
v_type = View_NUC_SEQS
for v_uri in config["cat"]["views_to_cat"]:
input = open_uri(v_uri)
if input is None:
raise Exception("Could not read input view")
i_dms = input[0]
i_view = input[1]
if input[2] != View_NUC_SEQS: # Check view type (output view is nuc_seqs view if all input view are nuc_seqs view)
v_type = View
if QUALITY_COLUMN not in i_view: # Check if keep quality column in output view (if all input views have it)
remove_qual = True
if REVERSE_QUALITY_COLUMN not in i_view: # same as above for reverse quality
remove_rev_qual = True
total_len += len(i_view)
iview_list.append(i_view)
idms_list.append(i_dms)
# Open the output: only the DMS
output = open_uri(config['obi']['outputURI'],
input=False,
newviewtype=v_type)
if output is None:
raise Exception("Could not create output view")
o_dms = output[0]
o_view = output[1]
# Initialize quality columns and their associated sequence columns if needed
if not remove_qual:
if NUC_SEQUENCE_COLUMN not in o_view:
Column.new_column(o_view, NUC_SEQUENCE_COLUMN, OBI_SEQ)
Column.new_column(o_view, QUALITY_COLUMN, OBI_QUAL, associated_column_name=NUC_SEQUENCE_COLUMN, associated_column_version=o_view[NUC_SEQUENCE_COLUMN].version)
if not remove_rev_qual:
Column.new_column(o_view, REVERSE_SEQUENCE_COLUMN, OBI_SEQ)
Column.new_column(o_view, REVERSE_QUALITY_COLUMN, OBI_QUAL, associated_column_name=REVERSE_SEQUENCE_COLUMN, associated_column_version=o_view[REVERSE_SEQUENCE_COLUMN].version)
# Initialize multiple elements columns
dict_cols = {}
for v in iview_list:
for coln in v.keys():
if v[coln].nb_elements_per_line > 1:
if coln not in dict_cols:
dict_cols[coln] = {}
dict_cols[coln]['eltnames'] = set(v[coln].elements_names)
dict_cols[coln]['nbelts'] = v[coln].nb_elements_per_line
dict_cols[coln]['obitype'] = v[coln].data_type_int
else:
dict_cols[coln]['eltnames'] = set(v[coln].elements_names + list(dict_cols[coln]['eltnames']))
dict_cols[coln]['nbelts'] = len(dict_cols[coln]['eltnames'])
for coln in dict_cols:
Column.new_column(o_view, coln, dict_cols[coln]['obitype'],
nb_elements_per_line=dict_cols[coln]['nbelts'], elements_names=list(dict_cols[coln]['eltnames']))
# Initialize the progress bar
pb = ProgressBar(total_len, config, seconde=5)
i = 0
for v in iview_list:
for l in v:
PyErr_CheckSignals()
pb(i)
o_view[i] = l
i+=1
# Deletes quality columns if needed
if QUALITY_COLUMN in o_view and remove_qual :
o_view.delete_column(QUALITY_COLUMN)
if REVERSE_QUALITY_COLUMN in o_view and remove_rev_qual :
o_view.delete_column(REVERSE_QUALITY_COLUMN)
pb(i, force=True)
print("", file=sys.stderr)
# Save command config in DMS comments
command_line = " ".join(sys.argv[1:])
o_view.write_config(config, "cat", command_line, input_dms_name=[d.name for d in idms_list], input_view_name=[v.name for v in iview_list])
o_dms.record_command_line(command_line)
#print("\n\nOutput view:\n````````````", file=sys.stderr)
#print(repr(view), file=sys.stderr)
for d in idms_list:
d.close(force=True)
o_dms.close(force=True)
logger("info", "Done.")
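
For columns that hold several elements per line (dictionary-like values), obi cat sizes the output column to the union of the element names seen across all input views, which is what the dict_cols bookkeeping above does. A minimal standalone sketch of that idea, with plain dicts standing in for views and columns (the names here are illustrative, not the OBITools3 API):

def union_multi_element_columns(views):
    # views: list of {column_name: [element_names]} descriptions
    dict_cols = {}
    for view in views:
        for coln, elt_names in view.items():
            if len(elt_names) > 1:                    # multi-element column
                dict_cols.setdefault(coln, set()).update(elt_names)
    # one output column per name, wide enough for every input view
    return {coln: sorted(names) for coln, names in dict_cols.items()}

v1 = {"MERGED_sample": ["A", "B"], "COUNT": ["COUNT"]}
v2 = {"MERGED_sample": ["B", "C"], "COUNT": ["COUNT"]}
print(union_multi_element_columns([v1, v2]))
# {'MERGED_sample': ['A', 'B', 'C']}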

View File

@ -124,8 +124,8 @@ def run(config):
# If the input and the output DMS are different, delete the temporary result view in the input DMS
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
o_dms.close(force=True)
i_dms.close()
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -56,3 +56,5 @@ def run(config):
print(count2)
else:
print(count1)
input[0].close(force=True)

View File

@ -35,13 +35,13 @@ def addOptions(parser):
action="store", dest="ecopcr:primer1",
metavar='<PRIMER>',
type=str,
help="Forward primer.")
help="Forward primer, length must be less than or equal to 32")
group.add_argument('--primer2', '-R',
action="store", dest="ecopcr:primer2",
metavar='<PRIMER>',
type=str,
help="Reverse primer.")
help="Reverse primer, length must be less than or equal to 32")
group.add_argument('--error', '-e',
action="store", dest="ecopcr:error",
@ -203,6 +203,7 @@ def run(config):
#print("\n\nOutput view:\n````````````", file=sys.stderr)
#print(repr(o_dms[o_view_name]), file=sys.stderr)
o_dms.close()
i_dms.close(force=True)
o_dms.close(force=True)
logger("info", "Done.")

View File

@ -64,10 +64,10 @@ def run(config):
ref_view_name = ref[1]
# Check that the threshold demanded is greater than or equal to the threshold used to build the reference database
if config['ecotag']['threshold'] < eval(i_dms[ref_view_name].comments["ref_db_threshold"]) :
if config['ecotag']['threshold'] < eval(ref_dms[ref_view_name].comments["ref_db_threshold"]) :
print("Error: The threshold demanded (%f) is lower than the threshold used to build the reference database (%f).",
config['ecotag']['threshold'], i_dms[ref_view_name].comments["ref_db_threshold"])
config['ecotag']['threshold'], ref_dms[ref_view_name].comments["ref_db_threshold"])
# Open the output: only the DMS
output = open_uri(config['obi']['outputURI'],
input=False,
@ -126,9 +126,11 @@ def run(config):
# If the input and the output DMS are different, delete the temporary result view in the input DMS
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
o_dms.close(force=True)
i_dms.close()
taxo_dms.close(force=True)
ref_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -86,7 +86,7 @@ def run(config):
if not BrokenPipeError and not IOError:
output_object.close()
iview.close()
input[0].close()
input[0].close(force=True)
logger("info", "Done.")

View File

@ -36,14 +36,13 @@ def addOptions(parser):
metavar="<PREDICATE>",
default=[],
type=str,
help="Warning: use bytes for character strings (b'text' instead of 'text'). "
"Python boolean expression to be evaluated in the "
help="Python boolean expression to be evaluated in the "
"sequence/line context. The attribute name can be "
"used in the expression as a variable name. "
"An extra variable named 'sequence' or 'line' refers "
"to the sequence or line object itself. "
"Several -p options can be used on the same "
"commande line.")
"command line.")
group.add_argument("-S", "--sequence",
action="store", dest="grep:seq_pattern",
@ -371,7 +370,7 @@ def run(config):
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
i_dms.close()
o_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -103,7 +103,7 @@ def run(config):
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
i_dms.close()
o_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -54,4 +54,5 @@ def run(config):
print(bytes2str(entries.ascii_history))
else:
raise Exception("ASCII history only available for views")
input[0].close(force=True)

View File

@ -11,6 +11,7 @@ from obitools3.dms.column.column cimport Column
from obitools3.dms.obiseq cimport Nuc_Seq
from obitools3.dms import DMS
from obitools3.dms.taxo.taxo cimport Taxonomy
from obitools3.files.uncompress cimport CompressedFile
from obitools3.utils cimport tobytes, \
@ -65,6 +66,14 @@ def addOptions(parser):
addTaxdumpInputOption(parser)
addMinimalOutputOption(parser)
group = parser.add_argument_group('obi import specific options')
group.add_argument('--preread',
action="store_true", dest="import:preread",
default=False,
help="Do a first readthrough of the dataset if it contains huge dictionaries (more than 100 keys) for "
"a much faster import.")
def run(config):
@ -169,8 +178,6 @@ def run(config):
if entry_count >= 0:
pb = ProgressBar(entry_count, config, seconde=5)
entries = input[1]
NUC_SEQS_view = False
if isinstance(output[1], View) :
@ -188,6 +195,60 @@ def run(config):
dcols = {}
# First read through the entries to prepare columns with dictionaries as they are very time-expensive to rewrite
if config['import']['preread']:
logger("info", "First readthrough...")
entries = input[1]
i = 0
dict_dict = {}
for entry in entries:
PyErr_CheckSignals()
if entry is None: # error or exception handled at lower level, not raised because Python generators can't resume after any exception is raised
if config['obi']['skiperror']:
i-=1
continue
else:
raise Exception("obi import error in first readthrough")
if pb is not None:
pb(i)
elif not i%50000:
logger("info", "Read %d entries", i)
for tag in entry :
if type(entry[tag]) == dict :
if tag in dict_dict:
dict_dict[tag][0].update(entry[tag].keys())
else:
dict_dict[tag] = [set(entry[tag].keys()), get_obitype(entry[tag])]
i+=1
if pb is not None:
pb(i, force=True)
print("", file=sys.stderr)
for tag in dict_dict:
dcols[tag] = (Column.new_column(view, tag, dict_dict[tag][1], \
nb_elements_per_line=len(dict_dict[tag][0]), \
elements_names=list(dict_dict[tag][0])), \
value_obitype)
# Reinitialize the input
if isinstance(input[0], CompressedFile):
input_is_file = True
if entry_count >= 0:
pb = ProgressBar(entry_count, config, seconde=5)
try:
input[0].close()
except AttributeError:
pass
input = open_uri(config['obi']['inputURI'], force_file=input_is_file)
if input is None:
raise Exception("Could not open input URI")
entries = input[1]
i = 0
for entry in entries :
@ -247,6 +308,8 @@ def run(config):
dcols[tag] = (Column.new_column(view, tag, value_obitype, nb_elements_per_line=nb_elts, elements_names=elt_names), value_obitype)
# Fill value
if value_type == dict and nb_elts == 1: # special case that makes the OBI3 create a 1 elt/line column which won't read a dict value
value = value[list(value.keys())[0]] # The solution is to transform the value in a simple atomic one acceptable by the column
dcols[tag][0][i] = value
# TODO else log error?
@ -263,6 +326,12 @@ def run(config):
rewrite = True
try:
# Check that it's not the case where the first entry contained a dict of length 1 and now there is a new key
if type(value) == dict and \
dcols[tag][0].nb_elements_per_line == 1 and len(value.keys()) == 1 \
and dcols[tag][0].elements_names[0] != list(value.keys())[0] :
raise IndexError # trigger column rewrite
# Fill value
dcols[tag][0][i] = value
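
The --preread pass added above exists because dictionary-valued tags are expensive to handle incrementally: every new key forces the column to be rewritten wider. Scanning the input once to collect the full key set lets each column be created at its final width before any value is written. A rough standalone sketch of that two-pass idea over plain Python records (the helper names are hypothetical, this is not the import command itself):

def preread_dict_keys(records):
    # First pass: collect the union of keys for every dict-valued tag.
    key_sets = {}
    for record in records:
        for tag, value in record.items():
            if isinstance(value, dict):
                key_sets.setdefault(tag, set()).update(value.keys())
    return key_sets

def import_records(records):
    key_sets = preread_dict_keys(records)            # pass 1: widths known up front
    columns = {tag: {k: [] for k in sorted(keys)}    # create each column once
               for tag, keys in key_sets.items()}
    for record in records:                           # pass 2: fill, never rewrite
        for tag, cols in columns.items():
            value = record.get(tag, {})
            for k, col in cols.items():
                col.append(value.get(k))
    return columns

data = [{"merged_sample": {"s1": 3}},
        {"merged_sample": {"s2": 1, "s3": 2}}]
print(import_records(data)["merged_sample"])
# {'s1': [3, None], 's2': [None, 1], 's3': [None, 2]}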

View File

@ -46,5 +46,5 @@ def run(config):
process.wait()
iview.close()
input[0].close()
input[0].close(force=True)

View File

@ -36,6 +36,7 @@ def run(config):
l = []
for view in input[0]:
l.append(tostr(view) + "\t(Date created: " + str(bytes2str_object(dms[view].comments["Date created"]))+")")
dms[view].close()
l.sort()
for v in l:
print(v)
@ -51,4 +52,5 @@ def run(config):
if config['ls']['longformat'] and len(input[1].comments) > 0:
print("\n### Comments:")
print(str(input[1].comments))
input[0].close(force=True)

View File

@ -13,6 +13,7 @@ from obitools3.libalign.apat_pattern import Primer_search
from obitools3.dms.obiseq cimport Nuc_Seq
from obitools3.dms.capi.obitypes cimport OBI_SEQ, OBI_QUAL
from obitools3.dms.capi.apat cimport MAX_PATTERN
from obitools3.dms.capi.obiview cimport REVERSE_SEQUENCE_COLUMN, REVERSE_QUALITY_COLUMN
from obitools3.utils cimport tobytes
from libc.stdint cimport INT32_MAX
@ -22,8 +23,8 @@ import sys
from cpython.exc cimport PyErr_CheckSignals
REVERSE_SEQ_COLUMN_NAME = b"REVERSE_SEQUENCE" # used by alignpairedend tool
REVERSE_QUALITY_COLUMN_NAME = b"REVERSE_QUALITY" # used by alignpairedend tool
#REVERSE_SEQ_COLUMN_NAME = b"REVERSE_SEQUENCE" # used by alignpairedend tool
#REVERSE_QUALITY_COLUMN_NAME = b"REVERSE_QUALITY" # used by alignpairedend tool
__title__="Assigns sequence records to the corresponding experiment/sample based on DNA tags and primers"
@ -41,7 +42,8 @@ def addOptions(parser):
metavar="<URI>",
type=str,
default=None,
help="URI to the view containing the samples definition (with tags, primers, sample names,...)")
help="URI to the view containing the samples definition (with tags, primers, sample names,...)"
"Warning: primer lengths must be less than or equal to 32")
group.add_argument('-R', '--reverse-reads',
action="store", dest="ngsfilter:reverse",
@ -171,6 +173,13 @@ cdef read_info_view(info_view, max_errors=2, verbose=False, not_aligned=False):
primer_list = []
i=0
for p in info_view:
# Check primer length: should not be longer than 32, the max allowed by the apat lib
if len(p[b'forward_primer']) > 32:
raise RollbackException("Error: primers can not be longer than 32bp, rollbacking views")
if len(p[b'reverse_primer']) > 32:
raise RollbackException("Error: primers can not be longer than 32bp, rollbacking views")
forward=Primer(p[b'forward_primer'],
len(p[b'forward_tag']) if (b'forward_tag' in p and p[b'forward_tag']!=None) else None,
True,
@ -259,8 +268,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
if not_aligned:
sequences[1] = sequences[1].clone()
sequences[0][REVERSE_SEQ_COLUMN_NAME] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN_NAME] = sequences[1].quality # used by alignpairedend tool
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
for seq in sequences:
if hasattr(seq, "quality_array"):
@ -295,8 +304,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
if directmatch is None:
if not_aligned:
sequences[0][REVERSE_SEQ_COLUMN_NAME] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN_NAME] = sequences[1].quality # used by alignpairedend tool
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
sequences[0][b'error']=b'No primer match'
return False, sequences[0]
@ -314,8 +323,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[0] = sequences[0][directmatch[1][2]:]
else:
sequences[1] = sequences[1][directmatch[1][2]:]
sequences[0][REVERSE_SEQ_COLUMN_NAME] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN_NAME] = sequences[1].quality # used by alignpairedend tool
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
if directmatch[0].forward:
sequences[0][b'direction']=b'forward'
@ -361,8 +370,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[0] = sequences[0][:r[1]]
else:
sequences[1] = sequences[1][:r[1]]
sequences[0][REVERSE_SEQ_COLUMN_NAME] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN_NAME] = sequences[1].quality # used by alignpairedend tool
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
# do the same on the other seq
if first_match_first_seq:
r = direct_primer.revcomp(sequences[1])
@ -373,8 +382,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[1] = sequences[1][:r[1]]
else:
sequences[0] = sequences[0][:r[1]]
sequences[0][REVERSE_SEQ_COLUMN_NAME] = sequences[1].seq
sequences[0][REVERSE_QUALITY_COLUMN_NAME] = sequences[1].quality
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality
# Look for other primer in the other direction on the sequence, or
@ -442,8 +451,8 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
sequences[1] = sequences[1][reversematch[1][2]:]
if not directmatch[0].forward:
sequences[1] = sequences[1].reverse_complement
sequences[0][REVERSE_SEQ_COLUMN_NAME] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN_NAME] = sequences[1].quality # used by alignpairedend tool
sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq # used by alignpairedend tool
sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality # used by alignpairedend tool
else:
sequences[0] = sequences[0][reversematch[1][2]:]
@ -593,7 +602,13 @@ def run(config):
pb = ProgressBar(entries_len, config, seconde=5)
# Check and store primers and tags
infos, primer_list = read_info_view(info_view, max_errors=config['ngsfilter']['error'], verbose=False, not_aligned=not_aligned) # TODO obi verbose option
try:
infos, primer_list = read_info_view(info_view, max_errors=config['ngsfilter']['error'], verbose=False, not_aligned=not_aligned) # TODO obi verbose option
except RollbackException, e:
if unidentified is not None:
raise RollbackException("obi ngsfilter error, rollbacking views: "+str(e), o_view, unidentified)
else:
raise RollbackException("obi ngsfilter error, rollbacking view: "+str(e), o_view)
aligner = Primer_search(primer_list, config['ngsfilter']['error'])
@ -605,12 +620,12 @@ def run(config):
paired_p.revcomp.aligner = aligner
if not_aligned: # create columns used by alignpairedend tool
Column.new_column(o_view, REVERSE_SEQ_COLUMN_NAME, OBI_SEQ)
Column.new_column(o_view, REVERSE_QUALITY_COLUMN_NAME, OBI_QUAL, associated_column_name=REVERSE_SEQ_COLUMN_NAME, associated_column_version=o_view[REVERSE_SEQ_COLUMN_NAME].version)
Column.new_column(o_view, REVERSE_SEQUENCE_COLUMN, OBI_SEQ)
Column.new_column(o_view, REVERSE_QUALITY_COLUMN, OBI_QUAL, associated_column_name=REVERSE_SEQUENCE_COLUMN, associated_column_version=o_view[REVERSE_SEQUENCE_COLUMN].version)
if unidentified is not None:
Column.new_column(unidentified, REVERSE_SEQ_COLUMN_NAME, OBI_SEQ)
Column.new_column(unidentified, REVERSE_QUALITY_COLUMN_NAME, OBI_QUAL, associated_column_name=REVERSE_SEQ_COLUMN_NAME, associated_column_version=unidentified[REVERSE_SEQ_COLUMN_NAME].version)
Column.new_column(unidentified, REVERSE_SEQUENCE_COLUMN, OBI_SEQ)
Column.new_column(unidentified, REVERSE_QUALITY_COLUMN, OBI_QUAL, associated_column_name=REVERSE_SEQUENCE_COLUMN, associated_column_version=unidentified[REVERSE_SEQUENCE_COLUMN].version)
g = 0
u = 0
@ -651,11 +666,11 @@ def run(config):
#print("\n\nOutput view:\n````````````", file=sys.stderr)
#print(repr(o_view), file=sys.stderr)
input[0].close()
output[0].close()
info_input[0].close()
input[0].close(force=True)
output[0].close(force=True)
info_input[0].close(force=True)
if unidentified is not None:
unidentified_input[0].close()
unidentified_input[0].close(force=True)
aligner.free()
logger("info", "Done.")
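
The primer-length checks added above enforce the 32 bp limit of the apat pattern-matching library before any processing starts, instead of failing later in a less obvious way. A hedged sketch of that early validation over a plain list of sample definitions (the constant and field names here are illustrative):

MAX_PATTERN_LENGTH = 32   # limit of the apat library used for primer matching

def check_primers(sample_definitions):
    # Raise early if any primer is longer than the pattern library allows.
    for sample in sample_definitions:
        for key in ("forward_primer", "reverse_primer"):
            primer = sample[key]
            if len(primer) > MAX_PATTERN_LENGTH:
                raise ValueError(
                    f"{key} {primer!r} is {len(primer)} bp; primers can not "
                    f"be longer than {MAX_PATTERN_LENGTH} bp")

check_primers([{"forward_primer": "GGGCAATCCTGAGCCAA",
                "reverse_primer": "CCATTGAGTCTCTGCACCTATC"}])
print("primers OK")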

View File

@ -141,7 +141,7 @@ def run(config):
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
i_dms.close()
o_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -251,7 +251,7 @@ def run(config):
for i in range(len(sorted_stats)):
c = sorted_stats[i][0]
for v in c:
if v is not None:
if type(v) == bytes:
print(pcat % tostr(v)+"\t", end="")
else:
print(pcat % str(v)+"\t", end="")
@ -268,6 +268,6 @@ def run(config):
print("%7d" %catcount[c], end="")
print("%9d" %totcount[c])
input[0].close()
input[0].close(force=True)
logger("info", "Done.")
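
The obi stats fix above only decodes category values that are actually bytes; booleans (and any other type) now go through str() instead of the bytes-only conversion that used to raise. A minimal version of that formatting choice:

def format_key(v):
    # Bytes keys are decoded; anything else (bool, int, None, ...) is str()-ed.
    return v.decode("ascii") if isinstance(v, bytes) else str(v)

print([format_key(v) for v in (b"sample_A", True, 42, None)])
# ['sample_A', 'True', '42', 'None']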

View File

@ -106,7 +106,7 @@ def run(config):
# If the input and the output DMS are different, delete the temporary imported view used to create the final view
if i_dms != o_dms:
View.delete_view(i_dms, o_view_name)
o_dms.close()
i_dms.close()
o_dms.close(force=True)
i_dms.close(force=True)
logger("info", "Done.")

View File

@ -529,7 +529,7 @@ def run(config):
test_taxo(config, infos)
infos['view'].close()
infos['dms'].close()
infos['dms'].close(force=True)
shutil.rmtree(config['obi']['defaultdms']+'.obidms', ignore_errors=True)
print("Done.")

View File

@ -8,7 +8,8 @@ from obitools3.dms.view import RollbackException
from obitools3.dms.view.typed_view.view_NUC_SEQS cimport View_NUC_SEQS
from obitools3.dms.column.column cimport Column, Column_line
from obitools3.dms.capi.obiview cimport QUALITY_COLUMN, COUNT_COLUMN, NUC_SEQUENCE_COLUMN, ID_COLUMN, TAXID_COLUMN, \
TAXID_DIST_COLUMN, MERGED_TAXID_COLUMN, MERGED_COLUMN, MERGED_PREFIX
TAXID_DIST_COLUMN, MERGED_TAXID_COLUMN, MERGED_COLUMN, MERGED_PREFIX, \
REVERSE_QUALITY_COLUMN
from obitools3.dms.capi.obitypes cimport OBI_INT, OBI_STR, index_t
from obitools3.apps.optiongroups import addMinimalInputOption, \
addMinimalOutputOption, \
@ -24,9 +25,6 @@ from cpython.exc cimport PyErr_CheckSignals
__title__="Group sequence records together"
REVERSE_QUALITY_COLUMN_NAME = b"REVERSE_QUALITY" # TODO from ngsfilter, move to C
def addOptions(parser):
@ -496,8 +494,8 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, li
# Deletes quality columns if there is one because the matching between sequence and quality will be broken (quality set to NA when sequence not)
if QUALITY_COLUMN in view:
o_view.delete_column(QUALITY_COLUMN)
if REVERSE_QUALITY_COLUMN_NAME in view:
o_view.delete_column(REVERSE_QUALITY_COLUMN_NAME)
if REVERSE_QUALITY_COLUMN in view:
o_view.delete_column(REVERSE_QUALITY_COLUMN)
if taxonomy is not None:
print("") # TODO because in the middle of progress bar. Better solution?

View File

@ -24,6 +24,8 @@ cdef extern from "obiview.h" nogil:
extern const_char_p ID_COLUMN
extern const_char_p DEFINITION_COLUMN
extern const_char_p QUALITY_COLUMN
extern const_char_p REVERSE_QUALITY_COLUMN
extern const_char_p REVERSE_SEQUENCE_COLUMN
extern const_char_p COUNT_COLUMN
extern const_char_p TAXID_COLUMN
extern const_char_p MERGED_TAXID_COLUMN
@ -100,7 +102,7 @@ cdef extern from "obiview.h" nogil:
const_char_p comments,
bint create)
int obi_view_delete_column(Obiview_p view, const_char_p column_name)
int obi_view_delete_column(Obiview_p view, const_char_p column_name, bint delete_file)
OBIDMS_column_p obi_view_get_column(Obiview_p view, const_char_p column_name)

View File

@ -21,7 +21,11 @@ from ..capi.obiutils cimport obi_format_date
from ..capi.obiview cimport obi_view_add_column, \
obi_view_get_pointer_on_column_in_view, \
Obiview_p, \
NUC_SEQUENCE_COLUMN
NUC_SEQUENCE_COLUMN, \
QUALITY_COLUMN, \
REVERSE_SEQUENCE_COLUMN, \
REVERSE_QUALITY_COLUMN
from ..object cimport OBIDeactivatedInstanceError
@ -122,11 +126,18 @@ cdef class Column(OBIWrapper) :
if data_type == OBI_QUAL:
if associated_column_name_b == b"":
if NUC_SEQUENCE_COLUMN not in view:
raise RuntimeError("Cannot create column %s in view %s: trying to create quality column but no NUC_SEQ column to associate it with in the view" % (bytes2str(column_name_b),
bytes2str(view.name)))
associated_column_name_b = NUC_SEQUENCE_COLUMN
associated_column_version = view[NUC_SEQUENCE_COLUMN].version
if column_name == QUALITY_COLUMN:
if NUC_SEQUENCE_COLUMN not in view:
raise RuntimeError("Cannot create column %s in view %s: trying to create quality column but no NUC_SEQ column to associate it with in the view" % (bytes2str(column_name_b),
bytes2str(view.name)))
associated_column_name_b = NUC_SEQUENCE_COLUMN
associated_column_version = view[NUC_SEQUENCE_COLUMN].version
elif column_name == REVERSE_QUALITY_COLUMN:
if REVERSE_SEQUENCE_COLUMN not in view:
raise RuntimeError("Cannot create column %s in view %s: trying to create reverse quality column but no REVERSE_SEQUENCE column to associate it with in the view" % (bytes2str(column_name_b),
bytes2str(view.name)))
associated_column_name_b = REVERSE_SEQUENCE_COLUMN
associated_column_version = view[REVERSE_SEQUENCE_COLUMN].version
if (obi_view_add_column(view = view.pointer(),
column_name = column_name_b,

View File

@ -94,16 +94,16 @@ cdef class DMS(OBIWrapper):
return dms
def close(self) :
def close(self, force=False) :
'''
Closes the DMS instance and free the associated memory
Closes the DMS instance and free the associated memory (no counter, closing is final)
The `close` method is automatically called by the object destructor.
'''
cdef OBIDMS_p pointer = self.pointer()
if self.active() :
OBIWrapper.close(self)
if (obi_close_dms(pointer, False)) < 0 :
if (obi_close_dms(pointer, force=force)) < 0 :
raise Exception("Problem closing an OBIDMS")
@ -254,12 +254,13 @@ cdef class DMS(OBIWrapper):
# bash command history property getter
@property
def bash_history(self):
s = b"#!/bin/bash\n\n"
#s = b"#!${bash}/bin/bash\n\n"
s = b""
first = True
for command in self.command_line_history:
s+=b"#"
s+=command[b"time"]
s+=b"\n"
s+=b"\nobi "
s+=command[b"command"]
s+=b"\n"
return s
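
Most of the hunks in this changeset replace close() with close(force=True), and the docstring change above spells out why: a forced close is final, regardless of any use counting going on underneath. The sketch below is a toy reference-counted handle illustrating that kind of contract; it is an assumption about the semantics, not the OBIDMS implementation:

class Handle:
    def __init__(self):
        self._refs = 1          # assumption: plain use counting
        self.closed = False

    def acquire(self):
        self._refs += 1
        return self

    def close(self, force=False):
        # Decrement the use counter; with force=True, close unconditionally.
        if self.closed:
            return
        self._refs = 0 if force else self._refs - 1
        if self._refs <= 0:
            self.closed = True  # release the underlying resource here

h = Handle().acquire()          # two users of the same handle
h.close()                       # still open: another user holds it
print(h.closed)                 # False
h.close(force=True)             # final: closes regardless of the counter
print(h.closed)                 # True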

View File

@ -22,7 +22,8 @@ cdef class View(OBIWrapper):
cdef inline Obiview_p pointer(self)
cpdef delete_column(self,
object column_name)
object column_name,
bint delete_file=*)
cpdef rename_column(self,
object current_name,

View File

@ -227,7 +227,8 @@ cdef class View(OBIWrapper) :
cpdef delete_column(self,
object column_name) :
object column_name,
bint delete_file=False) :
cdef bytes column_name_b = tobytes(column_name)
@ -239,7 +240,7 @@ cdef class View(OBIWrapper) :
col.close()
# Remove the column from the view which closes the C structure
if obi_view_delete_column(self.pointer(), column_name_b) < 0 :
if obi_view_delete_column(self.pointer(), column_name_b, delete_file) < 0 :
raise RollbackException("Problem deleting column %s from a view",
bytes2str(column_name_b), self)
@ -297,11 +298,17 @@ cdef class View(OBIWrapper) :
nb_elements_per_line=new_nb_elements_per_line, elements_names=new_elements_names,
comments=old_column.comments, alias=column_name_b+tobytes('___new___'))
switch_to_dict = old_column.nb_elements_per_line == 1 and new_nb_elements_per_line > 1
ori_key = old_column._elements_names[0]
for i in range(length) :
new_column[i] = old_column[i]
if switch_to_dict :
new_column[i] = {ori_key: old_column[i]}
else:
new_column[i] = old_column[i]
# Remove old column from view
self.delete_column(column_name_b)
self.delete_column(column_name_b, delete_file=True)
# Rename new
new_column.name = column_name_b
@ -519,7 +526,7 @@ cdef class View(OBIWrapper) :
# bash command history property getter
@property
def bash_history(self):
s = b"#!/bin/bash\n\n"
s = b""
first = True
for level in self.view_history:
command_list = [level[input][b"command_line"] for input in level.keys()]
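
In the column-rewrite hunk earlier in this file, a column that used to hold a single element per line and must now hold several is converted by wrapping each old value under its original element name, so existing data survives the widening. The same conversion on plain Python values (an illustration, not the Column API):

def widen_to_dict(old_values, ori_key):
    # Each old scalar becomes a one-key dict keyed by the original element name.
    return [{ori_key: v} for v in old_values]

print(widen_to_dict([12, 7, 3], "COUNT"))
# [{'COUNT': 12}, {'COUNT': 7}, {'COUNT': 3}]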

View File

@ -6,6 +6,7 @@ from .solexapairend import iterOnAligment
from .shifted_ali cimport Ali_shifted
from obitools3.dms.capi.obiview cimport Obiview_p, QUALITY_COLUMN, NUC_SEQUENCE_COLUMN, \
REVERSE_SEQUENCE_COLUMN, REVERSE_QUALITY_COLUMN, \
obi_set_qual_int_with_elt_idx_and_col_p_in_view, \
obi_set_str_with_elt_idx_and_col_p_in_view
@ -13,7 +14,6 @@ from obitools3.dms.capi.obidmscolumn cimport OBIDMS_column_p
from obitools3.dms.view.view cimport View
from obitools3.dms.column.column cimport Column
from obitools3.commands.ngsfilter import REVERSE_SEQ_COLUMN_NAME, REVERSE_QUALITY_COLUMN_NAME
from math import log10
@ -233,7 +233,7 @@ def buildConsensus(ali, seq, ref_tags=None):
seq[b'mode']=b'alignment'
for tag in ref_tags:
if tag != REVERSE_SEQ_COLUMN_NAME and tag != REVERSE_QUALITY_COLUMN_NAME and \
if tag != REVERSE_SEQUENCE_COLUMN and tag != REVERSE_QUALITY_COLUMN and \
tag != NUC_SEQUENCE_COLUMN and tag != QUALITY_COLUMN:
seq[tag] = ref_tags[tag]
@ -254,7 +254,7 @@ def buildJoinedSequence(ali, reverse, seq, forward=None):
seq[b"mode"]=b"joined"
seq[b"pairedend_limit"]=len(forward)
for tag in forward:
if tag != REVERSE_SEQ_COLUMN_NAME and tag != REVERSE_QUALITY_COLUMN_NAME:
if tag != REVERSE_SEQUENCE_COLUMN and tag != REVERSE_QUALITY_COLUMN:
seq[tag] = forward[tag]
return seq

View File

@ -156,6 +156,9 @@ def emblIterator_file(lineiterator,
yield seq
read+=1
# Last sequence
seq = emblParser(entry)
yield seq
free(entry)

View File

@ -153,6 +153,9 @@ def genbankIterator_file(lineiterator,
yield seq
read+=1
# Last sequence
seq = genbankParser(entry)
yield seq
free(entry)

7
python/obitools3/uri/decode.pyx Executable file → Normal file
View File

@ -171,7 +171,8 @@ Reads an URI and returns a tuple containing:
def open_uri(uri,
bint input=True,
type newviewtype=View,
dms_only=False):
dms_only=False,
force_file=False):
cdef bytes urib = tobytes(uri)
cdef bytes scheme
@ -195,9 +196,9 @@ def open_uri(uri,
if 'obi' not in config:
config['obi']={}
try:
if not force_file and "defaultdms" in config["obi"]:
default_dms=config["obi"]["defaultdms"]
except KeyError:
else:
default_dms=None
try:

View File

@ -72,7 +72,7 @@ cpdef int count_entries(file, bytes format):
return -1
mmapped_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
total_count += len(re.findall(sep, mmapped_file))
if format != b"ngsfilter" and format != b"tabular":
if format != b"ngsfilter" and format != b"tabular" and format != b"embl" and format != b"genbank":
total_count += 1 # adding +1 for 1st entry because separators include \n (ngsfilter and tabular already count one more because of last \n)
except:
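
count_entries above estimates the number of records by counting entry separators in a memory-mapped file, and adds one for the first entry only for formats whose separator count would otherwise miss it. A simplified standalone version of the counting idea (the FASTA-style separator is illustrative):

import mmap
import re

def count_entries(path, separator=rb"\n>"):
    # Count FASTA-like records by counting separators in a memory-mapped file.
    with open(path, "rb") as f:
        with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
            total = len(re.findall(separator, mm))
    # the first record has no leading "\n>", so add one
    # (assumes the file holds at least one record)
    return total + 1

# e.g. count_entries("reads.fasta")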

View File

@ -1,5 +1,5 @@
major = 3
minor = 0
serial= '0-beta5'
serial= '0-beta14'
version ="%d.%02d.%s" % (major,minor,serial)

View File

@ -409,8 +409,7 @@ int obi_clean(const char* dms_name,
stop = true;
}
#pragma omp parallel default(none) \
shared(thread_count, seq_count, blob_array, complete_sample_count_array, alignment_result_array, \
#pragma omp parallel shared(thread_count, seq_count, blob_array, complete_sample_count_array, alignment_result_array, \
stop, blob1, i, obi_errno, keep_running, stderr, max_ratio, iseq_column, i_view, \
similarity_mode, reference, normalize, threshold, ktable, status_column, o_view, sample_count)
{

42
src/obi_ecopcr.c Executable file → Normal file
View File

@ -105,7 +105,8 @@ static int create_output_columns(Obiview_p o_view, bool kingdom_mode);
* @param o_temp1_column A pointer on the output column for the temperature for the first primer.
* @param o_temp2_column A pointer on the output column for the temperature for the second primer.
*
* @retval 0 if the operation was successfully completed.
* @retval 0 if the sequence was skipped (taxid not found, warning printed).
* @retval 1 if the sequence was successfully printed to the output.
* @retval -1 if an error occurred.
*
* @since July 2018
@ -366,6 +367,17 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
// TODO add check for primer longer than MAX_PAT_LEN (32)
// Get sequence id
seq_id = obi_get_str_with_elt_idx_and_col_p_in_view(i_view, i_id_column, i_idx, 0);
// Get the taxon structure
main_taxon = obi_taxo_get_taxon_with_taxid(taxonomy, taxid);
if (main_taxon == NULL)
{
obidebug(1, "\nWarning: error reading the taxonomic information of a sequence. Seq id: %s, taxid: %d. Probably deprecated taxid. Skipping this sequence.", seq_id, taxid);
return 0;
}
ldelta = (pos1 <= keep_nucleotides)?pos1:keep_nucleotides;
rdelta = ((pos2+keep_nucleotides)>=seq_len)?seq_len-pos2:keep_nucleotides;
@ -431,16 +443,7 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
if (isnan(tm2))
tm2 = OBIFloat_NA;
// Get the taxon structure
main_taxon = obi_taxo_get_taxon_with_taxid(taxonomy, taxid);
if (main_taxon == NULL)
{
obidebug(1, "\nError reading the taxonomic information of a sequence");
return -1;
}
// Write sequence id
seq_id = obi_get_str_with_elt_idx_and_col_p_in_view(i_view, i_id_column, i_idx, 0);
if (obi_set_str_with_elt_idx_and_col_p_in_view(o_view, o_id_column, o_idx, 0, seq_id) < 0)
{
obidebug(1, "\nError writing the sequence id");
@ -629,7 +632,7 @@ static int print_seq(Obiview_p i_view, Obiview_p o_view,
return -1;
}
return 0;
return 1;
}
@ -698,6 +701,7 @@ int obi_ecopcr(const char* i_dms_name,
obiint_t taxid;
char* sequence;
int printed;
SeqPtr apatseq=NULL;
int32_t o1Hits;
@ -1064,7 +1068,7 @@ int obi_ecopcr(const char* i_dms_name,
(!max_len || (length <= max_len)))
{
// Print the found amplicon
if (print_seq(i_view, o_view,
printed = print_seq(i_view, o_view,
i_idx, o_idx,
taxonomy,
sequence,
@ -1090,12 +1094,14 @@ int obi_ecopcr(const char* i_dms_name,
o_strand_column,
o_primer1_column, o_primer2_column,
o_error1_column, o_error2_column,
o_temp1_column, o_temp2_column) < 0)
o_temp1_column, o_temp2_column);
if (printed < 0)
{
obidebug(1, "\nError writing the ecopcr result");
return -1;
}
o_idx++;
else if (printed > 0)
o_idx++;
}
}
}
@ -1152,7 +1158,7 @@ int obi_ecopcr(const char* i_dms_name,
(!max_len || (length <= max_len)))
{
// Print the found amplicon
if (print_seq(i_view, o_view,
printed = print_seq(i_view, o_view,
i_idx, o_idx,
taxonomy,
sequence,
@ -1178,12 +1184,14 @@ int obi_ecopcr(const char* i_dms_name,
o_strand_column,
o_primer1_column, o_primer2_column,
o_error1_column, o_error2_column,
o_temp1_column, o_temp2_column) < 0)
o_temp1_column, o_temp2_column);
if (printed < 0)
{
obidebug(1, "\nError writing the ecopcr result");
return -1;
}
o_idx++;
else if (printed > 0)
o_idx++;
}
}
}
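
The change above turns print_seq's return value into a three-way status (-1 error, 0 skipped with a warning, 1 written) so the caller only advances the output index for amplicons that were actually written. A compact Python sketch of that calling pattern (the function and record layout are hypothetical):

SKIPPED, WRITTEN, ERROR = 0, 1, -1

def print_seq(record, known_taxids):
    # Return ERROR, SKIPPED (unknown taxid, warn only) or WRITTEN.
    if record["taxid"] not in known_taxids:
        print("Warning: deprecated taxid %d, skipping this sequence" % record["taxid"])
        return SKIPPED
    # ... the amplicon would be written to the output view here ...
    return WRITTEN

def run(records, known_taxids):
    o_idx = 0
    for record in records:
        printed = print_seq(record, known_taxids)
        if printed == ERROR:
            raise RuntimeError("Error writing the ecopcr result")
        if printed == WRITTEN:
            o_idx += 1        # only advance for records actually written
    return o_idx

print(run([{"taxid": 9606}, {"taxid": 4242424}], {9606}))   # prints a warning, then 1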

View File

@ -81,8 +81,8 @@
* @param o_dms_name The path to the output DMS.
* @param o_view_name The name of the output view.
* @param o_view_comments The comments to associate with the output view.
* @param primer1 The first primer.
* @param primer2 The second primer.
* @param primer1 The first primer, length must be less than or equal to 32 (because of apat lib limitation).
* @param primer2 The second primer, length must be less than or equal to 32 (because of apat lib limitation).
* @param error_max The maximum number of errors allowed per primer for amplification.
* @param min_len The minimum length of an amplicon.
* @param max_len The maximum length of an amplicon.

View File

@ -100,35 +100,35 @@ int print_assignment_result(Obiview_p output_view, index_t line,
static int create_output_columns(Obiview_p o_view)
{
// Score column
if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOTAG_SCORE_COLUMN_NAME, true) < 0)
if (obi_view_add_column(o_view, ECOTAG_SCORE_COLUMN_NAME, -1, NULL, OBI_FLOAT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{
obidebug(1, "\nError creating the column for the score in ecotag");
return -1;
}
// Assigned taxid column
if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOTAG_TAXID_COLUMN_NAME, true) < 0)
if (obi_view_add_column(o_view, ECOTAG_TAXID_COLUMN_NAME, -1, NULL, OBI_INT, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{
obidebug(1, "\nError creating the column for the assigned taxid in ecotag");
return -1;
}
// Assigned scientific name column
if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOTAG_NAME_COLUMN_NAME, true) < 0)
if (obi_view_add_column(o_view, ECOTAG_NAME_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{
obidebug(1, "\nError creating the column for the assigned scientific name in ecotag");
return -1;
}
// Assignement status column
if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, ECOTAG_STATUS_COLUMN_NAME, true) < 0)
if (obi_view_add_column(o_view, ECOTAG_STATUS_COLUMN_NAME, -1, NULL, OBI_BOOL, 0, 1, NULL, false, false, false, NULL, NULL, -1, "{}", true) < 0)
{
obidebug(1, "\nError creating the column for the assignment status in ecotag");
return -1;
}
// Column for array of best match ids
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, true, false, NULL, NULL, -1, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, true) < 0)
if (obi_view_add_column(o_view, ECOTAG_BEST_MATCH_IDS_COLUMN_NAME, -1, NULL, OBI_STR, 0, 1, NULL, false, true, false, NULL, NULL, -1, "{}", true) < 0)
{
obidebug(1, "\nError creating the column for the array of ids of the best match in ecotag");
return -1;
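
The ecotag hunk above replaces the column name with "{}" as the comments argument of obi_view_add_column. This presumably works because column comments are expected to parse as a dictionary (elsewhere in the changeset, view comments are read back with eval), so an arbitrary name string in that slot broke the generated formatting; treat that reading as an assumption. A quick illustration of why an empty dictionary literal is the safe default:

import json

for comments in ("ECOTAG_SCORE_COLUMN_NAME", "{}"):
    try:
        print(repr(comments), "->", json.loads(comments))
    except json.JSONDecodeError as err:
        print(repr(comments), "-> parse error:", err.msg)
# 'ECOTAG_SCORE_COLUMN_NAME' -> parse error: Expecting value
# '{}' -> {}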

View File

@ -696,6 +696,12 @@ int obi_clean_dms(const char* dms_path)
// return -1;
// }
if (obi_close_dms(dms, true) < 0)
{
obidebug(1, "\nError closing a DMS after cleaning");
return -1;
}
return 0;
}

View File

@ -2376,9 +2376,10 @@ int read_merged_dmp(const char* taxdump, OBIDMS_taxonomy_p tax, int32_t* delnode
// and the deleted taxids with no current reference. An element of the list is composed of the taxid, and the index
// of the taxon in the taxa structure, or -1 for deleted taxids.
// Creating the merged list requires to merge the 3 ordered lists into one.
while (((nT < (tax->taxa)->count) && ((tax->taxa)->taxon[nT].taxid < old_taxid)) || ((nD >= 0) && (delnodes[nD] < old_taxid)))
while (((nT < (tax->taxa)->count) && ((tax->taxa)->taxon[nT].taxid < old_taxid)) ||
((nD >= 0) && (delnodes[nD] < old_taxid)))
{
if ((tax->taxa)->taxon[nT].taxid < delnodes[nD])
if ((nT < (tax->taxa)->count) && (tax->taxa)->taxon[nT].taxid < delnodes[nD])
{ // Add element from taxa list
// Enlarge structure if needed
if (n == buffer_size)
@ -2401,7 +2402,7 @@ int read_merged_dmp(const char* taxdump, OBIDMS_taxonomy_p tax, int32_t* delnode
nT++;
n++;
}
else if (delnodes[nD] < (tax->taxa)->taxon[nT].taxid)
else
{ // Add element from deleted taxids list
// Enlarge structure if needed
if (n == buffer_size)
@ -3036,12 +3037,12 @@ OBIDMS_taxonomy_p obi_read_taxonomy(OBIDMS_p dms, const char* taxonomy_name, boo
strcpy(tax->tax_name, taxonomy_name);
buffer_size = 2048;
taxonomy_path = get_taxonomy_path(dms, taxonomy_name);
if (taxonomy_path == NULL)
return NULL;
buffer_size = strlen(taxonomy_path) + strlen(taxonomy_name) + 6;
// Read ranks
ranks_file_name = (char*) malloc(buffer_size*sizeof(char));
if (ranks_file_name == NULL)
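
The read_merged_dmp fix above is, at its core, a guarded merge of two ordered taxid lists (taxa that still exist and deleted taxids), where the branch that reads from the taxa list must also check that that list is not already exhausted. A small standalone sketch of that merge, using plain Python lists in place of the C taxonomy structures:

def merge_taxid_lists(taxa, deleted):
    # taxa    : ascending taxids that still exist (index = position in the list)
    # deleted : ascending taxids with no current reference (index = -1)
    merged = []
    nT, nD = 0, 0
    while nT < len(taxa) or nD < len(deleted):
        # Guard on nT first: without it, an exhausted taxa list would be read
        # past its end, which is the kind of out-of-bounds access fixed above.
        if nT < len(taxa) and (nD >= len(deleted) or taxa[nT] < deleted[nD]):
            merged.append((taxa[nT], nT))
            nT += 1
        else:
            merged.append((deleted[nD], -1))
            nD += 1
    return merged

print(merge_taxid_lists([3, 7, 10], [1, 9000000]))
# [(1, -1), (3, 0), (7, 1), (10, 2), (9000000, -1)]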

View File

@ -1973,7 +1973,7 @@ int obi_enlarge_column(OBIDMS_column_p column)
// Calculate the new file size
old_line_count = (column->header)->line_count;
new_line_count = old_line_count * COLUMN_GROWTH_FACTOR;
new_line_count = ceil((double) old_line_count * (double) COLUMN_GROWTH_FACTOR);
if (new_line_count > MAXIMUM_LINE_COUNT)
{
@ -2381,6 +2381,54 @@ char* obi_get_elements_names(OBIDMS_column_p column)
}
char* obi_get_formatted_elements_names(OBIDMS_column_p column)
{
char* elements_names;
int i, j;
int elt_idx;
int len;
elements_names = (char*) malloc(((column->header)->elements_names_length + (column->header)->nb_elements_per_line) * sizeof(char));
if (elements_names == NULL)
{
obi_set_errno(OBI_MALLOC_ERROR);
obidebug(1, "\nError allocating memory for elements names");
return NULL;
}
j = 0;
for (i=0; i < (column->header)->nb_elements_per_line; i++)
{
elt_idx = ((column->header)->elements_names_idx)[i];
len = strlen(((column->header)->elements_names)+elt_idx);
memcpy(elements_names+j, ((column->header)->elements_names)+elt_idx, len*sizeof(char));
j = j + len;
elements_names[j] = ';';
j++;
elements_names[j] = ' ';
j++;
}
elements_names[j - 1] = '\0';
return elements_names;
}
char* obi_column_formatted_infos(OBIDMS_column_p column)
{
char* column_infos;
char* elt_names;
column_infos = malloc(1024 * sizeof(char));
elt_names = obi_get_formatted_elements_names(column);
free(elt_names);
return column_infos;
}
int obi_column_prepare_to_set_value(OBIDMS_column_p column, index_t line_nb, index_t elt_idx)
{
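
The ceil() added to obi_enlarge_column above matters once the growth factor drops below 2 (it is reduced from 2 to 1.3 in the header shown in the next file): with plain truncation, a line count of 1 times 1.3 truncates back to 1 and the enlargement loop never makes progress. The same arithmetic in plain Python rather than the C code:

import math

def grow_truncated(n, factor=1.3):
    return int(n * factor)        # truncates: 1 -> 1, stuck forever on small counts

def grow_ceiled(n, factor=1.3):
    return math.ceil(n * factor)  # rounds up: 1 -> 2 -> 3 -> 4 ...

print(grow_truncated(1), grow_ceiled(1))   # 1 2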

View File

@ -34,7 +34,7 @@
#define NB_ELTS_MAX_IF_DEFAULT_NAME (1000000) /**< The maximum number of elements per line if the default element names
* are used ("0\01\02\0...\0n"), considering ELEMENTS_NAMES_MAX. // TODO not up to date
*/
#define COLUMN_GROWTH_FACTOR (2) /**< The growth factor when a column is enlarged.
#define COLUMN_GROWTH_FACTOR (1.3) /**< The growth factor when a column is enlarged.
*/
#define MAXIMUM_LINE_COUNT (1000000000) /**< The maximum line count for the data of a column (1E9). //TODO
*/
@ -505,6 +505,14 @@ index_t obi_column_get_element_index_from_name(OBIDMS_column_p column, const cha
char* obi_get_elements_names(OBIDMS_column_p column);
// TODO
//char* obi_get_formatted_elements_names(OBIDMS_column_p column);
// TODO
//char* obi_column_formatted_infos(OBIDMS_column_p column);
/**
* @brief Prepares a column to set a value.
*

View File

@ -34,8 +34,8 @@
* @brief enum for the boolean OBIType.
*/
typedef enum OBIBool {
FALSE = 0,
TRUE = 1,
OBIFalse = 0,
OBITrue = 1,
OBIBool_NA = 2
} obibool_t, *obibool_p; /**< a boolean true/false value */ // TODO check name convention?

View File

@ -1407,7 +1407,7 @@ static char* view_check_qual_match_seqs(Obiview_p view)
// Test that the lengths of the quality and the sequence are equal
if ((size_t)qual_len != strlen(seq))
{
obidebug(1, "\nError checking the predicate for view %s: The sequences and sequence quality arrays match.", (view->infos)->name);
obidebug(1, "\nError checking the predicate for view %s: The sequences and sequence quality arrays match (index %lld, seq=%s, quality length = %d).", (view->infos)->name, j, seq, qual_len);
return NULL;
}
}
@ -2380,11 +2380,12 @@ int obi_view_add_column(Obiview_p view,
}
int obi_view_delete_column(Obiview_p view, const char* column_name)
int obi_view_delete_column(Obiview_p view, const char* column_name, bool delete_file)
{
int i;
bool found;
OBIDMS_column_p column;
char* col_to_delete_path;
// Check that the view is not read-only
if (view->read_only)
@ -2406,8 +2407,31 @@ int obi_view_delete_column(Obiview_p view, const char* column_name)
obidebug(1, "\nError getting a column from the linked list of column pointers of a view when deleting a column from a view");
return -1;
}
// Keep column path if need to delete the file
if (delete_file)
{
col_to_delete_path = obi_column_full_path(view->dms, column->header->name, column->header->version);
if (col_to_delete_path == NULL)
{
obidebug(1, "\nError getting a column file path when deleting a column");
return -1;
}
}
obi_close_column(column);
// Delete file if needed
if (delete_file)
{
if (remove(col_to_delete_path) < 0)
{
obi_set_errno(OBICOL_UNKNOWN_ERROR);
obidebug(1, "\nError deleting a column file when deleting unfinished columns: file %s", col_to_delete_path);
return -1;
}
free(col_to_delete_path);
}
view->columns = ll_delete(view->columns, i);
// TODO how do we check for error? NULL can be empty list
found = true;
@ -3047,7 +3071,7 @@ int obi_create_auto_id_column(Obiview_p view, const char* prefix)
// Delete old ID column if it exists
if (obi_view_get_column(view, ID_COLUMN) != NULL)
{
if (obi_view_delete_column(view, ID_COLUMN) < 0)
if (obi_view_delete_column(view, ID_COLUMN, false) < 0)
{
obidebug(1, "Error deleting an ID column to replace it in a view");
return -1;

View File

@ -52,6 +52,15 @@
#define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities
* in NUC_SEQS_VIEW views.
*/
#define REVERSE_QUALITY_COLUMN "REVERSE_QUALITY" /**< The name of the column containing the sequence qualities
* of the reverse read (generated by ngsfilter, used by alignpairedend).
*/
#define REVERSE_SEQUENCE_COLUMN "REVERSE_SEQUENCE" /**< The name of the column containing the sequence
* of the reverse read (generated by ngsfilter, used by alignpairedend).
*/
#define QUALITY_COLUMN "QUALITY" /**< The name of the column containing the sequence qualities
* in NUC_SEQS_VIEW views.
*/
#define COUNT_COLUMN "COUNT" /**< The name of the column containing the sequence counts
* in NUC_SEQS_VIEW views.
*/
@ -431,6 +440,7 @@ int obi_view_add_column(Obiview_p view,
*
* @param view A pointer on the view.
* @param column_name The name of the column that should be deleted from the view.
* @param delete_file Whether the column file should be deleted. Use carefully re: dependencies.
*
* @returns A value indicating the success of the operation.
* @retval 0 if the operation was successfully completed.
@ -439,7 +449,7 @@ int obi_view_add_column(Obiview_p view,
* @since February 2016
* @author Celine Mercier (celine.mercier@metabarcoding.org)
*/
int obi_view_delete_column(Obiview_p view, const char* column_name);
int obi_view_delete_column(Obiview_p view, const char* column_name, bool delete_file);
/**

View File

@ -951,15 +951,15 @@ double generic_sse_banded_lcs_align(char* seq1, char* seq2, double threshold, bo
// Put the DNA sequences in the int arrays. Longest sequence must be first argument of sse_align function
if (l2 > l1)
{
putSeqInSeq(iseq1, seq2, l2, TRUE);
putSeqInSeq(iseq2, seq1, l1, FALSE);
putSeqInSeq(iseq1, seq2, l2, true);
putSeqInSeq(iseq2, seq1, l1, false);
// Compute alignment
id = sse_banded_lcs_align(iseq1, iseq2, l2, l1, normalize, reference, similarity_mode, address, LCSmin, lcs_length, ali_length);
}
else
{
putSeqInSeq(iseq1, seq1, l1, TRUE);
putSeqInSeq(iseq2, seq2, l2, FALSE);
putSeqInSeq(iseq1, seq1, l1, true);
putSeqInSeq(iseq2, seq2, l2, false);
// Compute alignment
id = sse_banded_lcs_align(iseq1, iseq2, l1, l2, normalize, reference, similarity_mode, address, LCSmin, lcs_length, ali_length);
}
@ -1054,15 +1054,15 @@ double obiblob_sse_banded_lcs_align(Obi_blob_p seq1, Obi_blob_p seq2, double thr
// Put the DNA sequences in the int arrays. Longest sequence must be first argument of sse_align function
if (l2 > l1)
{
putBlobInSeq(iseq1, seq2, l2, TRUE);
putBlobInSeq(iseq2, seq1, l1, FALSE);
putBlobInSeq(iseq1, seq2, l2, true);
putBlobInSeq(iseq2, seq1, l1, false);
// Compute alignment
id = sse_banded_lcs_align(iseq1, iseq2, l2, l1, normalize, reference, similarity_mode, address, LCSmin, lcs_length, ali_length);
}
else
{
putBlobInSeq(iseq1, seq1, l1, TRUE);
putBlobInSeq(iseq2, seq2, l2, FALSE);
putBlobInSeq(iseq1, seq1, l1, true);
putBlobInSeq(iseq2, seq2, l2, false);
// Compute alignment
id = sse_banded_lcs_align(iseq1, iseq2, l1, l2, normalize, reference, similarity_mode, address, LCSmin, lcs_length, ali_length);
}