Compare commits

13 Commits (only the SHA1 column survived extraction)

d579bb2749
10e5ebdbc0
8833110490
bd38449f2d
904823c827
af68a1024c
425fe25bd2
d48aed38d4
5e32f8523e
8f1d94fd24
38f42cb0fb
7f0f63cf26
cba78111c9
@@ -205,19 +205,25 @@ def run(config):

     if type(entries) == list:
         forward = entries[0]
         reverse = entries[1]
-        aligner = Kmer_similarity(forward, \
-                                  view2=reverse, \
-                                  kmer_size=config['alignpairedend']['kmersize'], \
-                                  reversed_column=None)
+        if len(forward) == 0 or len(reverse) == 0:
+            aligner = None
+        else:
+            aligner = Kmer_similarity(forward, \
+                                      view2=reverse, \
+                                      kmer_size=config['alignpairedend']['kmersize'], \
+                                      reversed_column=None)
     else:
-        aligner = Kmer_similarity(entries, \
-                                  column2=entries[REVERSE_SEQUENCE_COLUMN], \
-                                  qual_column2=entries[REVERSE_QUALITY_COLUMN], \
-                                  kmer_size=config['alignpairedend']['kmersize'], \
-                                  reversed_column=entries[b'reversed'])   # column created by the ngsfilter tool
+        if len(entries) == 0:
+            aligner = None
+        else:
+            aligner = Kmer_similarity(entries, \
+                                      column2=entries[REVERSE_SEQUENCE_COLUMN], \
+                                      qual_column2=entries[REVERSE_QUALITY_COLUMN], \
+                                      kmer_size=config['alignpairedend']['kmersize'], \
+                                      reversed_column=entries[b'reversed'])   # column created by the ngsfilter tool

     ba = alignmentIterator(entries, aligner)

     i = 0
     for ali in ba:

@@ -251,7 +257,7 @@ def run(config):
     pb(i, force=True)
     print("", file=sys.stderr)

-    if kmer_ali :
+    if kmer_ali and aligner is not None:
         aligner.free()

     # Save command config in View and DMS comments
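The guards above only build the k-mer aligner when the input view(s) actually contain sequences, and the second hunk frees it only when it exists. The sketch below shows the same pattern in plain Python; `KmerAligner` and `align_paired_end` are hypothetical stand-ins for the Cython `Kmer_similarity` class and the surrounding loop, not the tool's real API.

```python
class KmerAligner:
    """Hypothetical stand-in for the Cython Kmer_similarity class."""
    def __init__(self, view1, view2=None, kmer_size=3):
        self.view1, self.view2, self.kmer_size = view1, view2, kmer_size
    def align(self, f, r):
        # Placeholder scoring: number of shared k-mers between the two reads.
        kmers = lambda s: {s[i:i + self.kmer_size] for i in range(len(s) - self.kmer_size + 1)}
        return len(kmers(f) & kmers(r))
    def free(self):
        pass  # native buffers would be released here

def align_paired_end(forward, reverse, kmer_size=3):
    # Build the aligner only when both views contain sequences (guard from the diff above).
    if len(forward) == 0 or len(reverse) == 0:
        aligner = None
    else:
        aligner = KmerAligner(forward, view2=reverse, kmer_size=kmer_size)

    results = [] if aligner is None else [aligner.align(f, r) for f, r in zip(forward, reverse)]

    # Free native resources only if the aligner was actually created.
    if aligner is not None:
        aligner.free()
    return results

print(align_paired_end(["ACGTACGT"], ["CGTACGTT"]))   # non-empty input
print(align_paired_end([], ["CGTACGTT"]))             # empty input: no aligner, no crash
```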
@@ -322,7 +322,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
                 sequences[0] = sequences[0][directmatch[1][2]:]
             else:
                 sequences[1] = sequences[1][directmatch[1][2]:]
-                sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq       # used by alignpairedend tool
+            sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq           # used by alignpairedend tool
             sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality        # used by alignpairedend tool

             if directmatch[0].forward:

@@ -369,7 +369,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
                 sequences[0] = sequences[0][:r[1]]
             else:
                 sequences[1] = sequences[1][:r[1]]
-                sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq       # used by alignpairedend tool
+            sequences[0][REVERSE_SEQUENCE_COLUMN] = sequences[1].seq           # used by alignpairedend tool
             sequences[0][REVERSE_QUALITY_COLUMN] = sequences[1].quality        # used by alignpairedend tool
             # do the same on the other seq
             if first_match_first_seq:

@@ -394,7 +394,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
                 seq_to_match = sequences[0]
             reversematch = []
             # Compute begin
-            begin=directmatch[1][2]+1   # end of match + 1 on the same sequence
+            #begin=directmatch[1][2]+1  # end of match + 1 on the same sequence -- No, already cut out forward primer
             # Try reverse matching on the other sequence:
             new_seq = True
             pattern = 0

@@ -408,7 +408,7 @@ cdef tuple annotate(sequences, infos, no_tags, verbose=False):
                 primer=p
                 # Saving original primer as 4th member of the tuple to serve as correct key in infos dict even if it might have been reversed complemented
                 # (3rd member already used by directmatch)
-                reversematch.append((primer, primer(seq_to_match, same_sequence=not new_seq, pattern=pattern, begin=begin), None, p))
+                reversematch.append((primer, primer(seq_to_match, same_sequence=not new_seq, pattern=pattern, begin=0), None, p))
                 new_seq = False
                 pattern+=1
             # Choose match closer to the end of the sequence
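The `begin=0` change reflects that the forward primer has already been sliced off the read at this point, so positions on the trimmed read are renumbered from zero. A plain-string illustration (not the repository's primer-matching API):

```python
# Plain-string illustration of why the reverse-primer search starts at 0:
# once the forward primer is sliced off, positions on the trimmed read are
# renumbered from zero, so an offset computed on the untrimmed read would
# skip real sequence.
read = "AACCGGTT" + "ACGTACGTACGT"      # forward primer + insert
fwd_primer = "AACCGGTT"

end_of_fwd = read.find(fwd_primer) + len(fwd_primer)
trimmed = read[end_of_fwd:]             # primer already cut out, as in annotate()

# Searching the trimmed read from position 0 finds the insert immediately;
# reusing end_of_fwd as the start position would be wrong here.
assert trimmed.find("ACGT", 0) == 0
assert trimmed.find("ACGT", end_of_fwd) != 0
```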
@@ -645,6 +645,7 @@ def run(config):

     g = 0
     u = 0
+    i = 0
     no_tags = config['ngsfilter']['notags']
     try:
         for i in range(entries_len):
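The added `i = 0` guarantees the loop counter exists even when `entries_len` is zero or an exception is raised before the first iteration, since code after the `try` still reports how far the loop got. A minimal, hypothetical sketch of why that matters:

```python
import sys

def run_over(entries):
    i = 0                                   # initialized before the try/for, as in the fix above
    try:
        for i in range(len(entries)):
            _ = entries[i]                  # placeholder for the real per-entry work
    except Exception as e:
        print("stopped at entry", i, ":", e, file=sys.stderr)
    # Without the initialization this line raises NameError when entries is empty,
    # because the for loop never binds i.
    print("last index reached:", i, file=sys.stderr)

run_over([])        # safe: i exists even though the loop body never ran
```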
@@ -301,8 +301,8 @@ def fill_column(config, infos, col) :

 def create_random_column(config, infos) :
     alias = random.choice([b'', random_unique_name(infos)])
     tuples = random.choice([True, False])
+    dict_column = False
     if not tuples :
-        dict_column = random.choice([True, False])
         nb_elements_per_line=random.randint(1, config['test']['maxelts'])
+        if nb_elements_per_line > 1:
+            dict_column = True

@@ -311,7 +311,6 @@ def create_random_column(config, infos) :
             elements_names.append(random_unique_element_name(config, infos))
         elements_names = random.choice([None, elements_names])
     else :
-        dict_column = False
         nb_elements_per_line = 1
         elements_names = None
     name = random_unique_name(infos)
@@ -354,8 +354,8 @@ cdef uniq_sequences(View_NUC_SEQS view, View_NUC_SEQS o_view, ProgressBar pb, di
                 key = mergedKeys[k]
                 merged_col_name = mergedKeys_m[k]

-                if merged_infos[merged_col_name]['nb_elts'] == 1:
-                    raise Exception("Can't merge information from a tag with only one element (e.g. one sample ; don't use -m option)")
+                # if merged_infos[merged_col_name]['nb_elts'] == 1:
+                #     raise Exception("Can't merge information from a tag with only one element (e.g. one sample ; don't use -m option)")

                 if merged_col_name in view:
                     i_col = view[merged_col_name]
@@ -8,7 +8,7 @@ Created on feb 20th 2018

 import types
-from obitools3.utils cimport __etag__
+from obitools3.utils cimport str2bytes


 def tabIterator(lineiterator,
                 bint header = False,

@@ -75,7 +75,7 @@ def tabIterator(lineiterator,
                     continue
                 else:
                     # TODO ??? default column names? like R?
-                    keys = [i for i in range(len(line.split(sep)))]
+                    keys = [str2bytes(str(i)) for i in range(len(line.split(sep)))]

             while skipped < skip :
                 line = next(iterator)
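The tab reader keys each entry by byte strings, so auto-generated column names for header-less files must be bytes as well. In the sketch below, `str(i).encode("ascii")` stands in for the repository's `str2bytes` helper:

```python
# Default column names for a header-less tab file, generated as bytes.
# str(i).encode("ascii") is a stand-in for the obitools3 str2bytes helper.
line = b"id_1\tACGT\t0.97"
sep = b"\t"
keys = [str(i).encode("ascii") for i in range(len(line.split(sep)))]
assert keys == [b"0", b"1", b"2"]   # byte keys, usable with bytes-keyed entries
```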
@@ -53,7 +53,11 @@ def entryIteratorFactory(lineiterator,

     i = iterator

-    first=next(i)
+    try:
+        first=next(i)
+    except StopIteration:
+        first=""
+        pass

     format=b"tabular"

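Calling `next()` on an empty iterator raises `StopIteration`; the fix above catches it and falls back to an empty first line. The sketch below shows the same peek-safely pattern, with `itertools.chain` used to push the peeked item back (the chain step is an illustration, not part of the diff):

```python
from itertools import chain

def peek_first(iterator):
    """Return (first_item, iterator_including_first) without losing data."""
    it = iter(iterator)
    try:
        first = next(it)
    except StopIteration:        # empty input: don't crash, use a neutral value
        return "", iter(())
    return first, chain([first], it)

first, lines = peek_first([])
print(repr(first))               # '' on empty input
first, lines = peek_first(["a\tb\n", "c\td\n"])
print(repr(first), list(lines))  # the first line is still part of the stream
```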
@@ -1,5 +1,5 @@
 major = 3
 minor = 0
-serial= '0b43'
+serial= '1b4'

 version ="%d.%d.%s" % (major,minor,serial)
src/obi_clean.c (128 changed lines)

@@ -229,6 +229,8 @@ int obi_clean(const char* dms_name,
         return -1;
     }

+    seq_count = (i_view->infos)->line_count;
+
     // Open the sequence column
     if (strcmp((i_view->infos)->view_type, VIEW_TYPE_NUC_SEQS) == 0)
         iseq_column = obi_view_get_column(i_view, NUC_SEQUENCE_COLUMN);

@@ -245,7 +247,7 @@ int obi_clean(const char* dms_name,
     }

     // Open the sample column if there is one
-    if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL))
+    if ((strcmp(sample_column_name, "") == 0) || (sample_column_name == NULL) || (seq_count == 0))
     {
         fprintf(stderr, "Info: No sample information provided, assuming one sample.\n");
         sample_column = obi_view_get_column(i_view, COUNT_COLUMN);
@@ -340,66 +342,67 @@ int obi_clean(const char* dms_name,
         return -1;
     }

-    // Build kmer tables
-    ktable = hash_seq_column(i_view, iseq_column, 0);
-    if (ktable == NULL)
-    {
-        obi_set_errno(OBI_CLEAN_ERROR);
-        obidebug(1, "\nError building kmer tables before aligning");
-        return -1;
-    }
-
-    seq_count = (i_view->infos)->line_count;
-
-    // Allocate arrays for sample counts otherwise reading in mapped files takes longer
-    complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
-    if (complete_sample_count_array == NULL)
-    {
-        obi_set_errno(OBI_MALLOC_ERROR);
-        obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
-        return -1;
-    }
-    for (samp=0; samp < sample_count; samp++)
-    {
-        for (k=0; k<seq_count; k++)
-            complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
-    }
-
-    // Allocate arrays for blobs otherwise reading in mapped files takes longer
-    blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
-    if (blob_array == NULL)
-    {
-        obi_set_errno(OBI_MALLOC_ERROR);
-        obidebug(1, "\nError allocating memory for the array of blobs");
-        return -1;
-    }
-    for (k=0; k<seq_count; k++)
-    {
-        blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
-    }
-
-    // Allocate alignment result array (byte at 0 if not aligned yet,
-    // 1 if sequence at index has a similarity above the threshold with the current sequence,
-    // 2 if sequence at index has a similarity below the threshold with the current sequence)
-    alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
-    if (alignment_result_array == NULL)
-    {
-        obi_set_errno(OBI_MALLOC_ERROR);
-        obidebug(1, "\nError allocating memory for alignment result array");
-        return -1;
-    }
-
-    // Initialize all sequences to singletons or NA if no sequences in that sample
-    for (k=0; k<seq_count; k++)
-    {
-        for (samp=0; samp < sample_count; samp++)
-        {
-            if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
-            {
-                obidebug(1, "\nError initializing all sequences to singletons");
-                return -1;
-            }
-        }
-    }
+    if (seq_count > 0)
+    {
+        // Build kmer tables
+        ktable = hash_seq_column(i_view, iseq_column, 0);
+        if (ktable == NULL)
+        {
+            obi_set_errno(OBI_CLEAN_ERROR);
+            obidebug(1, "\nError building kmer tables before aligning");
+            return -1;
+        }
+
+        // Allocate arrays for sample counts otherwise reading in mapped files takes longer
+        complete_sample_count_array = (int*) malloc(seq_count * sample_count * sizeof(int));
+        if (complete_sample_count_array == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for the array of sample counts, size: %lld", seq_count * sample_count * sizeof(int));
+            return -1;
+        }
+        for (samp=0; samp < sample_count; samp++)
+        {
+            for (k=0; k<seq_count; k++)
+                complete_sample_count_array[k+(samp*seq_count)] = obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp);
+        }
+
+        // Allocate arrays for blobs otherwise reading in mapped files takes longer
+        blob_array = (Obi_blob_p*) malloc(seq_count * sizeof(Obi_blob_p));
+        if (blob_array == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for the array of blobs");
+            return -1;
+        }
+        for (k=0; k<seq_count; k++)
+        {
+            blob_array[k] = obi_get_blob_with_elt_idx_and_col_p_in_view(i_view, iseq_column, k, 0);
+        }
+
+        // Allocate alignment result array (byte at 0 if not aligned yet,
+        // 1 if sequence at index has a similarity above the threshold with the current sequence,
+        // 2 if sequence at index has a similarity below the threshold with the current sequence)
+        alignment_result_array = (byte_t*) calloc(seq_count, sizeof(byte_t));
+        if (alignment_result_array == NULL)
+        {
+            obi_set_errno(OBI_MALLOC_ERROR);
+            obidebug(1, "\nError allocating memory for alignment result array");
+            return -1;
+        }
+
+        // Initialize all sequences to singletons or NA if no sequences in that sample
+        for (k=0; k<seq_count; k++)
+        {
+            for (samp=0; samp < sample_count; samp++)
+            {
+                if (obi_get_int_with_elt_idx_and_col_p_in_view(i_view, sample_column, k, samp) != OBIInt_NA)    // Only initialize samples where there are some sequences
+                {
+                    if (obi_set_char_with_elt_idx_and_col_p_in_view(o_view, status_column, k, samp, 's') < 0)
+                    {
+                        obidebug(1, "\nError initializing all sequences to singletons");
+                        return -1;
+                    }
+                }
+            }
+        }
+    }
@@ -551,17 +554,20 @@ int obi_clean(const char* dms_name,
             }
         }

-    free_kmer_tables(ktable, seq_count);
-    free(complete_sample_count_array);
-    free(blob_array);
-    free(alignment_result_array);
+    if (seq_count > 0)
+    {
+        free_kmer_tables(ktable, seq_count);
+        free(complete_sample_count_array);
+        free(blob_array);
+        free(alignment_result_array);
+    }

     fprintf(stderr, "\n");

     if (stop)
         return -1;

-    if (heads_only)
+    if (heads_only && (seq_count > 0))
     {
         line_selection = malloc((((o_view->infos)->line_count) + 1) * sizeof(index_t));
         if (line_selection == NULL)

@@ -635,7 +641,7 @@ int obi_clean(const char* dms_name,
     }

     // Flag the end of the line selection
-    if (heads_only)
+    if (heads_only && (seq_count > 0))
         line_selection[l] = -1;

     // Create new view with line selection if heads only
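Taken together, the obi_clean changes gate the k-mer tables, the per-sample buffers, and their matching frees on `seq_count > 0`, so setup and teardown stay symmetric when the input view is empty. A compact Python sketch of that "guard both setup and teardown" pattern, with purely illustrative names:

```python
def clean(sequences, sample_count=1):
    n = len(sequences)

    tables = counts = None
    if n > 0:                                   # build work buffers only for non-empty input
        tables = [set(s[i:i + 3] for i in range(len(s) - 2)) for s in sequences]
        counts = [0] * (n * sample_count)

    heads = []
    if n > 0:                                   # main pass, skipped entirely when empty
        heads = [s for s, t in zip(sequences, tables) if len(t) > 0]

    if n > 0:                                   # teardown mirrors setup, as in the C code
        del tables, counts
    return heads

print(clean([]))                    # [] -- no buffers built, nothing to release
print(clean(["ACGTACGT", "TTTT"]))
```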
src/obiavl.c (117 changed lines)

@@ -582,6 +582,7 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
 {
     size_t  file_size;
     size_t  new_data_size;
+    size_t  header_size;
     double  multiple;
     int     file_descriptor;

@@ -589,6 +590,8 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
     multiple = ceil((double) (ONE_IF_ZERO((avl->header)->nb_items * sizeof(AVL_node_t))) / (double) getpagesize());
     new_data_size = ((size_t) multiple) * getpagesize();

+    header_size = (avl->header)->header_size;
+
     // Check that it is actually greater than the current size of the file, otherwise no need to truncate
     if ((avl->header)->avl_size == new_data_size)
         return 0;

@@ -596,16 +599,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
     // Get the file descriptor
     file_descriptor = avl->avl_fd;

-    // Unmap the tree before truncating the file
+    // Unmap the entire file before truncating it (WSL requirement)
     if (munmap(avl->tree, (avl->header)->avl_size) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);
         obidebug(1, "\nError munmapping the tree of an AVL before truncating");
         return -1;
     }
+    if (munmap(avl->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the tree of an AVL before truncating");
+        return -1;
+    }

     // Truncate the file
-    file_size = (avl->header)->header_size + new_data_size;
+    file_size = header_size + new_data_size;
     if (ftruncate(file_descriptor, file_size) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);
@@ -613,7 +622,22 @@ int truncate_avl_to_size_used(OBIDMS_avl_p avl) // TODO is it necessary to unmap
         return -1;
     }

-    // Remap the data
+    // Remap the header and the data
+
+    avl->header = mmap(NULL,
+                       header_size,
+                       PROT_READ | PROT_WRITE,
+                       MAP_SHARED,
+                       file_descriptor,
+                       0
+                      );
+    if (avl->header == MAP_FAILED)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
+        return -1;
+    }

     avl->tree = mmap(NULL,
                      new_data_size,
                      PROT_READ | PROT_WRITE,
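All of the obiavl changes follow one sequence: unmap every mapping of the file (data and header), ftruncate to the new size, then mmap both again, because WSL refuses to truncate a file that still has live mappings. A minimal Python sketch of the same shrink sequence using the standard mmap module, assuming `header_size` is a multiple of `mmap.ALLOCATIONGRANULARITY` (names and file layout are illustrative, not the library's API):

```python
import mmap, os

def truncate_mapped_file(path, header_size, new_data_size):
    """Shrink a header+data file that is normally kept memory-mapped.

    Mirrors the order used above: unmap everything, ftruncate, then remap.
    header_size is assumed to be a multiple of mmap.ALLOCATIONGRANULARITY.
    """
    fd = os.open(path, os.O_RDWR)
    header = mmap.mmap(fd, header_size, offset=0)            # existing mapping
    try:
        # 1. Unmap the entire file before truncating it (the WSL requirement).
        header.close()
        # 2. Truncate the file down to header + used data.
        os.ftruncate(fd, header_size + new_data_size)
        # 3. Remap the header and the data.
        header = mmap.mmap(fd, header_size, offset=0)
        data = mmap.mmap(fd, new_data_size, offset=header_size)
        return fd, header, data
    except OSError:
        os.close(fd)
        raise
```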
@@ -640,6 +664,7 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
 {
     size_t   file_size;
     index_t  new_data_size;
+    size_t   header_size;
     double   multiple;
     int      file_descriptor;

@@ -647,6 +672,8 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
     multiple = ceil((double) (ONE_IF_ZERO((avl_data->header)->data_size_used)) / (double) getpagesize());
     new_data_size = ((index_t) multiple) * getpagesize();

+    header_size = (avl_data->header)->header_size;
+
     // Check that it is actually greater than the current size of the file, otherwise no need to truncate
     if ((avl_data->header)->data_size_max >= new_data_size)
         return 0;

@@ -654,7 +681,8 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
     // Get the file descriptor
     file_descriptor = avl_data->data_fd;

-    // Unmap the data before truncating the file
+    // Unmap the entire file before truncating it (WSL requirement)
+
     if (munmap(avl_data->data, (avl_data->header)->data_size_max) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);

@@ -662,8 +690,15 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
         return -1;
     }

+    if (munmap(avl_data->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the header of an AVL before truncating");
+        return -1;
+    }
+
     // Truncate the file
-    file_size = (avl_data->header)->header_size + new_data_size;
+    file_size = header_size + new_data_size;
     if (ftruncate(file_descriptor, file_size) < 0)
     {
         obi_set_errno(OBI_AVL_ERROR);

@@ -672,6 +707,22 @@ int truncate_avl_data_to_size_used(OBIDMS_avl_data_p avl_data) // TODO is it nec
     }

     // Remap the data
+
+    avl_data->header = mmap(NULL,
+                            header_size,
+                            PROT_READ | PROT_WRITE,
+                            MAP_SHARED,
+                            file_descriptor,
+                            0
+                           );
+
+    if (avl_data->header == MAP_FAILED)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError re-mmapping the header of an AVL after truncating");
+        return -1;
+    }
+
     avl_data->data = mmap(NULL,
                           new_data_size,
                           PROT_READ | PROT_WRITE,
@@ -710,6 +761,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
     header_size = (avl->header)->header_size;
     file_size = header_size + new_data_size;

+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(avl->tree, old_data_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
+        return -1;
+    }
+    if (munmap(avl->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the header of an AVL tree file before enlarging");
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(avl_file_descriptor, file_size) < 0)
     {

@@ -718,12 +783,20 @@ int grow_avl(OBIDMS_avl_p avl) // TODO Lock when needed
         return -1;
     }

-    // Unmap and re-map the data
+    // Re-map

-    if (munmap(avl->tree, old_data_size) < 0)
+    avl->header = mmap(NULL,
+                       header_size,
+                       PROT_READ | PROT_WRITE,
+                       MAP_SHARED,
+                       avl_file_descriptor,
+                       0
+                      );
+
+    if (avl->header == MAP_FAILED)
     {
         obi_set_errno(OBI_AVL_ERROR);
-        obidebug(1, "\nError munmapping the tree of an AVL tree file before enlarging");
+        obidebug(1, "\nError re-mmapping the header of an AVL tree file after enlarging the file");
         return -1;
     }

@@ -768,6 +841,20 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
     header_size = (avl_data->header)->header_size;
     file_size = header_size + new_data_size;

+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(avl_data->data, old_data_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
+        return -1;
+    }
+    if (munmap(avl_data->header, header_size) < 0)
+    {
+        obi_set_errno(OBI_AVL_ERROR);
+        obidebug(1, "\nError munmapping the header of an AVL tree data file before enlarging");
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(avl_data_file_descriptor, file_size) < 0)
     {

@@ -776,12 +863,19 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
         return -1;
     }

-    // Unmap and re-map the data
+    // Re-map

-    if (munmap(avl_data->data, old_data_size) < 0)
+    avl_data->header = mmap(NULL,
+                            header_size,
+                            PROT_READ | PROT_WRITE,
+                            MAP_SHARED,
+                            avl_data_file_descriptor,
+                            0
+                           );
+    if (avl_data->header == MAP_FAILED)
     {
         obi_set_errno(OBI_AVL_ERROR);
-        obidebug(1, "\nError munmapping the data of an AVL tree data file before enlarging");
+        obidebug(1, "\nError re-mmapping the header of an AVL tree data file after enlarging the file");
         return -1;
     }

@@ -792,7 +886,6 @@ int grow_avl_data(OBIDMS_avl_data_p avl_data) // TODO Lock when needed
                             avl_data_file_descriptor,
                             header_size
                            );
-
     if (avl_data->data == MAP_FAILED)
     {
         obi_set_errno(OBI_AVL_ERROR);
src/obidms.c (19 changed lines)

@@ -316,6 +316,15 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
     multiple = ceil((double) new_size / (double) getpagesize());
     rounded_new_size = multiple * getpagesize();

+    // Unmap the entire file before truncating it (WSL requirement)
+    if (munmap(dms->infos, (dms->infos)->file_size) < 0)
+    {
+        obi_set_errno(OBIDMS_UNKNOWN_ERROR);
+        obidebug(1, "\nError munmapping a DMS information file when enlarging");
+        close(infos_file_descriptor);
+        return -1;
+    }
+
     // Enlarge the file
     if (ftruncate(infos_file_descriptor, rounded_new_size) < 0)
     {

@@ -325,15 +334,7 @@ static int enlarge_infos_file(OBIDMS_p dms, size_t new_size)
         return -1;
     }

-    // Unmap and remap the file
-    if (munmap(dms->infos, (dms->infos)->file_size) < 0)
-    {
-        obi_set_errno(OBIDMS_UNKNOWN_ERROR);
-        obidebug(1, "\nError munmapping a DMS information file when enlarging");
-        close(infos_file_descriptor);
-        return -1;
-    }
-
+    // Remap the file
     dms->infos = mmap(NULL,
                       rounded_new_size,
                       PROT_READ | PROT_WRITE,
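Enlarging the DMS information file applies the same rule in the growth direction: the munmap now happens before ftruncate instead of after it, and only the remap follows the resize. A short, hypothetical Python sketch of growing a mapped file in that order on a POSIX system:

```python
import mmap, os, tempfile

def enlarge_mapped_file(fd, mapping, new_size):
    page = mmap.ALLOCATIONGRANULARITY
    rounded = ((new_size + page - 1) // page) * page   # round up to a page multiple
    mapping.close()                                    # 1. unmap before truncating (WSL)
    os.ftruncate(fd, rounded)                          # 2. enlarge the file
    return mmap.mmap(fd, rounded)                      # 3. remap at the new size

with tempfile.TemporaryFile() as f:
    os.ftruncate(f.fileno(), mmap.ALLOCATIONGRANULARITY)
    m = mmap.mmap(f.fileno(), 0)
    m = enlarge_mapped_file(f.fileno(), m, 3 * mmap.ALLOCATIONGRANULARITY)
    print(len(m))                                      # new, larger mapping
    m.close()
```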
@@ -40,7 +40,7 @@
                                            */
 #define MAX_NB_OPENED_INDEXERS (1000)     /**< The maximum number of indexers open at the same time.
                                            */
-#define MAX_PATH_LEN (1024)               /**< Maximum length for the character string defining a
+#define MAX_PATH_LEN (2048)               /**< Maximum length for the character string defining a
                                            *   file or directory path.
                                            */

@@ -1769,6 +1769,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
 {
     size_t   file_size;
     size_t   data_size;
+    size_t   header_size;
     index_t  new_line_count;
     double   multiple;
     int      column_file_descriptor;

@@ -1791,6 +1792,8 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap

     data_size = obi_array_sizeof((column->header)->stored_data_type, new_line_count, (column->header)->nb_elements_per_line);

+    header_size = (column->header)->header_size;
+
     // Check that it is actually greater than the current data size, otherwise no need to truncate
     if ((column->header)->data_size == data_size)
         return 0;

@@ -1855,7 +1858,7 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
         return -1;
     }

-    // Unmap the data before truncating the file
+    // Unmap the entire file before truncating it (WSL requirement)
     if (munmap(column->data, (column->header)->data_size) < 0)
     {
         obi_set_errno(OBICOL_UNKNOWN_ERROR);

@@ -1863,9 +1866,16 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
         close(column_file_descriptor);
         return -1;
     }
+    if (munmap(column->header, header_size) < 0)
+    {
+        obi_set_errno(OBICOL_UNKNOWN_ERROR);
+        obidebug(1, "\nError munmapping the header of a column before truncating");
+        close(column_file_descriptor);
+        return -1;
+    }

     // Truncate the column file
-    file_size = (column->header)->header_size + data_size;
+    file_size = header_size + data_size;
     if (ftruncate(column_file_descriptor, file_size) < 0)
     {
         obi_set_errno(OBICOL_UNKNOWN_ERROR);

@@ -1874,13 +1884,30 @@ int obi_truncate_column(OBIDMS_column_p column) // TODO is it necessary to unmap
         return -1;
     }

-    // Remap the data
+    // Remap the header and the data
+
+    column->header = mmap(NULL,
+                          header_size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_SHARED,
+                          column_file_descriptor,
+                          0
+                         );
+
+    if (column->header == MAP_FAILED)
+    {
+        obi_set_errno(OBICOL_UNKNOWN_ERROR);
+        obidebug(1, "\nError re-mmapping the header of a column after truncating");
+        close(column_file_descriptor);
+        return -1;
+    }

     column->data = mmap(NULL,
                         data_size,
                         PROT_READ | PROT_WRITE,
                         MAP_SHARED,
                         column_file_descriptor,
-                        (column->header)->header_size
+                        header_size
                        );

     if (column->data == MAP_FAILED)
|
@ -638,6 +638,15 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
|
||||
multiple = ceil((double) new_size / (double) getpagesize());
|
||||
rounded_new_size = multiple * getpagesize();
|
||||
|
||||
// Unmap the entire file before truncating it (WSL requirement)
|
||||
if (munmap(view->infos, (view->infos)->file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBIVIEW_ERROR);
|
||||
obidebug(1, "\nError munmapping a view file when enlarging");
|
||||
close(obiview_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Enlarge the file
|
||||
if (ftruncate(obiview_file_descriptor, rounded_new_size) < 0)
|
||||
{
|
||||
@ -647,15 +656,7 @@ static int enlarge_view_file(Obiview_p view, size_t new_size)
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unmap and remap the file
|
||||
if (munmap(view->infos, (view->infos)->file_size) < 0)
|
||||
{
|
||||
obi_set_errno(OBIVIEW_ERROR);
|
||||
obidebug(1, "\nError munmapping a view file when enlarging");
|
||||
close(obiview_file_descriptor);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Remap the file
|
||||
view->infos = mmap(NULL,
|
||||
rounded_new_size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
|