Mirror of https://github.com/metabarcoding/obitools4.git (synced 2025-06-29 16:20:46 +00:00)

Accelerate the reading of very long fasta sequences, and more generally of every format

This commit is contained in:
BIN obitests/obitools/obiconvert/gbpln1088.4Mb.fasta.gz (new normal file)
Binary file not shown.
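The heart of the change is in ReadFileChunk (diff below): the single contiguous buffer that was repeatedly grown with slices.Grow and rescanned whenever an entry spanned several reads is replaced by a linked list of fixed-size pieces (PieceOfChunk) that is concatenated exactly once per chunk. A minimal, self-contained sketch of that idea (illustrative names, not the commit's code):

    package main

    import "fmt"

    // piece is an illustrative stand-in for the commit's PieceOfChunk:
    // fixed-size reads are kept as separate pieces and joined exactly once.
    type piece struct {
        next *piece
        data []byte
    }

    // pack concatenates the whole chain into one slice, copying every byte
    // only once; the old grow-and-rescan strategy recopied and rescanned
    // the accumulated buffer on each extension.
    func (p *piece) pack() []byte {
        total := 0
        for q := p; q != nil; q = q.next {
            total += len(q.data)
        }
        out := make([]byte, 0, total)
        for q := p; q != nil; q = q.next {
            out = append(out, q.data...)
        }
        return out
    }

    func main() {
        head := &piece{data: []byte(">seq1\nACGTACGT")}
        head.next = &piece{data: []byte("ACGTACGT\n>seq2\nTTTT\n")}
        fmt.Printf("%s", head.pack())
    }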
@@ -13,6 +13,11 @@ CMD=obiconvert
 #
 ######
 TEST_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"
+
+if [ -z "$TEST_DIR" ] ; then
+    TEST_DIR="."
+fi
+
 OBITOOLS_DIR="${TEST_DIR/obitest*/}build"
 export PATH="${OBITOOLS_DIR}:${PATH}"
 
@@ -99,6 +104,36 @@ else
 fi
 
 
+((ntest++))
+if obiconvert -Z "${TEST_DIR}/gbpln1088.4Mb.fasta.gz" \
+    > "${TMPDIR}/xxx.fasta.gz" && \
+    zdiff "${TEST_DIR}/gbpln1088.4Mb.fasta.gz" \
+        "${TMPDIR}/xxx.fasta.gz"
+then
+    log "$MCMD: converting large fasta file to fasta OK"
+    ((success++))
+else
+    log "$MCMD: converting large fasta file to fasta failed"
+    ((failed++))
+fi
+
+((ntest++))
+if obiconvert -Z --fastq-output \
+    "${TEST_DIR}/gbpln1088.4Mb.fasta.gz" \
+    > "${TMPDIR}/xxx.fastq.gz" && \
+    obiconvert -Z --fasta-output \
+        "${TMPDIR}/xxx.fastq.gz" \
+    > "${TMPDIR}/yyy.fasta.gz" && \
+    zdiff "${TEST_DIR}/gbpln1088.4Mb.fasta.gz" \
+        "${TMPDIR}/yyy.fasta.gz"
+then
+    log "$MCMD: converting large file between fasta and fastq OK"
+    ((success++))
+else
+    log "$MCMD: converting large file between fasta and fastq failed"
+    ((failed++))
+fi
+
 #########################################
 #
 # At the end of the tests
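The two new tests round-trip a 4 MB gzip-compressed fasta file through obiconvert and compare the result with zdiff. For reference, an equivalent content check written in Go with only the standard library (a sketch; file names follow the test):

    package main

    import (
        "bytes"
        "compress/gzip"
        "io"
        "log"
        "os"
    )

    // gunzipAll decompresses a .gz file fully into memory.
    func gunzipAll(path string) []byte {
        f, err := os.Open(path)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        zr, err := gzip.NewReader(f)
        if err != nil {
            log.Fatal(err)
        }
        defer zr.Close()
        data, err := io.ReadAll(zr)
        if err != nil {
            log.Fatal(err)
        }
        return data
    }

    func main() {
        // Compare decompressed contents, as zdiff does in the shell test.
        a := gunzipAll("gbpln1088.4Mb.fasta.gz")
        b := gunzipAll("xxx.fasta.gz")
        if !bytes.Equal(a, b) {
            log.Fatal("round-trip conversion changed the fasta content")
        }
    }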
@@ -187,13 +187,12 @@ func _ParseEmblFile(
 func ReadEMBL(reader io.Reader, options ...WithOption) (obiiter.IBioSequence, error) {
     opt := MakeOptions(options)
 
-    buff := make([]byte, 1024*1024*128) // 128 MB
-
     entry_channel := ReadFileChunk(
         opt.Source(),
         reader,
-        buff,
+        1024*1024*128,
         EndOfLastFlatFileEntry,
+        "\nID ",
     )
 
     newIter := obiiter.MakeIBioSequence()
@@ -233,13 +233,12 @@ func ReadFasta(reader io.Reader, options ...WithOption) (obiiter.IBioSequence, error) {
 
     nworker := opt.ParallelWorkers()
 
-    buff := make([]byte, 1024*1024)
-
     chkchan := ReadFileChunk(
         opt.Source(),
         reader,
-        buff,
+        1024*1024,
         EndOfLastFastaEntry,
+        "\n>",
     )
 
     for i := 0; i < nworker; i++ {
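The same calling pattern recurs in the fastq, EMBL, and GenBank readers in this commit: callers now pass a chunk size and a probe string instead of a preallocated buffer, and ReadFileChunk owns all buffer management. A hedged sketch of a consumer, where drainChunks is a hypothetical helper inside the obiformats package:

    // drainChunks is a hypothetical illustration (not part of the commit)
    // of how ReadFileChunk's output channel is consumed.
    func drainChunks(source string, reader io.Reader) {
        chkchan := ReadFileChunk(
            source,              // source label stamped on every chunk
            reader,              // the underlying io.Reader
            1024*1024,           // size of each read piece (1 MB)
            EndOfLastFastaEntry, // splitter: offset just past the last full record
            "\n>",               // probe: marker of a possible new fasta record
        )

        for chunk := range chkchan {
            // chunk.Raw buffers whole records only; chunk.Order lets the
            // parallel workers restore file order downstream.
            _ = chunk.Raw
            _ = chunk.Order
        }
    }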
@@ -327,13 +327,12 @@ func ReadFastq(reader io.Reader, options ...WithOption) (obiiter.IBioSequence, error) {
 
     nworker := opt.ParallelWorkers()
 
-    buff := make([]byte, 1024*1024)
-
     chkchan := ReadFileChunk(
         opt.Source(),
         reader,
-        buff,
+        1024*1024,
         EndOfLastFastqEntry,
+        "\n@",
     )
 
     for i := 0; i < nworker; i++ {
@@ -4,8 +4,10 @@ import (
     "bytes"
     "io"
     "slices"
+    "strings"
 
     "git.metabarcoding.org/obitools/obitools4/obitools4/pkg/obiseq"
+    "git.metabarcoding.org/obitools/obitools4/obitools4/pkg/obiutils"
     log "github.com/sirupsen/logrus"
 )
 
@@ -17,10 +19,102 @@ type FileChunk struct {
     Order int
 }
 
+type PieceOfChunk struct {
+    head *PieceOfChunk
+    next *PieceOfChunk
+    data []byte
+}
+
+func NewPieceOfChunk(size int) *PieceOfChunk {
+    data := make([]byte, size)
+    p := &PieceOfChunk{
+        next: nil,
+        data: data,
+    }
+    p.head = p
+    return p
+}
+
+func (piece *PieceOfChunk) NewPieceOfChunk(size int) *PieceOfChunk {
+    if piece == nil {
+        return NewPieceOfChunk(size)
+    }
+
+    if piece.next != nil {
+        log.Panic("Try to create a new piece of chunk when next already exist")
+    }
+
+    n := NewPieceOfChunk(size)
+    n.head = piece.head
+    piece.next = n
+
+    return n
+}
+
+func (piece *PieceOfChunk) Next() *PieceOfChunk {
+    return piece.next
+}
+
+func (piece *PieceOfChunk) Head() *PieceOfChunk {
+    if piece == nil {
+        return nil
+    }
+    return piece.head
+}
+
+func (piece *PieceOfChunk) Len() int {
+    if piece == nil {
+        return 0
+    }
+
+    if piece.next == nil {
+        return len(piece.data)
+    }
+    return len(piece.data) + piece.next.Len()
+}
+
+func (piece *PieceOfChunk) Pack() *PieceOfChunk {
+    if piece == nil {
+        return nil
+    }
+    size := piece.next.Len()
+    piece.data = slices.Grow(piece.data, size)
+
+    for p := piece.next; p != nil; {
+        piece.data = append(piece.data, p.data...)
+        p.data = nil
+        n := p.next
+        p.next = nil
+        p = n
+    }
+
+    piece.next = nil
+
+    return piece
+}
+
+func (piece *PieceOfChunk) IsLast() bool {
+    return piece.next == nil
+}
+
+func (piece *PieceOfChunk) FileChunk(source string, order int) FileChunk {
+    piece.Pack()
+    return FileChunk{
+        Source: source,
+        Raw:    bytes.NewBuffer(piece.data),
+        Order:  order,
+    }
+}
+
 type ChannelFileChunk chan FileChunk
 
 type LastSeqRecord func([]byte) int
 
+func ispossible(data []byte, probe string) bool {
+    s := obiutils.UnsafeString(data)
+    return strings.Index(s, probe) != -1
+}
+
 // _ReadFlatFileChunk reads a chunk of data from the given 'reader' and sends it to the
 // 'readers' channel as a _FileChunk struct. The function reads from the reader until
 // the end of the last entry is found, then sends the chunk to the channel. If the end
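The chain is deliberately cheap to extend and pays the concatenation cost only once. A hypothetical usage sketch of the API added above (inside the obiformats package; values are illustrative, not the commit's code):

    // pieceChainExample illustrates the PieceOfChunk chain defined above.
    func pieceChainExample() {
        head := NewPieceOfChunk(4)      // first piece; head.head points to itself
        copy(head.data, ">ab\n")
        tail := head.NewPieceOfChunk(4) // linked after head, same head pointer
        copy(tail.data, "ACGT")

        total := head.Len()   // 8: recursive sum over the whole chain
        packed := head.Pack() // one concatenation; next pointers are cleared
        _ = total
        _ = packed.data // ">ab\nACGT"
    }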
@@ -37,81 +131,86 @@ type LastSeqRecord func([]byte) int
 func ReadFileChunk(
     source string,
     reader io.Reader,
-    buff []byte,
-    splitter LastSeqRecord) ChannelFileChunk {
-    var err error
-    var fullbuff []byte
+    fileChunkSize int,
+    splitter LastSeqRecord,
+    probe string) ChannelFileChunk {
 
     chunk_channel := make(ChannelFileChunk)
 
-    fileChunkSize := len(buff)
-
     go func() {
+        var err error
         size := 0
         l := 0
         i := 0
 
+        pieces := NewPieceOfChunk(fileChunkSize)
         // Initialize the buffer to the size of a chunk of data
-        fullbuff = buff
 
         // Read from the reader until the buffer is full or the end of the file is reached
-        l, err = io.ReadFull(reader, buff)
-        buff = buff[:l]
+        l, err = io.ReadFull(reader, pieces.data)
+        pieces.data = pieces.data[:l]
 
         if err == io.ErrUnexpectedEOF {
             err = nil
         }
 
+        end := splitter(pieces.data)
+
         // Read from the reader until the end of the last entry is found or the end of the file is reached
         for err == nil {
             // Create an extended buffer to read from if the end of the last entry is not found in the current buffer
-            end := 0
-            ic := 0
 
             // Read from the reader in 1 MB increments until the end of the last entry is found
-            for end = splitter(buff); err == nil && end < 0; end = splitter(buff) {
-                ic++
-                buff = slices.Grow(buff, fileChunkSize)
-                l := len(buff)
-                extbuff := buff[l:(l + fileChunkSize - 1)]
-                size, err = io.ReadFull(reader, extbuff)
-                buff = buff[0:(l + size)]
+            for err == nil && end < 0 {
+                pieces = pieces.NewPieceOfChunk(fileChunkSize)
+                size, err = io.ReadFull(reader, pieces.data)
+                pieces.data = pieces.data[:size]
+
+                if ispossible(pieces.data, probe) {
+                    pieces = pieces.Head().Pack()
+                    end = splitter(pieces.data)
+                } else {
+                    end = -1
+                }
                 // log.Warnf("Splitter not found, attempting %d to read in %d B increments : len(buff) = %d/%d", ic, fileChunkSize, len(extbuff), len(buff))
             }
 
-            fullbuff = buff
+            pieces = pieces.Head().Pack()
+            lbuff := pieces.Len()
 
-            if len(buff) > 0 {
+            if lbuff > 0 {
                 if end < 0 {
-                    end = len(buff)
+                    end = pieces.Len()
                 }
 
-                pnext := end
-                lremain := len(buff) - pnext
-                buff = buff[:end]
-                for len(buff) > 0 && (buff[len(buff)-1] == '\n' || buff[len(buff)-1] == '\r') {
-                    buff = buff[:len(buff)-1]
-                }
+                lremain := lbuff - end
 
-                if len(buff) > 0 {
-                    cbuff := slices.Clone(buff)
-                    io := bytes.NewBuffer(cbuff)
-                    // log.Warnf("chuck %d :Read %d bytes from file %s", i, io.Len(), source)
-                    chunk_channel <- FileChunk{source, io, i}
-                    i++
-                }
+                var nextpieces *PieceOfChunk
 
                 if lremain > 0 {
-                    buff = fullbuff[0:lremain]
-                    lcp := copy(buff, fullbuff[pnext:])
+                    nextpieces = NewPieceOfChunk(lremain)
+                    lcp := copy(nextpieces.data, pieces.data[end:])
                     if lcp < lremain {
                         log.Fatalf("Error copying remaining data of chunk %d : %d < %d", i, lcp, lremain)
                     }
                 } else {
-                    buff = buff[:0]
+                    nextpieces = nil
                 }
+
+                pieces.data = pieces.data[:end]
+
+                for len(pieces.data) > 0 && (pieces.data[len(pieces.data)-1] == '\n' || pieces.data[len(pieces.data)-1] == '\r') {
+                    pieces.data = pieces.data[:len(pieces.data)-1]
+                }
+
+                if len(pieces.data) > 0 {
+                    // log.Warnf("chuck %d :Read %d bytes from file %s", i, io.Len(), source)
+                    chunk_channel <- pieces.FileChunk(source, i)
+                    i++
+                }
+
+                pieces = nextpieces
+                end = -1
             }
         }
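The speed-up for very long entries comes from the probe pre-check: before paying for Head().Pack() and a full splitter scan over everything read so far, the loop looks for the entry-start marker (probe: "\n>", "\n@", "\nID ", "\nLOCUS ") in the newly read piece only; obiutils.UnsafeString lets strings.Index run on the piece without copying it. A probe straddling a piece boundary is noticed only once a later piece matches or the file ends, so a chunk can occasionally grow a piece larger than strictly necessary, but no data is lost. A sketch of an equivalent check without the unsafe conversion (not the commit's code):

    // mayContainEntryStart performs the same substring test as
    // strings.Index(s, probe) != -1, without converting []byte to string,
    // at the cost of one small allocation for the probe.
    func mayContainEntryStart(data []byte, probe string) bool {
        return bytes.Contains(data, []byte(probe))
    }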
@@ -119,10 +218,11 @@ func ReadFileChunk(
             log.Fatalf("Error reading data from file : %s", err)
         }
 
+        pieces.Head().Pack()
+
         // Send the last chunk to the channel
-        if len(buff) > 0 {
-            io := bytes.NewBuffer(slices.Clone(buff))
-            chunk_channel <- FileChunk{source, io, i}
+        if pieces.Len() > 0 {
+            chunk_channel <- pieces.FileChunk(source, i)
         }
 
         // Close the readers channel when the end of the file is reached
@@ -223,13 +223,12 @@ func ReadGenbank(reader io.Reader, options ...WithOption) (obiiter.IBioSequence, error) {
     opt := MakeOptions(options)
     // entry_channel := make(chan _FileChunk)
 
-    buff := make([]byte, 1024*1024*128) // 128 MB
-
     entry_channel := ReadFileChunk(
         opt.Source(),
         reader,
-        buff,
+        1024*1024*128,
         EndOfLastFlatFileEntry,
+        "\nLOCUS ",
     )
 
     newIter := obiiter.MakeIBioSequence()
@@ -8,7 +8,7 @@ import (
 // corresponds to the last commit, and not the one when the file will be
 // commited
 
-var _Commit = "f21f51a"
+var _Commit = "937a483"
 var _Version = "Release 4.4.0"
 
 // Version returns the version of the obitools package.