git-svn-id: https://www.grenoble.prabi.fr/svn/LECASofts/ecoPCR/trunk@115 60f365c0-8329-0410-b2a4-ec073aeeaa1d
@@ -68,8 +68,7 @@ def endLessIterator(endedlist):
         yield x
     while(1):
         yield endedlist[-1]
 
 
 class ColumnFile(object):
-
     def __init__(self,stream,sep=None,strip=True,types=None):
@@ -135,7 +134,7 @@ def bsearchTaxon(taxonomy,taxid):
     else:
         return None
 
 
 
 def readNodeTable(file):
@@ -149,7 +148,6 @@ def readNodeTable(file):
                              bool,bool,bool,str))
     print >>sys.stderr,"Reading taxonomy dump file..."
     taxonomy=[[n[0],n[2],n[1]] for n in nodes]
-
     print >>sys.stderr,"List all taxonomy rank..."
     ranks =list(set(x[1] for x in taxonomy))
     ranks.sort()
@@ -171,15 +169,14 @@ def readNodeTable(file):
 
     return taxonomy,ranks,index
 
-def scientificNameIterator(file):
+def nameIterator(file):
     file = universalOpen(file)
     names = ColumnFile(file,
                        sep='|',
                        types=(int,str,
                               str,str))
     for taxid,name,unique,classname,white in names:
-        if classname == 'scientific name':
-            yield taxid,name
+        yield taxid,name,classname
 
 def mergedNodeIterator(file):
     file = universalOpen(file)
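Note (a sketch for orientation, not part of the commit): assuming the standard NCBI names.dmp layout ("tax_id | name_txt | unique name | name class |"), the renamed nameIterator now yields every name together with its name class, instead of filtering on 'scientific name' as scientificNameIterator did. The effect on one hypothetical record:

    # Illustrative record only; fields are '|'-separated, as ColumnFile expects.
    row = "9606\t|\thuman\t|\t\t|\tgenbank common name\t|"
    fields = [f.strip() for f in row.split('|')]
    taxid, name, unique, classname = int(fields[0]), fields[1], fields[2], fields[3]
    # old scientificNameIterator: record skipped (classname != 'scientific name')
    # new nameIterator:           yields (9606, 'human', 'genbank common name')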
@@ -201,8 +198,12 @@ def readTaxonomyDump(taxdir):
     taxonomy,ranks,index = readNodeTable('%s/nodes.dmp' % taxdir)
 
     print >>sys.stderr,"Adding scientific name..."
-    for taxid,name in scientificNameIterator('%s/names.dmp' % taxdir):
-        taxonomy[index[taxid]].append(name)
+
+    alternativeName=[]
+    for taxid,name,classname in nameIterator('%s/names.dmp' % taxdir):
+        alternativeName.append((name,classname,index[taxid]))
+        if classname == 'scientific name':
+            taxonomy[index[taxid]].append(name)
 
     print >>sys.stderr,"Adding taxid alias..."
     for taxid,current in mergedNodeIterator('%s/merged.dmp' % taxdir):
@@ -212,7 +213,7 @@ def readTaxonomyDump(taxdir):
     for taxid in deletedNodeIterator('%s/delnodes.dmp' % taxdir):
         index[taxid]=None
 
-    return taxonomy,ranks,index
+    return taxonomy,ranks,alternativeName,index
 
 
 #####
@@ -267,28 +268,52 @@ def genbankEntryParser(entry):
         Tx = None
     return {'id':Id,'taxid':Tx,'definition':De,'sequence':Sq}
 
-_fastaParseID = re.compile('(?<=^>)[^ ]+')
-_fastaParseDE = re.compile('(?<=^>).+',)
-_fastaParseSQ = re.compile('^[^>].+',re.MULTILINE+re.DOTALL)
-_fastaParseTX = re.compile('(?<=[[Tt]axon:) *[0-9]+ *(?=])')
-
-def fastaEntryParser(entry):
-    Id = _fastaParseID.findall(entry)[0]
-    De = _fastaParseDE.findall(entry)[0].split(None,1)[1:]
-    if not De:
-        De=''
-    else:
-        De=De[0]
-    Sq = cleanSeq(_fastaParseSQ.findall(entry)[0].upper())
+######################
+
+_cleanDef = re.compile('[\nDE]')
+
+def cleanDef(definition):
+    return _cleanDef.sub('',definition)
+
+_emblParseID = re.compile('(?<=^ID {3})[^ ]+(?=;)',re.MULTILINE)
+_emblParseDE = re.compile('(?<=^DE {3}).+?\. *$(?=[^ ])',re.MULTILINE+re.DOTALL)
+_emblParseSQ = re.compile('(?<=^ ).+?(?=^//$)',re.MULTILINE+re.DOTALL)
+_emblParseTX = re.compile('(?<= /db_xref="taxon:)[0-9]+(?=")')
+
+def emblEntryParser(entry):
+    Id = _emblParseID.findall(entry)[0]
+    De = ' '.join(cleanDef(_emblParseDE.findall(entry)[0]).split())
+    Sq = cleanSeq(_emblParseSQ.findall(entry)[0].upper())
     try:
-        Tx = int(_fastaParseTX.findall(entry)[0])
+        Tx = int(_emblParseTX.findall(entry)[0])
     except IndexError:
         Tx = None
 
     return {'id':Id,'taxid':Tx,'definition':De,'sequence':Sq}
 
+
 ######################
 
+def parseFasta(seq):
+    title = seq[0].strip()[1:].split(None,1)
+    id=title[0]
+    if len(title) == 2:
+        field = title[1].split('; ')
+    else:
+        field=[]
+    info = dict(x.split('=') for x in field if '=' in x)
+    definition = ' '.join([x for x in field if '=' not in x])
+    seq=(''.join([x.strip() for x in seq[1:]])).upper()
+    return id,seq,definition,info
+
+
+def fastaEntryParser(entry):
+    id,seq,definition,info = parseFasta(entry)
+    Tx = info.get('taxid',None)
+    if Tx is not None:
+        Tx=int(Tx)
+    return {'id':id,'taxid':Tx,'definition':definition,'sequence':seq}
+
+
 def sequenceIteratorFactory(entryParser,entryIterator):
     def sequenceIterator(file):
        for entry in entryIterator(file):
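For orientation (a sketch, not part of the commit): the new parseFasta/fastaEntryParser pair takes the entry as a list of lines and reads 'key=value' fields, separated by '; ', from the title line; a 'taxid=' field, if present, is converted to the integer taxid. The previous implementation instead looked for a '[Taxon:<number>]' tag via _fastaParseTX. With a hypothetical entry such as:

    # Illustrative FASTA entry, given as the list of lines parseFasta expects.
    entry = [">AB000001 taxid=9606; organism=Homo sapiens; partial cytb\n",
             "ttaccgtga\n",
             "ccttaa\n"]
    # parseFasta(entry) would return roughly:
    #   ('AB000001', 'TTACCGTGACCTTAA', 'partial cytb',
    #    {'taxid': '9606', 'organism': 'Homo sapiens'})
    # and fastaEntryParser(entry) would then return:
    #   {'id': 'AB000001', 'taxid': 9606,
    #    'definition': 'partial cytb', 'sequence': 'TTACCGTGACCTTAA'}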
@@ -381,6 +406,22 @@ def ecoRankPacker(rank):
 
     return packed
 
+def ecoNamePacker(name):
+
+    namelength = len(name[0])
+    classlength= len(name[1])
+    totalSize = namelength + classlength + 4 + 4 + 4 + 4
+
+    packed = struct.pack('> I I I I I %ds %ds' % (namelength,classlength),
+                         totalSize,
+                         int(name[1]=='scientific name'),
+                         namelength,
+                         classlength,
+                         name[2],
+                         name[0],
+                         name[1])
+
+    return packed
 
 def ecoSeqWriter(file,input,taxindex,parser):
     output = open(file,'wb')
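A minimal sketch (not part of the commit) of how a record written by ecoNamePacker could be read back; it simply mirrors the '> I I I I I %ds %ds' layout used above, where the five integers are the record size, the scientific-name flag, the two string lengths, and the taxon index (name[2]):

    import struct

    def unpackEcoName(buf):
        # The five big-endian unsigned ints occupy the first 20 bytes.
        totalSize, isSci, nameLen, classLen, taxIndex = struct.unpack_from('> I I I I I', buf)
        # The name and the name-class strings follow back to back.
        name, classname = struct.unpack_from('> %ds %ds' % (nameLen, classLen), buf, 20)
        return totalSize, bool(isSci), taxIndex, name, classname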
@@ -438,18 +479,40 @@ def ecoRankWriter(file,ranks):
         output.write(ecoRankPacker(rank))
 
     output.close()
 
+def nameCmp(n1,n2):
+    name1=n1[0].upper()
+    name2=n2[0].upper()
+    if name1 < name2:
+        return -1
+    elif name1 > name2:
+        return 1
+    return 0
+
+
+def ecoNameWriter(file,names):
+    output = open(file,'wb')
+    output.write(struct.pack('> I',len(names)))
+
+    names.sort(nameCmp)
+
+    for name in names:
+        output.write(ecoNamePacker(name))
+
+    output.close()
+
 def ecoDBWriter(prefix,taxonomy,seqFileNames,parser):
 
     ecoRankWriter('%s.rdx' % prefix, taxonomy[1])
     ecoTaxWriter('%s.tdx' % prefix, taxonomy[0])
+    ecoNameWriter('%s.ndx' % prefix, taxonomy[2])
 
     filecount = 0
     for filename in seqFileNames:
         filecount+=1
         sk=ecoSeqWriter('%s_%03d.sdx' % (prefix,filecount),
                         filename,
-                        taxonomy[2],
+                        taxonomy[3],
                         parser)
         if sk:
             print >>sys.stderr,"Skipped entry :"
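nameCmp is a Python 2 cmp-style comparator handed straight to list.sort, ordering the (name, class, index) records case-insensitively by name before they are packed into the .ndx file. The same ordering could be written with a key function (a sketch only, not part of the commit):

    # Equivalent, comparator-free ordering of the (name, class, index) tuples:
    names.sort(key=lambda n: n[0].upper())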
@@ -464,37 +527,58 @@ def ecoParseOptions(arguments):
           }
 
     o,filenames = getopt.getopt(arguments,
-                                'ht:n:gf',
+                                'ht:n:gfe',
                                 ['help',
                                  'taxonomy=',
                                  'name=',
                                  'genbank',
-                                 'fasta'])
+                                 'fasta',
+                                 'embl'])
 
     for name,value in o:
         if name in ('-h','--help'):
-            pass
+            printHelp()
+            exit()
         elif name in ('-t','--taxonomy'):
             opt['taxdir']=value
         elif name in ('-n','--name'):
             opt['prefix']=value
         elif name in ('-g','--genbank'):
             opt['parser']=sequenceIteratorFactory(genbankEntryParser,
-                                                  entryIterator
-                                                  )
+                                                  entryIterator)
+
         elif name in ('-f','--fasta'):
             opt['parser']=sequenceIteratorFactory(fastaEntryParser,
                                                   fastaEntryIterator)
+
+        elif name in ('-e','--embl'):
+            opt['parser']=sequenceIteratorFactory(emblEntryParser,
+                                                  entryIterator)
         else:
             raise ValueError,'Unknown option %s' % name
 
     return opt,filenames
 
+def printHelp():
+    print "-----------------------------------"
+    print " ecoPCRFormat.py"
+    print "-----------------------------------"
+    print "ecoPCRFormat.py [option] <argument>"
+    print "-----------------------------------"
+    print "-e --embl :[E]mbl format"
+    print "-f --fasta :[F]asta format"
+    print "-g --genbank :[G]enbank format"
+    print "-h --help :[H]elp - print this help"
+    print "-n --name :[N]ame of the new database created"
+    print "-t --taxonomy :[T]axonomy - path to the taxonomy database"
+    print " :bcp-like dump from GenBank taxonomy database."
+    print "-----------------------------------"
+
 if __name__ == '__main__':
 
     opt,filenames = ecoParseOptions(sys.argv[1:])
 
     taxonomy = readTaxonomyDump(opt['taxdir'])
 
 
     ecoDBWriter(opt['prefix'], taxonomy, filenames, opt['parser'])
 
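With the new option in place, a typical invocation would look something like the following (file names are illustrative): -t points at a directory holding the NCBI dump files used above (nodes.dmp, names.dmp, merged.dmp, delnodes.dmp), -n sets the prefix of the generated .rdx/.tdx/.ndx/.sdx files, and -e/-f/-g selects the EMBL, FASTA or GenBank parser.

    python ecoPCRFormat.py -t ./taxdump -n mydb -e est_human.dat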