Compare commits


282 Commits

Author SHA1 Message Date
d1d26b9028 Simplify the code 2016-08-04 08:00:54 +02:00
8f0462c407 Merge branch 'master' into Eric_version_for_sequence
Conflicts:
	python/obitools3/obidms/_obidmscolumn_seq.pyx
2016-08-03 10:09:20 +02:00
000b9999ad Merge branch 'master' of git@git.metabarcoding.org:obitools/obitools3.git 2016-07-03 09:22:22 +02:00
aff9831c13 Substitute fprintf call by fputs call to conform with the new ubuntu
compilation rules
2016-07-03 09:21:56 +02:00
448fa8d325 first trial for a fasta formatter 2016-07-03 09:18:52 +02:00
6af62d8124 Change a fprintf without argument to a fputs to comply with the new
default parameter on ubuntu
2016-07-03 08:25:06 +02:00
0869b9ba3f Closes issue #47 by storing each view in a separate file named with the
view's name and created upon view creation.
2016-06-30 11:41:30 +02:00
ad2af0b512 Some comments updated 2016-06-16 11:26:54 +02:00
38e603ed57 Deleted some redundant cython code 2016-06-10 10:34:47 +02:00
f438c3d913 OBIQUAL columns can now handle multiple elements per line 2016-06-09 15:54:36 +02:00
2a1ea3ba3f Setting NA values is now handled properly for OBI_SEQ, OBI_STR and
OBI_QUAL columns
2016-06-09 14:22:36 +02:00
fc3641d7ff Read-only AVLs are now hard-linked instead of copied when cloning an AVL
group to make it writable. Also fixed several bugs when handling AVL
groups.
2016-06-03 19:02:46 +02:00
799b942017 Deleted old debugging print 2016-06-03 18:57:32 +02:00
6e3f5b230e Fixed typo in doc 2016-06-03 18:56:45 +02:00
2f57f80c63 Fixed a bug where an unmapped variable would be read 2016-06-03 18:55:58 +02:00
2962c4d250 Goes with previous commit 2016-06-03 18:54:25 +02:00
69bf7ec2e7 NA value for OBI_STR and OBI_SEQ columns is now NULL 2016-06-03 18:53:22 +02:00
bac7ce7184 Start of the implementation of the export methods 2016-06-02 19:10:33 +02:00
f186395661 Trap potential exception generated by char* to bytes casts 2016-05-29 21:18:20 +02:00
85395dfc1a value returned for sequence is now bytes and no more str 2016-05-29 13:53:32 +02:00
f830389974 Add some comment on the location of the align method. 2016-05-29 12:58:31 +02:00
2e35229357 Add conversion checking on the value of a seq column 2016-05-29 12:54:13 +02:00
a8ed57dc6e few small changes 2016-05-21 12:29:55 +02:00
c3274d419c remove an extra debug log 2016-05-21 12:29:08 +02:00
cca0dbb46b Close issue #54 by adding a read1 method to the MagicKeyFile class 2016-05-21 12:24:48 +02:00
5a78157112 increase parsing speed of the header 2016-05-21 10:29:11 +02:00
0b9a41d952 Patch a bug about the reading of the last sequence 2016-05-21 10:28:03 +02:00
e681ca646d Fixed a problem with some columns being shorter in views and triggering
errors when trying to get values. Temporary fix that needs discussion
2016-05-20 18:45:29 +02:00
3b59043ea8 Major update: New column type to store sequence qualities. Closes #41 2016-05-20 16:45:22 +02:00
ffff91e76c Fixed variable name that had been accidentally changed for better
clarity
2016-05-18 13:27:41 +02:00
6a8df069ad Indexers are now cloned if needed to modify them after they've been
closed. Obligatory indexers' names now follow the same pattern as other
indexers (columnname_version). Closes #46 and #49.
2016-05-18 13:23:48 +02:00
8ae7644945 First version of quality handling (not working yet) and now it is
checked that a column is writable before enlarging it
2016-05-11 16:38:14 +02:00
b3c47809da First version of alignment functions (imported from suma* programs) 2016-05-11 16:36:23 +02:00
3567681339 Now when a column is added to a view, if there is a line selection, all
columns in the view are cloned first
2016-05-11 16:34:20 +02:00
757ef8509a Deleting CeCILL license duplicates 2016-05-09 11:17:45 +02:00
f961621f5d Minor improvements in _obidms Cython layer 2016-05-04 13:43:26 +02:00
bc12360490 Reworked and commented a bit the cython layer for dms, columns and views 2016-05-02 15:16:06 +02:00
872071b104 Removed a list of column pointers kept in the OBIView class that was not
really needed
2016-05-02 14:23:42 +02:00
32cc8968e8 Adding CeCILL license 2016-05-02 11:51:59 +02:00
d6481f0db8 Merge branch 'master' of git@git.metabarcoding.org:obitools/obitools3.git 2016-04-29 17:46:59 +02:00
a32920e401 Relative paths when creating or opening a DMS now work 2016-04-29 17:46:36 +02:00
31cf27d676 Added indexer function that returns the name of the indexer 2016-04-29 16:18:56 +02:00
baba2d742e commenting _obidms.pyx 2016-04-29 16:07:03 +02:00
5bd12079ae Added comments about listing columns and indexers in obidms functions 2016-04-29 16:06:01 +02:00
072ee5ac03 Re-re-fixed line breaks in README file 2016-04-29 15:44:40 +02:00
9fe21316ff Refixed line breaks in README file 2016-04-29 15:39:46 +02:00
3dc3aaa46b Fixed line breaks in README file 2016-04-29 15:36:58 +02:00
b371030edd Adding README file 2016-04-29 15:35:08 +02:00
b3976fa461 Merge branch 'luke_tests' 2016-04-28 11:17:24 +02:00
6ea2cfb9ca Merging luke_tests branch without the commit turning inline functions in macros 2016-04-28 11:17:18 +02:00
0eca86107e Pseudo obihead for tests 2016-04-27 14:27:28 +02:00
0de953a3ef pseudo obigrep for tests 2016-04-27 14:19:55 +02:00
f3b20b809d Fixed bug with indexer names being defined and generating seg fault if
creating a column not using indexers
2016-04-27 14:01:36 +02:00
d159b921eb Fixed obi import trying to print all lines at the end (source of
segfault?)
2016-04-27 13:14:19 +02:00
4e4cf46b16 Added all C files as source files for all cython files to stop having
that kind of problem with linux systems
2016-04-27 10:44:24 +02:00
6b61533650 Added more C source files for _obiseq 2016-04-27 10:41:00 +02:00
419885485b Added files in _obitaxo C sources for cython 2016-04-27 10:30:16 +02:00
0c8504b6db Commented #ifdef directive for detect_bucket_size function because it
causes errors
2016-04-27 10:24:40 +02:00
654c34a1a6 changed inline functions to macros to make it work on Luke 2016-04-26 15:40:12 +02:00
2d8c06f7b7 Fixed variable initialization for error detection 2016-04-26 14:38:46 +02:00
a6c8d35491 import command a bit modified for tests 2016-04-26 14:29:54 +02:00
366264828e Renamed MurmurHash2.c file to murmurhash2.c as it could be a problem 2016-04-26 14:29:17 +02:00
d3a6ff6043 Removed deprecated code 2016-04-26 14:27:16 +02:00
5ca84b91dc Merge branch 'master' of git@git.metabarcoding.org:obitools/obitools3.git 2016-04-25 18:35:57 +02:00
87935c6678 Fixed all compilation problems with new function names, locations etc 2016-04-25 18:35:02 +02:00
92980508c0 Made the function to clone a column in the context of a view private 2016-04-25 18:15:25 +02:00
65880db422 Made function to update the line count of a view private 2016-04-25 18:11:37 +02:00
767d9c7804 Reordered view functions for better coherence 2016-04-25 18:07:58 +02:00
2566377e2a Updated the documentation for utils functions 2016-04-25 18:02:58 +02:00
1fbbdd43f9 Updated obiversion_t declaration 2016-04-25 17:58:37 +02:00
8cdfbb379e Documentation for views and reworked the code a little 2016-04-25 17:58:12 +02:00
0a55e26520 Reworked obiview code and added more comments 2016-04-25 11:37:53 +02:00
68a8509c12 Updated documentation in obitypes.h 2016-04-25 10:33:01 +02:00
5f98d2ed5c Fixed the calculation of the size of data for OBI_STR and OBI_SEQ
columns
2016-04-25 10:26:51 +02:00
ef1be141c1 Update Licence to english version 2016-04-23 18:03:50 +02:00
bbfd40d56d Add license 2016-04-23 18:03:10 +02:00
5d08da46a2 Updated the documentation in obidmscolumn.h 2016-04-22 17:55:53 +02:00
66045acf1d Creating a column now uses the function to create the indexer name if
one was not provided
2016-04-22 17:47:00 +02:00
6977c4315c Improved function to build an indexer name 2016-04-22 17:38:23 +02:00
839b3000a8 Added a function to build indexer names 2016-04-22 17:08:23 +02:00
ffa4557928 changed MAP_PRIVATE flags to MAP_SHARED when opening a column because it
seems a lot more efficient
2016-04-22 16:26:24 +02:00
003cd11362 Fixed initialization of NA values for OBI_STR and OBI_SEQ columns 2016-04-22 16:14:23 +02:00
c87227b65a Uncommented an error message that doesn't need to be commented anymore 2016-04-22 16:11:56 +02:00
c07e75f2ac Updated the documentation for OBI_STR columns 2016-04-22 15:59:32 +02:00
6b394a5cf7 Updated the documentation for OBI_SEQ columns 2016-04-22 15:58:20 +02:00
2416b8ccd8 Deleted more unused inclusions in OBI_STR and OBI_SEQ column types code 2016-04-22 15:56:09 +02:00
b9921e111d Removed unused inclusions and definitions in all column types code 2016-04-22 15:50:19 +02:00
8f5aa8841d Removed unused definition in OBI_IDX columns code 2016-04-22 15:44:30 +02:00
900d67de87 Updated the documentation for columns with the type OBI_IDX 2016-04-22 15:43:39 +02:00
22e3c3eeed Updated the documentation for obidms functions 2016-04-22 11:28:09 +02:00
4ead37ee48 Finished moving obiblob functions to obiblob files and documentation for
obiblob functions
2016-04-21 15:18:14 +02:00
bce360bbd5 Documentation for obiblob indexer API 2016-04-21 15:08:40 +02:00
2a68cb26f8 Improved AVL tree documentation 2016-04-21 15:07:27 +02:00
043e70ff49 Updated AVL documentation 2016-04-21 14:39:03 +02:00
66021367f6 Moved some blob functions to obiblob.c 2016-04-21 14:20:26 +02:00
e69f44ae3d Little annotations for the murmur hash function. 2016-04-21 13:53:29 +02:00
1941a3785e Updated encode functions documentation 2016-04-21 13:46:02 +02:00
c7b8db6a2e Replaced malloc+memset with calloc 2016-04-21 13:45:39 +02:00
1dc4a3be49 Documentation for DNA sequence indexing functions 2016-04-21 13:36:51 +02:00
09597016fd Short doc for crc function 2016-04-21 13:23:52 +02:00
1a2fa0923c Documented the functions indexing and retrieving character strings 2016-04-21 11:35:21 +02:00
00f2f2cc51 Documented changes made in bloom functions 2016-04-21 11:22:31 +02:00
7a88ca619a First obi import (doesn't import tags yet because NA values aren't
handled)
2016-04-15 17:00:08 +02:00
eddd19a245 Changes in obi commands 2016-04-15 16:59:21 +02:00
2aafecc3b5 Changed sequence 'description' to 'definition' everywhere 2016-04-15 16:31:43 +02:00
094b2371e9 Deleted obsolete directory 2016-04-15 14:44:31 +02:00
c1034d300d merging and fixed git conflict with obiavl.h 2016-04-15 13:23:29 +02:00
02d67c257f The default name of an AVL is now the column name + '_indexer', and when
an AVL is opened (as opposed to created), it is read-only
2016-04-15 12:55:26 +02:00
e04ea85d1e Fixed problematic __str__ method and useless declarations in the
OBI_Nuc_Seq_Stored class
2016-04-15 11:22:05 +02:00
527d3555f0 Moved the functions getting full paths for files and directories to
obidms.c/.h files
2016-04-15 11:11:13 +02:00
71492ad229 Made the handling of listing and unlisting opened columns and indexers
functions in the obidms files.
2016-04-15 10:49:12 +02:00
73d64e5aff Renamed 'unmap_header' function to 'close_header' 2016-04-14 15:19:27 +02:00
4cb52e1632 Made the truncating of columns automatic when closing them (note:
already the case for AVLs)
2016-04-14 15:13:30 +02:00
9d042f7bd0 Refactored and relocated the set and get functions of all column types,
both within and out of the context of a view
2016-04-13 15:10:24 +02:00
5ec2d8842e Character string indexer API 2016-04-12 17:21:01 +02:00
04c9470f7d Fixed and cleaned DNA_seq_indexer API 2016-04-12 17:20:24 +02:00
be05c889e2 DNA_seq_indexer API 2016-04-12 16:38:47 +02:00
04e3a7b5a9 Added more references in cython .cfiles files because it seems necessary
for linux distributions
2016-04-12 15:10:54 +02:00
d8107533d8 Obiblob_indexer API 2016-04-12 14:53:33 +02:00
cd4e65e190 Fixed typo and includes in obiblob files 2016-04-12 14:52:27 +02:00
375bfcce8a Renamed "Obi_byte_arrays" to "Obiblobs" and moved Obiblob functions to
separate obiblob.c and obiblob.h files
2016-04-12 11:21:14 +02:00
c225cfd8b6 Fixed bug with retrieval of values from AVLs (bad cast in byte array
structure)
2016-04-11 17:07:22 +02:00
6fe4c6134a Allows for calling getConfiguration without a parameter for getting the
default configuration
2016-04-11 13:31:09 +02:00
966b1325ed Deleted declaration of obsolete public function 2016-04-11 11:14:20 +02:00
019dfc01b4 Branch to refactor and debug (AVLs bugged) 2016-04-08 15:38:57 +02:00
45c9c5075c A first version of the fasta parser 2016-04-01 18:15:54 +02:00
20b97c972b Add boolean type in the tag evaluation 2016-04-01 13:42:24 +02:00
efc4a4a3c6 Reduce the call count to eval. This reduce by 3 the time of fast(q|a)
header processing
2016-04-01 08:54:06 +02:00
ce6ea89c21 Add the missing bootstrappip module 2016-03-31 17:28:03 +02:00
4207db7c17 Transfers bug patch from orgasm 2016-03-31 16:53:09 +02:00
1cd35b3359 first version of a fastq parser 2016-03-31 10:47:12 +02:00
f51a6df5b2 Add a class buffering lines during a text file reading 2016-03-30 14:53:25 +02:00
94417e1330 patch the uncompress module to be able to deal with remote file 2016-03-29 20:57:39 +02:00
2e17dbce55 Adds a uopen function able to open transparently a local or a remote
file compressed or not
2016-03-29 20:56:54 +02:00
a9eed1f5d9 Adds class for uncompressing transparently compressed files on line 2016-03-29 18:21:04 +02:00
2dfab3f378 Some changes in relation with the new obitools3.apps module 2016-03-28 15:05:59 +02:00
e583098a96 change in the obi programme according to the new obitools3.apps module
creation
2016-03-28 15:05:02 +02:00
b926ca3997 A template for a command 2016-03-28 15:04:06 +02:00
aacfefad26 A set of utility functions for creating commands 2016-03-28 15:03:26 +02:00
edc4fd7b3e Fixed minor warning 2016-03-25 16:11:52 +01:00
ff6c27acf2 Implemented the retrieval of values with groups of AVLs 2016-03-25 15:35:16 +01:00
69856f18dd untested (and no possible retrieval) of CRC used to represent data in
AVL trees
2016-03-24 16:38:11 +01:00
2c084c8cf7 Switch to 10000000 per avl 2016-03-23 16:13:28 +01:00
58ac860cc7 Added macro for the bloom filter parameters and deleted old unused
macros for crc
2016-03-23 13:33:40 +01:00
d44117d625 obiimport function for testing purposes 2016-03-23 13:00:02 +01:00
6bd42132c4 Minor fixes to silence warnings and replaced two asprintf uses 2016-03-23 12:58:53 +01:00
4085904362 Merge branch 'multiple_avls_bloom' 2016-03-22 14:14:10 +01:00
b04b4b5902 made POSIX compliant 2016-03-21 11:33:06 +01:00
383e738ab7 Merge branch 'master' of git@git.metabarcoding.org:obitools/obitools3.git 2016-03-18 15:49:53 +01:00
3681cecb4d Multiple AVLs with bloom filters (very raw test version) 2016-03-18 11:06:02 +01:00
545ed8111a Code for tests storing data in multiple AVLs.
(note: unretrievable data as implemented)
2016-03-11 15:34:55 +01:00
86071d30c9 Minor improvement in AVL initial size calculation 2016-03-11 14:07:40 +01:00
21d1b2ed3e First implementation of taxonomy reading 2016-03-11 13:56:38 +01:00
6157633137 prototype for the obi unix command and the count sub command 2016-03-08 16:06:00 +01:00
a08def47e6 It is now impossible to create a view with a name identical to one of an
existing written view
2016-03-01 13:36:54 +01:00
fc5a12bad7 Closes #34 2016-02-29 17:56:55 +01:00
e323d8e702 Cython classes for nucleotide sequences (outside or in the context of a
view)
2016-02-29 16:33:30 +01:00
b350ea0393 Fixed minor error 2016-02-29 16:28:34 +01:00
8e9e21a02e Increased the maximum depth of AVL trees 2016-02-29 16:27:23 +01:00
4df313c54a Added Obiviews specialized for the handling of nucleotide sequences 2016-02-25 09:43:27 +01:00
ffc68d448f Deleted a forgotten print statement 2016-02-18 15:15:42 +01:00
a8f03248a8 Major update : views 2016-02-18 10:38:51 +01:00
cfaf069095 Fixed more typos and formatting imperfections. 2015-12-11 17:37:25 +01:00
a6144eabe2 Fixed typos 2015-12-11 17:26:20 +01:00
c139367555 DNA sequences and character strings are now handled using AVL trees. 2015-12-11 17:24:44 +01:00
1586956d57 Added the lists of opened columns and arrays in the OBIDMS structure,
and a counter in the OBIDMS column structure; fixed some bugs and
created tests for referring columns that are bound to disappear anyway.
2015-12-02 17:32:07 +01:00
b45b496b0e Major update: new type of columns containing indices referring to lines
in other columns
2015-11-29 11:57:07 +01:00
2cf10cb6f0 Column type is now passed as a character string when creating the column
(either 'OBI_INT', 'OBI_FLOAT', 'OBI_BOOL', 'OBI_CHAR', 'OBI_STR' or
'OBI_SEQ')
2015-11-23 15:48:27 +01:00
5a5516303d deleting useless .pyc files 2015-11-23 14:43:34 +01:00
d6a99bafea Fixed a major bug with the versioning of columns that was introduced in
f6ec8ba9
2015-11-23 13:34:51 +01:00
08f2657e18 Increased maximum line count of columns to 1^9 2015-11-23 13:23:18 +01:00
6aa2f92930 DNA sequences are now encoded on 4 bits when they are in IUPAC 2015-11-20 15:32:09 +01:00
87044b41d8 modified the encoding function on 2 bits a little 2015-11-20 11:32:47 +01:00
6ab1c83302 New column type for DNA sequences. Only for those coded on 2 bits (only
'ATGCatgc') for now.
2015-11-19 18:12:48 +01:00
e371248567 changed version to 0.0.0 2015-11-19 18:11:21 +01:00
dbf9463238 The endianness of a DMS is now stored in the OBIDMS structure 2015-11-18 15:35:09 +01:00
eb12af4da4 Fixed minor error in the documentation of a function. 2015-11-16 15:38:01 +01:00
e8417b4f6f The endianness of an OBIDMS is now stored in an informations file that
is read when opening the OBIDMS.
2015-11-16 14:37:51 +01:00
6579566c6e Minor changes in code to improve readability and fix C compilation
warnings
2015-11-10 14:37:58 +01:00
410e2e02a0 When retrieving the header of a column, the version number of the column
wanted can now be provided.
2015-11-10 13:30:10 +01:00
8ce4f264aa When enlarging a column, the function doesn't try anymore to keep the
mapped region at the same pointer (never works), and unmap/remap
instead.
2015-11-10 13:18:36 +01:00
d885eb48ff The header size when creating a column is now calculated according to
the size of the header structure and the page size of the platform.
2015-11-10 13:09:30 +01:00
661fe3606a In OBI_CHAR columns, characters are now given and retrieved as decoded
(unicode) characters.
2015-11-10 11:24:08 +01:00
c4b7e579cf Comments in column headers are now working. 2015-11-10 10:56:45 +01:00
f6ec8ba963 The header size is now directly read in the file when a column or an
array is opened.
2015-11-09 17:50:32 +01:00
0e3d6ed2d7 Methods __len__ (number of lines used) and __sizeof__ (total size in
bytes) implemented for columns.
2015-11-09 15:56:20 +01:00
01bfc14503 The data size in bytes is now stored in the header of a column. 2015-11-09 15:55:00 +01:00
65c1b1e8b2 Minor changes to make the creation of files and directories cleaner 2015-11-09 15:22:01 +01:00
b37bd8f21c File descriptors for dms, column and array directories are now stored in
structures.
2015-11-09 15:06:02 +01:00
05e3956a0c Minor changes in code to improve readability (freeing some character
strings earlier)
2015-11-09 11:22:51 +01:00
9b066f4327 Major update: obiarrays with columns containing indices referring to
character strings.
2015-11-06 17:55:15 +01:00
456551ffeb obi arrays that don't work because of cython bug passing wrong pointers 2015-11-03 14:22:00 +01:00
ecb9d97adb Reorganized the code to have less functions, and the functions to get
and format the creation date of a column are now working.
2015-10-15 15:12:45 +02:00
0eaa5aa784 Major changes : new cython subclasses to handle columns with multiple
elements per line in a more efficient way + now elements_names are
passed as a list + new function to recover only the header of a column
2015-10-14 18:05:34 +02:00
21923e213d The unit tests now test for None values 2015-10-12 18:02:40 +02:00
6877fc4892 Fixed a critical bug where values were initialized to NA at the wrong
location when there was multiple elements per line
2015-10-12 17:54:36 +02:00
dbed3d9d1d New module for unit testing with PyUnit 2015-10-09 15:42:57 +02:00
fc8bf16769 Fixed a critical bug in the computation of the new number of lines of a
column when truncating
2015-10-09 13:49:48 +02:00
e114a3c9cb fixed a critical bug where data size was not calculated correctly and
column directory is now closed when column is closed
2015-10-09 10:25:40 +02:00
ebc9f6f512 fixed a bug where Cython was casting doubles in floats 2015-10-08 15:28:30 +02:00
2b3f03ec28 Removed deprecated script 2015-10-08 10:46:46 +02:00
8fd9c06be2 Fixed missing file for documentation compilation 2015-10-08 10:45:54 +02:00
b553eef781 Method to close a DMS is uncommented but not complete yet (columns have
to be closed separately)
2015-10-08 10:44:13 +02:00
ee4c513fd4 Fixed a bug where cloning a column would fail if the data was empty 2015-10-08 10:36:02 +02:00
c013e6ad33 fixed typo in doxygen doc 2015-10-08 10:33:19 +02:00
c98d567e2f Updated the documentation and restructured a bit because it wasn't
compiling (note: Breathe not working)
2015-10-06 11:09:01 +02:00
392f110c8d new functions in the OBIDMS_column class to raise NotImplementedError
exceptions and to get the creation date of a column
2015-10-02 13:51:26 +02:00
6ced3c4896 new functions to get the creation date of a column 2015-10-02 13:47:53 +02:00
4b8bf41a71 closes #13, obi_errno is initialized to 0 2015-10-02 13:46:34 +02:00
c59a244e9d Fixed little typo 2015-09-30 12:07:13 +02:00
4b7f2d268b Doxygen documentation corrected and completed. 2015-09-30 12:03:46 +02:00
45af8396b8 minor change in variable name for better coherence 2015-09-28 13:52:41 +02:00
eeebbc77c3 missing __init__ file in previous commit 2015-09-28 13:52:13 +02:00
72155c3b73 entirely reworked cython wrapper 2015-09-28 13:51:35 +02:00
9aaf2d264a test 2015-09-25 11:29:17 +02:00
0951934f77 test 2015-09-25 11:28:23 +02:00
68b6ab3224 working on the cython wrapper. This doesn't compile 2015-09-24 11:32:09 +02:00
b0570ee486 fixed permissions when creating files 2015-09-22 15:52:07 +02:00
72105b2aed fixed the computation of the data size when cloning a column 2015-09-22 10:27:17 +02:00
9ab8a42340 fixed the mkdir permission mode that forced the user to use sudo to
write in the created directories
2015-09-21 15:48:02 +02:00
7b606c0477 Column files now always have a size that is a multiple of the page size,
and the function that enlarges mapped column files tries mapping on next
byte before unmapping/mapping again
2015-09-21 15:42:29 +02:00
7d7dbb1bf9 Handling of single char columns 2015-09-18 15:49:15 +02:00
34a3717fb9 The cython function to open a column is now a method of the OBIDMS class 2015-09-17 11:23:50 +02:00
2698022aaf New OBIDMS method to list the columns of an OBIDMS 2015-09-15 17:09:31 +02:00
90bf15186e Method to get a column's number of lines used 2015-09-14 17:33:40 +02:00
5c3bc03bd2 Added the handling of errors with the functions to get a value in a
column
2015-09-14 17:04:29 +02:00
0e50fbf706 Merge branch 'master' of git@git.metabarcoding.org:obitools/obitools3.git 2015-09-03 08:38:57 -04:00
f2231770f1 If an error occurs when cloning a column, the new cloned file is now
deleted
2015-09-02 13:06:21 +02:00
f5937ec019 Moving the documentation sources... 2015-09-02 04:50:50 -04:00
52e3f2ce4a Columns that are cloned are now closed after cloning 2015-09-02 10:36:00 +02:00
f0dffaff13 Merge branch 'master' of git@git.metabarcoding.org:obitools/obitools3.git 2015-09-02 10:15:18 +02:00
f31d8983bb Columns are now enlarged if needed when setting a value 2015-09-01 17:38:08 +02:00
a6395ebaf2 Transfert the distutil.ext from the org.asm project 2015-08-27 16:12:19 -04:00
3b7536c0df The number of lines used is updated in the header when a value is set,
and the iteration on the values of a column is done on the number of
lines used (instead of on the total line count).
2015-08-26 17:25:15 +02:00
34ade161de added an obidebug() line 2015-08-26 17:05:37 +02:00
48d10ea17c Functions to truncate and/or close a column 2015-08-26 17:01:54 +02:00
fdc3b96beb Added cython subclasses for columns with _writable or _read for each
data type. Trying to set a value on a _read subclass raises an
exception.
2015-08-26 15:34:12 +02:00
95dbeb25a0 Added functions to get and set values in columns using the element name
(for columns with lines made of vectors of elements), for all data types
2015-08-26 15:05:23 +02:00
9d91e907e5 When a column is created, its data is initialized to the NA value of the
obitype
2015-08-26 12:00:38 +02:00
3f81dc0173 Changed the variable name 'nb_elements' to 'nb_lines' for better clarity 2015-08-26 10:52:19 +02:00
8ff3488926 Fix: The number of lines used is set when cloning a column with its data 2015-08-26 10:38:07 +02:00
611699252e Files and functions for columns with the data type OBIChar_t, working
but using char* (needs to be changed)
2015-08-26 10:31:56 +02:00
9ad31fddff Added the possibility to clone a column, with or without its data 2015-08-26 10:29:07 +02:00
e6d96d999a Activated the html annotated files output in setup.py 2015-08-12 15:47:13 +02:00
eaca9c85f1 Files and functions for OBIDMS columns with the data type OBI_BOOL 2015-08-11 11:00:59 +02:00
bc2ca89088 Basis for OBIDMS columns with the type OBI_IDX 2015-08-10 16:30:55 +02:00
a5ed8f0ef0 A basic obicount for testing purposes 2015-08-10 16:06:05 +02:00
3b3267db9a Removed a useless #include 2015-08-10 16:05:34 +02:00
d53c16cf3e Functions and files for OBIDMS columns with the type OBI_FLOAT 2015-08-10 16:04:53 +02:00
66f397239b Commented __del__ functions for now, as they contain closedir() that can
create problems
2015-08-03 16:15:26 +02:00
1fe2e36d5d Fixed a critical bug where the elements names were not stored correctly 2015-08-03 16:11:55 +02:00
ab44e32afb More efficient obi_column_set_int() and obi_column_get_int() 2015-08-03 15:26:28 +02:00
41f627091f Error handling: obidebug() with message for all errors, and removed
commands closing directories when an error occurred (creating more
errors).
2015-08-03 15:10:39 +02:00
1e01c9059c functions to get data type as a character string 2015-08-03 11:46:21 +02:00
5f62cd8526 C sources modified to add the handling of OBIDM columns with the type
OBI_INT, and the handling of multiple types in general
2015-07-31 18:03:48 +02:00
a6abc74500 Cython sources modified to add the handling of OBIDMS columns with the
type OBI_INT
2015-07-31 18:02:40 +02:00
5c674715ee added the idea of each line of a column corresponding to a vector, with
the added informations of the number of elements per line and the
elements' names in the column's header structure
2015-07-20 16:08:50 +02:00
484fcca557 changed the private *at functions to the official ones for openat and
mkdirat, but not for opendirat, as there is no official version of it.
2015-07-20 11:38:43 +02:00
802f9983c2 fixing file names in list of C files after changing 'column groups' to
'column directories'
2015-07-20 11:13:56 +02:00
d26abc438d fixing an 'error' I had introduced to test the debugging system 2015-07-20 11:12:48 +02:00
2162bf4272 obidebug.h for debugging tools 2015-06-26 17:56:15 +02:00
c454f9612e changed 'column groups' to be called 'column directories' for now. 2015-06-26 17:53:03 +02:00
71cd59f775 close file descriptors that don't refer to a directory (as those can not
be reopened)
2015-06-24 13:53:12 +02:00
719ef61461 changed file names 2015-06-23 18:36:20 +02:00
152b34b5f4 Closes #10 : column groups stored in directories 2015-06-23 18:35:34 +02:00
61b6c3ce83 Added OBI errnos but work in progress 2015-06-23 17:56:21 +02:00
0a78a786c1 fixed replacement *at functions and made get_full_path() function public 2015-06-23 17:55:00 +02:00
e6e68786f7 private *at functions (openat, mkdirat, opendirat, closedirat) 2015-06-18 17:19:23 +02:00
58e7066b9f updated and fixed the documentation 2015-06-17 17:17:54 +02:00
ed662077b3 fixed bugs and typos in the obidms and obidmscolumn source and header
files
2015-06-17 16:51:16 +02:00
9f9c20aa4a new OBIDMS_column cython class 2015-06-17 16:49:49 +02:00
73ba9fd4c3 updated the OBIDMS class 2015-06-17 16:48:56 +02:00
771be89103 updated the documentation (replaced 'release files' with 'version
files')
2015-06-17 16:45:04 +02:00
f740557d96 fixes #9 : replacement function for openat(). 2015-06-17 14:55:05 +02:00
aff74c72dd updated the documentation 2015-06-10 15:41:14 +02:00
5d967f5b7b fixed typos and other minor things 2015-06-10 15:19:02 +02:00
9b6de9c411 Attempt at defining some naming conventions. 2015-05-28 17:54:11 +02:00
27e246e709 updated the documentation 2015-05-28 17:03:50 +02:00
ff64d1b2e9 deleting a useless file 2015-05-28 16:35:43 +02:00
235a81a534 deleting test data file 2015-05-28 16:35:03 +02:00
4759e1cf54 deleting the Breathe sources 2015-05-28 16:34:24 +02:00
f4a123cd17 updated the documentation with the special values, and the idea of
column directories and column group directories.
2015-05-28 16:33:45 +02:00
245 changed files with 23070 additions and 14918 deletions

LICENSE (new file, 518 lines)

@@ -0,0 +1,518 @@
CeCILL FREE SOFTWARE LICENSE AGREEMENT
Version 2.1 dated 2013-06-21
Notice
This Agreement is a Free Software license agreement that is the result
of discussions between its authors in order to ensure compliance with
the two main principles guiding its drafting:
* firstly, compliance with the principles governing the distribution
of Free Software: access to source code, broad rights granted to users,
* secondly, the election of a governing law, French law, with which it
is conformant, both as regards the law of torts and intellectual
property law, and the protection that it offers to both authors and
holders of the economic rights over software.
The authors of the CeCILL (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre])
license are:
Commissariat à l'énergie atomique et aux énergies alternatives - CEA, a
public scientific, technical and industrial research establishment,
having its principal place of business at 25 rue Leblanc, immeuble Le
Ponant D, 75015 Paris, France.
Centre National de la Recherche Scientifique - CNRS, a public scientific
and technological establishment, having its principal place of business
at 3 rue Michel-Ange, 75794 Paris cedex 16, France.
Institut National de Recherche en Informatique et en Automatique -
Inria, a public scientific and technological establishment, having its
principal place of business at Domaine de Voluceau, Rocquencourt, BP
105, 78153 Le Chesnay cedex, France.
Preamble
The purpose of this Free Software license agreement is to grant users
the right to modify and redistribute the software governed by this
license within the framework of an open source distribution model.
The exercising of this right is conditional upon certain obligations for
users so as to preserve this status for all subsequent redistributions.
In consideration of access to the source code and the rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors only have limited liability.
In this respect, the risks associated with loading, using, modifying
and/or developing or reproducing the software by the user are brought to
the user's attention, given its Free Software status, which may make it
complicated to use, with the result that its use is reserved for
developers and experienced professionals having in-depth computer
knowledge. Users are therefore encouraged to load and test the
suitability of the software as regards their requirements in conditions
enabling the security of their systems and/or data to be ensured and,
more generally, to use and operate it in the same conditions of
security. This Agreement may be freely reproduced and published,
provided it is not altered, and that no provisions are either added or
removed herefrom.
This Agreement may apply to any or all software for which the holder of
the economic rights decides to submit the use thereof to its provisions.
Frequently asked questions can be found on the official website of the
CeCILL licenses family (http://www.cecill.info/index.en.html) for any
necessary clarification.
Article 1 - DEFINITIONS
For the purpose of this Agreement, when the following expressions
commence with a capital letter, they shall have the following meaning:
Agreement: means this license agreement, and its possible subsequent
versions and annexes.
Software: means the software in its Object Code and/or Source Code form
and, where applicable, its documentation, "as is" when the Licensee
accepts the Agreement.
Initial Software: means the Software in its Source Code and possibly its
Object Code form and, where applicable, its documentation, "as is" when
it is first distributed under the terms and conditions of the Agreement.
Modified Software: means the Software modified by at least one
Contribution.
Source Code: means all the Software's instructions and program lines to
which access is required so as to modify the Software.
Object Code: means the binary files originating from the compilation of
the Source Code.
Holder: means the holder(s) of the economic rights over the Initial
Software.
Licensee: means the Software user(s) having accepted the Agreement.
Contributor: means a Licensee having made at least one Contribution.
Licensor: means the Holder, or any other individual or legal entity, who
distributes the Software under the Agreement.
Contribution: means any or all modifications, corrections, translations,
adaptations and/or new functions integrated into the Software by any or
all Contributors, as well as any or all Internal Modules.
Module: means a set of sources files including their documentation that
enables supplementary functions or services in addition to those offered
by the Software.
External Module: means any or all Modules, not derived from the
Software, so that this Module and the Software run in separate address
spaces, with one calling the other when they are run.
Internal Module: means any or all Module, connected to the Software so
that they both execute in the same address space.
GNU GPL: means the GNU General Public License version 2 or any
subsequent version, as published by the Free Software Foundation Inc.
GNU Affero GPL: means the GNU Affero General Public License version 3 or
any subsequent version, as published by the Free Software Foundation Inc.
EUPL: means the European Union Public License version 1.1 or any
subsequent version, as published by the European Commission.
Parties: mean both the Licensee and the Licensor.
These expressions may be used both in singular and plural form.
Article 2 - PURPOSE
The purpose of the Agreement is the grant by the Licensor to the
Licensee of a non-exclusive, transferable and worldwide license for the
Software as set forth in Article 5 <#scope> hereinafter for the whole
term of the protection granted by the rights over said Software.
Article 3 - ACCEPTANCE
3.1 The Licensee shall be deemed as having accepted the terms and
conditions of this Agreement upon the occurrence of the first of the
following events:
* (i) loading the Software by any or all means, notably, by
downloading from a remote server, or by loading from a physical medium;
* (ii) the first time the Licensee exercises any of the rights granted
hereunder.
3.2 One copy of the Agreement, containing a notice relating to the
characteristics of the Software, to the limited warranty, and to the
fact that its use is restricted to experienced users has been provided
to the Licensee prior to its acceptance as set forth in Article 3.1
<#accepting> hereinabove, and the Licensee hereby acknowledges that it
has read and understood it.
Article 4 - EFFECTIVE DATE AND TERM
4.1 EFFECTIVE DATE
The Agreement shall become effective on the date when it is accepted by
the Licensee as set forth in Article 3.1 <#accepting>.
4.2 TERM
The Agreement shall remain in force for the entire legal term of
protection of the economic rights over the Software.
Article 5 - SCOPE OF RIGHTS GRANTED
The Licensor hereby grants to the Licensee, who accepts, the following
rights over the Software for any or all use, and for the term of the
Agreement, on the basis of the terms and conditions set forth hereinafter.
Besides, if the Licensor owns or comes to own one or more patents
protecting all or part of the functions of the Software or of its
components, the Licensor undertakes not to enforce the rights granted by
these patents against successive Licensees using, exploiting or
modifying the Software. If these patents are transferred, the Licensor
undertakes to have the transferees subscribe to the obligations set
forth in this paragraph.
5.1 RIGHT OF USE
The Licensee is authorized to use the Software, without any limitation
as to its fields of application, with it being hereinafter specified
that this comprises:
1. permanent or temporary reproduction of all or part of the Software
by any or all means and in any or all form.
2. loading, displaying, running, or storing the Software on any or all
medium.
3. entitlement to observe, study or test its operation so as to
determine the ideas and principles behind any or all constituent
elements of said Software. This shall apply when the Licensee
carries out any or all loading, displaying, running, transmission or
storage operation as regards the Software, that it is entitled to
carry out hereunder.
5.2 ENTITLEMENT TO MAKE CONTRIBUTIONS
The right to make Contributions includes the right to translate, adapt,
arrange, or make any or all modifications to the Software, and the right
to reproduce the resulting software.
The Licensee is authorized to make any or all Contributions to the
Software provided that it includes an explicit notice that it is the
author of said Contribution and indicates the date of the creation thereof.
5.3 RIGHT OF DISTRIBUTION
In particular, the right of distribution includes the right to publish,
transmit and communicate the Software to the general public on any or
all medium, and by any or all means, and the right to market, either in
consideration of a fee, or free of charge, one or more copies of the
Software by any means.
The Licensee is further authorized to distribute copies of the modified
or unmodified Software to third parties according to the terms and
conditions set forth hereinafter.
5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION
The Licensee is authorized to distribute true copies of the Software in
Source Code or Object Code form, provided that said distribution
complies with all the provisions of the Agreement and is accompanied by:
1. a copy of the Agreement,
2. a notice relating to the limitation of both the Licensor's warranty
and liability as set forth in Articles 8 and 9,
and that, in the event that only the Object Code of the Software is
redistributed, the Licensee allows effective access to the full Source
Code of the Software for a period of at least three years from the
distribution of the Software, it being understood that the additional
acquisition cost of the Source Code shall not exceed the cost of the
data transfer.
5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE
When the Licensee makes a Contribution to the Software, the terms and
conditions for the distribution of the resulting Modified Software
become subject to all the provisions of this Agreement.
The Licensee is authorized to distribute the Modified Software, in
source code or object code form, provided that said distribution
complies with all the provisions of the Agreement and is accompanied by:
1. a copy of the Agreement,
2. a notice relating to the limitation of both the Licensor's warranty
and liability as set forth in Articles 8 and 9,
and, in the event that only the object code of the Modified Software is
redistributed,
3. a note stating the conditions of effective access to the full source
code of the Modified Software for a period of at least three years
from the distribution of the Modified Software, it being understood
that the additional acquisition cost of the source code shall not
exceed the cost of the data transfer.
5.3.3 DISTRIBUTION OF EXTERNAL MODULES
When the Licensee has developed an External Module, the terms and
conditions of this Agreement do not apply to said External Module, that
may be distributed under a separate license agreement.
5.3.4 COMPATIBILITY WITH OTHER LICENSES
The Licensee can include a code that is subject to the provisions of one
of the versions of the GNU GPL, GNU Affero GPL and/or EUPL in the
Modified or unmodified Software, and distribute that entire code under
the terms of the same version of the GNU GPL, GNU Affero GPL and/or EUPL.
The Licensee can include the Modified or unmodified Software in a code
that is subject to the provisions of one of the versions of the GNU GPL,
GNU Affero GPL and/or EUPL and distribute that entire code under the
terms of the same version of the GNU GPL, GNU Affero GPL and/or EUPL.
Article 6 - INTELLECTUAL PROPERTY
6.1 OVER THE INITIAL SOFTWARE
The Holder owns the economic rights over the Initial Software. Any or
all use of the Initial Software is subject to compliance with the terms
and conditions under which the Holder has elected to distribute its work
and no one shall be entitled to modify the terms and conditions for the
distribution of said Initial Software.
The Holder undertakes that the Initial Software will remain ruled at
least by this Agreement, for the duration set forth in Article 4.2 <#term>.
6.2 OVER THE CONTRIBUTIONS
The Licensee who develops a Contribution is the owner of the
intellectual property rights over this Contribution as defined by
applicable law.
6.3 OVER THE EXTERNAL MODULES
The Licensee who develops an External Module is the owner of the
intellectual property rights over this External Module as defined by
applicable law and is free to choose the type of agreement that shall
govern its distribution.
6.4 JOINT PROVISIONS
The Licensee expressly undertakes:
1. not to remove, or modify, in any manner, the intellectual property
notices attached to the Software;
2. to reproduce said notices, in an identical manner, in the copies of
the Software modified or not.
The Licensee undertakes not to directly or indirectly infringe the
intellectual property rights on the Software of the Holder and/or
Contributors, and to take, where applicable, vis-à-vis its staff, any
and all measures required to ensure respect of said intellectual
property rights of the Holder and/or Contributors.
Article 7 - RELATED SERVICES
7.1 Under no circumstances shall the Agreement oblige the Licensor to
provide technical assistance or maintenance services for the Software.
However, the Licensor is entitled to offer this type of services. The
terms and conditions of such technical assistance, and/or such
maintenance, shall be set forth in a separate instrument. Only the
Licensor offering said maintenance and/or technical assistance services
shall incur liability therefor.
7.2 Similarly, any Licensor is entitled to offer to its licensees, under
its sole responsibility, a warranty, that shall only be binding upon
itself, for the redistribution of the Software and/or the Modified
Software, under terms and conditions that it is free to decide. Said
warranty, and the financial terms and conditions of its application,
shall be subject of a separate instrument executed between the Licensor
and the Licensee.
Article 8 - LIABILITY
8.1 Subject to the provisions of Article 8.2, the Licensee shall be
entitled to claim compensation for any direct loss it may have suffered
from the Software as a result of a fault on the part of the relevant
Licensor, subject to providing evidence thereof.
8.2 The Licensor's liability is limited to the commitments made under
this Agreement and shall not be incurred as a result of in particular:
(i) loss due the Licensee's total or partial failure to fulfill its
obligations, (ii) direct or consequential loss that is suffered by the
Licensee due to the use or performance of the Software, and (iii) more
generally, any consequential loss. In particular the Parties expressly
agree that any or all pecuniary or business loss (i.e. loss of data,
loss of profits, operating loss, loss of customers or orders,
opportunity cost, any disturbance to business activities) or any or all
legal proceedings instituted against the Licensee by a third party,
shall constitute consequential loss and shall not provide entitlement to
any or all compensation from the Licensor.
Article 9 - WARRANTY
9.1 The Licensee acknowledges that the scientific and technical
state-of-the-art when the Software was distributed did not enable all
possible uses to be tested and verified, nor for the presence of
possible defects to be detected. In this respect, the Licensee's
attention has been drawn to the risks associated with loading, using,
modifying and/or developing and reproducing the Software which are
reserved for experienced users.
The Licensee shall be responsible for verifying, by any or all means,
the suitability of the product for its requirements, its good working
order, and for ensuring that it shall not cause damage to either persons
or properties.
9.2 The Licensor hereby represents, in good faith, that it is entitled
to grant all the rights over the Software (including in particular the
rights set forth in Article 5 <#scope>).
9.3 The Licensee acknowledges that the Software is supplied "as is" by
the Licensor without any other express or tacit warranty, other than
that provided for in Article 9.2 <#good-faith> and, in particular,
without any warranty as to its commercial value, its secured, safe,
innovative or relevant nature.
Specifically, the Licensor does not warrant that the Software is free
from any error, that it will operate without interruption, that it will
be compatible with the Licensee's own equipment and software
configuration, nor that it will meet the Licensee's requirements.
9.4 The Licensor does not either expressly or tacitly warrant that the
Software does not infringe any third party intellectual property right
relating to a patent, software or any other property right. Therefore,
the Licensor disclaims any and all liability towards the Licensee
arising out of any or all proceedings for infringement that may be
instituted in respect of the use, modification and redistribution of the
Software. Nevertheless, should such proceedings be instituted against
the Licensee, the Licensor shall provide it with technical and legal
expertise for its defense. Such technical and legal expertise shall be
decided on a case-by-case basis between the relevant Licensor and the
Licensee pursuant to a memorandum of understanding. The Licensor
disclaims any and all liability as regards the Licensee's use of the
name of the Software. No warranty is given as regards the existence of
prior rights over the name of the Software or as regards the existence
of a trademark.
Article 10 - TERMINATION
10.1 In the event of a breach by the Licensee of its obligations
hereunder, the Licensor may automatically terminate this Agreement
thirty (30) days after notice has been sent to the Licensee and has
remained ineffective.
10.2 A Licensee whose Agreement is terminated shall no longer be
authorized to use, modify or distribute the Software. However, any
licenses that it may have granted prior to termination of the Agreement
shall remain valid subject to their having been granted in compliance
with the terms and conditions hereof.
Article 11 - MISCELLANEOUS
11.1 EXCUSABLE EVENTS
Neither Party shall be liable for any or all delay, or failure to
perform the Agreement, that may be attributable to an event of force
majeure, an act of God or an outside cause, such as defective
functioning or interruptions of the electricity or telecommunications
networks, network paralysis following a virus attack, intervention by
government authorities, natural disasters, water damage, earthquakes,
fire, explosions, strikes and labor unrest, war, etc.
11.2 Any failure by either Party, on one or more occasions, to invoke
one or more of the provisions hereof, shall under no circumstances be
interpreted as being a waiver by the interested Party of its right to
invoke said provision(s) subsequently.
11.3 The Agreement cancels and replaces any or all previous agreements,
whether written or oral, between the Parties and having the same
purpose, and constitutes the entirety of the agreement between said
Parties concerning said purpose. No supplement or modification to the
terms and conditions hereof shall be effective as between the Parties
unless it is made in writing and signed by their duly authorized
representatives.
11.4 In the event that one or more of the provisions hereof were to
conflict with a current or future applicable act or legislative text,
said act or legislative text shall prevail, and the Parties shall make
the necessary amendments so as to comply with said act or legislative
text. All other provisions shall remain effective. Similarly, invalidity
of a provision of the Agreement, for any reason whatsoever, shall not
cause the Agreement as a whole to be invalid.
11.5 LANGUAGE
The Agreement is drafted in both French and English and both versions
are deemed authentic.
Article 12 - NEW VERSIONS OF THE AGREEMENT
12.1 Any person is authorized to duplicate and distribute copies of this
Agreement.
12.2 So as to ensure coherence, the wording of this Agreement is
protected and may only be modified by the authors of the License, who
reserve the right to periodically publish updates or new versions of the
Agreement, each with a separate number. These subsequent versions may
address new issues encountered by Free Software.
12.3 Any Software distributed under a given version of the Agreement may
only be subsequently distributed under the same version of the Agreement
or a subsequent version, subject to the provisions of Article 5.3.4
<#compatibility>.
Article 13 - GOVERNING LAW AND JURISDICTION
13.1 The Agreement is governed by French law. The Parties agree to
endeavor to seek an amicable solution to any disagreements or disputes
that may arise during the performance of the Agreement.
13.2 Failing an amicable solution within two (2) months as from their
occurrence, and unless emergency proceedings are necessary, the
disagreements or disputes shall be referred to the Paris Courts having
jurisdiction, by the more diligent Party.

README.md (new file, 40 lines)

@@ -0,0 +1,40 @@
The `OBITools3`: A package for the management of analyses and data in DNA metabarcoding
---------------------------------------------
DNA metabarcoding offers new perspectives for biodiversity research [1]. This approach to ecosystem studies relies heavily on the use of Next-Generation Sequencing (NGS), and consequently requires the ability to process large volumes of data. The `OBITools` package satisfies this requirement thanks to a set of programs specifically designed for analyzing NGS data in a DNA metabarcoding context [2] - <http://metabarcoding.org/obitools>. Their capacity to filter and edit sequences while taking taxonomic annotation into account helps to set up tailor-made analysis pipelines for a broad range of DNA metabarcoding applications, including biodiversity surveys and diet analyses.
**The `OBITools3`.** This new version of the `OBITools` aims to significantly improve storage efficiency and data processing speed. To this end, the `OBITools3` rely on an ad hoc database system inside which all the data involved in a DNA metabarcoding experiment is stored: the sequences, the metadata (describing for instance the samples), the reference sequence database used for taxonomic annotation, as well as the taxonomic databases. Besides the gain in efficiency, this new structure allows easier access to all the data associated with an experiment.
**Column-oriented storage.** An analysis pipeline corresponds to a succession of commands, each computing one step of the analysis, where the result of command *n* is used by command *n+1*. DNA metabarcoding data can easily be represented in the form of tables, and each command can be regarded as an operation transforming one or several 'input' tables into one or several 'output' tables, which can be used by the next command. Many of the basic operations in a pipeline copy a large part of the input tables to the result tables without modification, and use only a small part of the input data for their calculations. In the original `OBITools`, those tables are kept in the form of annotated sequence files in the FASTA or FASTQ format. This has two consequences: i) keeping the intermediate results of the analysis pipeline means using disk space for a large volume of redundant data, and ii) encoding and decoding information that is not actually used represents a large part of the processing time. The new database system used by the `OBITools3` (called DMS, for Data Management System) relies on column-oriented storage. The columns are immutable and can be assembled into views representing the data tables. This way, the data not modified by a command can be associated with the result table without duplicating any information, and the data not used at all by a command can be associated with the result table without even being read. This strategy saves disk space by limiting data redundancy, and saves execution time by limiting data reading, writing and conversion operations. Finally, to optimize data access, each column is stored in a binary file directly mapped in memory for reading and writing operations.
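The column-and-view idea can be pictured with a minimal sketch, assuming hypothetical file names and a plain dict standing in for a view (this is not the actual `OBITools3` on-disk format or API): each column is a flat binary file mapped into memory, and a view merely references immutable columns, so unmodified data is shared instead of copied.

```python
import numpy as np

# Create a tiny demo column file: a flat binary vector of int32 values.
np.arange(5, dtype=np.int32).tofile("count@1.col")

def open_column(path, dtype):
    # Memory-map the column file: values are read lazily through the page
    # cache, with no text parsing of FASTA/FASTQ records involved.
    return np.memmap(path, dtype=dtype, mode="r")

# A "view" is just a table of references to immutable columns, so two views
# can point to the same physical column file without duplicating it on disk.
view_1 = {"count": open_column("count@1.col", np.int32)}
view_2 = {"count": view_1["count"]}   # shared by reference, not copied

print(view_2["count"][:])             # [0 1 2 3 4]
```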
**Storage optimization.** DNA metabarcoding data is intrinsically very redundant. For example, the same sequence, corresponding to one species, will be present several thousand times across all samples. In order to limit the disk space used and to make comparison operations more efficient, character-string data is stored in columns through an indexing structure that remains efficient on millions of values and couples hash functions, Bloom filters and AVL trees. Finally, DNA sequences are compressed by encoding each nucleotide on two or four bits, depending on whether the sequences contain only the four nucleotides (A, C, G, T) or use the IUPAC codes.
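As an illustration of the two-bit packing described above, here is a minimal sketch (not the `OBITools3` implementation) that stores an A/C/G/T-only sequence using two bits per nucleotide, i.e. four nucleotides per byte; sequences containing IUPAC ambiguity codes would need the four-bit variant instead.

```python
_CODE = {"A": 0b00, "C": 0b01, "G": 0b10, "T": 0b11}
_BASE = "ACGT"

def encode_2bit(seq):
    """Pack an ACGT-only sequence into two bits per nucleotide."""
    packed = bytearray((len(seq) + 3) // 4)
    for i, base in enumerate(seq.upper()):
        packed[i // 4] |= _CODE[base] << (6 - 2 * (i % 4))
    return bytes(packed)

def decode_2bit(blob, length):
    """Recover the sequence; `length` is needed because the last byte
    may be only partially used."""
    return "".join(_BASE[(blob[i // 4] >> (6 - 2 * (i % 4))) & 0b11]
                   for i in range(length))

assert decode_2bit(encode_2bit("ACGTACGTAC"), 10) == "ACGTACGTAC"
```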
**Saving the data processing history.** All the information used by the `OBITools3` is stored in immutable data structures in the DMS. If a command has to modify a column used as input to produce its result, a new version of that column is created, leaving the initial version intact. This storage system makes it possible to keep, at minimal cost, all the intermediate results produced by the pipeline. Storing metadata describing all the operations that have produced a view (a result table) in the DMS makes it possible to build a directed hypergraph in which each node corresponds to a view and each arc to an operation. By retracing the dependency relationships in this hypergraph, it is possible to reconstruct *a posteriori* the entire process that has produced a result table.
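The provenance mechanism can be sketched as follows, assuming purely illustrative view and command names (the real `OBITools3` stores this metadata inside the DMS): if each view records the views it was built from and the command that produced it, the whole history of a result can be rebuilt by walking those links backwards.

```python
# Each node records its input views and the command that produced it.
provenance = {
    "imported":     {"inputs": [],             "command": "obi import"},
    "filtered":     {"inputs": ["imported"],   "command": "obi grep"},
    "dereplicated": {"inputs": ["filtered"],   "command": "obi uniq"},
}

def history(view, graph):
    """Return the chain of (view, command) pairs leading to `view`."""
    steps, stack = [], [view]
    while stack:
        name = stack.pop()
        steps.append((name, graph[name]["command"]))
        stack.extend(graph[name]["inputs"])
    return list(reversed(steps))

print(history("dereplicated", provenance))
# [('imported', 'obi import'), ('filtered', 'obi grep'), ('dereplicated', 'obi uniq')]
```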
**Tools.** The `OBITools3` offer the same tools as the original `OBITools`. Eventually, new versions of `ecoPrimers` (PCR primer design) [3], `ecoPCR` (*in silico* PCR) [4], as well as `Sumatra` (sequence alignment) and `Sumaclust` (sequence alignment and clustering) [5] will be added, taking advantage of the database structure developed for the `OBITools3`.
**Implementation and availability.** The lower layers managing the DMS, as well as all the compute-intensive functions, are coded in `C99` for efficiency reasons. A `Cython` (<http://www.cython.org>) object layer allows for a simple but efficient implementation of the `OBITools3` commands in `Python 3.5`. The `OBITools3` are still in development, and the first functional versions are expected for autumn 2016.
**References.**
1. Taberlet P, Coissac E, Hajibabaei M, Rieseberg LH: Environmental DNA. Mol Ecol 2012:1789–1793.
2. Boyer F, Mercier C, Bonin A, Le Bras Y, Taberlet P, Coissac E: OBITools: a Unix-inspired software package for DNA metabarcoding. Mol Ecol Resour 2015:n/a–n/a.
3. Riaz T, Shehzad W, Viari A, Pompanon F, Taberlet P, Coissac E: ecoPrimers: inference of new DNA barcode markers from whole genome sequence analysis. Nucleic Acids Res 2011, 39:e145.
4. Ficetola GF, Coissac E, Zundel S, Riaz T, Shehzad W, Bessière J, Taberlet P, Pompanon F: An in silico approach for the evaluation of DNA barcodes. BMC Genomics 2010, 11:434.
5. Mercier C, Boyer F, Bonin A, Coissac E (2013) SUMATRA and SUMACLUST: fast and exact comparison and clustering of sequences. Available: <http://metabarcoding.org/sumatra> and <http://metabarcoding.org/sumaclust>


@@ -10,33 +10,29 @@ from obidistutils.serenity.checksystem import is_mac_system

class build(ori_build):

    def has_ctools(self):
        return self.distribution.has_ctools()

    def has_files(self):
        return self.distribution.has_files()

    def has_executables(self):
        return self.distribution.has_executables()

    def has_ext_modules(self):
        return self.distribution.has_ext_modules()

    def has_littlebigman(self):
        return True

    def has_pidname(self):
        return is_mac_system()

    def has_doc(self):
        return True

    def has_littlebigman(self):
        return True

    sub_commands = [('littlebigman', has_littlebigman),
                    ('pidname',has_pidname),
                    ('build_ctools', has_ctools),
                    ('build_files', has_files),
                    ('build_cexe', has_executables)] \
                   + ori_build.sub_commands + \
                   [('build_sphinx',has_doc)]

    try:
        from obidistutils.command.build_sphinx import build_sphinx # @UnusedImport
        sub_commands = [("littlebigman",has_littlebigman),
                        ('pidname',has_pidname)
                        ] \
                       + ori_build.sub_commands + \
                       [('build_sphinx',has_doc)]
    except ImportError:
        sub_commands = [("littlebigman",has_littlebigman),
                        ('pidname',has_pidname)
                        ] \
                       + ori_build.sub_commands


@ -4,10 +4,11 @@ Created on 20 oct. 2012
@author: coissac
'''
from obidistutils.command.build_ctools import build_ctools
from .build_ctools import build_ctools
from .build_exe import build_exe
from distutils.errors import DistutilsSetupError
from distutils import log
import os
class build_cexe(build_ctools):
@ -38,7 +39,9 @@ class build_cexe(build_ctools):
self.set_undefined_options('build_files',
('files', 'built_files'))
self.executables = self.distribution.executables
self.executables = self.distribution.executables
# self.build_cexe = os.path.join(os.path.dirname(self.build_cexe),'cbinaries')
# self.mkpath(self.build_cexe)
if self.executables:
self.check_executable_list(self.executables)
@ -70,4 +73,13 @@ class build_cexe(build_ctools):
log.info("%s ok",message)
return sources
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
build_exe.run(self)


@ -5,7 +5,8 @@ Created on 20 oct. 2012
'''
from obidistutils.command.build_exe import build_exe
from .build_exe import build_exe
from distutils import log
class build_ctools(build_exe):
description = "build C/C++ executable not distributed with Python extensions"
@ -37,19 +38,26 @@ class build_ctools(build_exe):
self.executables = self.distribution.ctools
self.check_executable_list(self.executables)
if self.littlebigman =='-DLITTLE_END':
if self.define is None:
self.define=[('LITTLE_END',None)]
else:
self.define.append(('LITTLE_END',None))
log.info('Look for CPU architecture... %s',self.define)
self.ctools = set()
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
build_exe.run(self)
for e,p in self.executables: # @UnusedVariable
self.ctools.add(e)


@ -208,3 +208,4 @@ class build_exe(Command):
output_dir=self.build_cexe,
debug=self.debug)


@ -7,87 +7,109 @@ Created on 13 fevr. 2014
from distutils import log
import os
from Cython.Distutils import build_ext as ori_build_ext # @UnresolvedImport
from distutils.errors import DistutilsSetupError
class build_ext(ori_build_ext):
def modifyDocScripts(self):
build_dir_file=open("doc/build_dir.txt","w")
print(self.build_lib,file=build_dir_file)
build_dir_file.close()
def initialize_options(self):
ori_build_ext.initialize_options(self) # @UndefinedVariable
self.littlebigman = None
self.built_files = None
try:
from Cython.Distutils import build_ext as ori_build_ext # @UnresolvedImport
from Cython.Compiler import Options as cython_options # @UnresolvedImport
class build_ext(ori_build_ext):
def finalize_options(self):
ori_build_ext.finalize_options(self) # @UndefinedVariable
self.set_undefined_options('littlebigman',
('littlebigman', 'littlebigman'))
self.set_undefined_options('build_files',
('files', 'built_files'))
self.cython_c_in_temp = 1
if self.littlebigman =='-DLITTLE_END':
if self.define is None:
self.define=[('LITTLE_END',None)]
else:
self.define.append(('LITTLE_END',None))
def substitute_sources(self,exe_name,sources):
"""
Substitutes source file name starting by an @ by the actual
name of the built file (see --> build_files)
"""
sources = list(sources)
for i in range(len(sources)):
message = "%s :-> %s" % (exe_name,sources[i])
if sources[i][0]=='@':
try:
filename = self.built_files[sources[i][1:]]
except KeyError:
tmpfilename = os.path.join(self.build_temp,sources[i][1:])
if os.path.isfile (tmpfilename):
filename = tmpfilename
else:
raise DistutilsSetupError(
'The %s filename declared in the source '
'files of the program %s have not been '
'built by the installation process' % (sources[i],
exe_name))
sources[i]=filename
log.info("%s changed to %s",message,filename)
else:
log.info("%s ok",message)
return sources
def build_extensions(self):
# First, sanity-check the 'extensions' list
for ext in self.extensions:
ext.sources = self.substitute_sources(ext.name,ext.sources)
def modifyDocScripts(self):
build_dir_file=open("doc/sphinx/build_dir.txt","w")
print(self.build_lib,file=build_dir_file)
build_dir_file.close()
self.check_extensions_list(self.extensions)
for ext in self.extensions:
log.info("%s :-> %s",ext.name,ext.sources)
ext.sources = self.cython_sources(ext.sources, ext)
self.build_extension(ext)
def initialize_options(self):
ori_build_ext.initialize_options(self) # @UndefinedVariable
self.littlebigman = None
self.built_files = None
def run(self):
self.modifyDocScripts()
ori_build_ext.run(self) # @UndefinedVariable
def finalize_options(self):
ori_build_ext.finalize_options(self) # @UndefinedVariable
self.set_undefined_options('littlebigman',
('littlebigman', 'littlebigman'))
self.set_undefined_options('build_files',
('files', 'built_files'))
self.cython_c_in_temp = 1
if self.littlebigman =='-DLITTLE_END':
if self.define is None:
self.define=[('LITTLE_END',None)]
else:
self.define.append(('LITTLE_END',None))
def substitute_sources(self,exe_name,sources):
"""
Substitutes source file name starting by an @ by the actual
name of the built file (see --> build_files)
"""
sources = list(sources)
for i in range(len(sources)):
message = "%s :-> %s" % (exe_name,sources[i])
if sources[i][0]=='@':
try:
filename = self.built_files[sources[i][1:]]
except KeyError:
tmpfilename = os.path.join(self.build_temp,sources[i][1:])
if os.path.isfile (tmpfilename):
filename = tmpfilename
else:
raise DistutilsSetupError(
'The %s filename declared in the source '
'files of the program %s have not been '
'built by the installation process' % (sources[i],
exe_name))
sources[i]=filename
log.info("%s changed to %s",message,filename)
else:
log.info("%s ok",message)
return sources
def build_extensions(self):
# First, sanity-check the 'extensions' list
for ext in self.extensions:
ext.sources = self.substitute_sources(ext.name,ext.sources)
self.check_extensions_list(self.extensions)
for ext in self.extensions:
log.info("%s :-> %s",ext.name,ext.sources)
ext.sources = self.cython_sources(ext.sources, ext)
self.build_extension(ext)
def run(self):
self.modifyDocScripts()
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
cython_options.annotate = True
ori_build_ext.run(self) # @UndefinedVariable
def has_files(self):
return self.distribution.has_files()
def has_executables(self):
return self.distribution.has_executables()
sub_commands = [('build_files',has_files),
('build_cexe', has_executables)
] + \
ori_build_ext.sub_commands
except ImportError:
from distutils.command import build_ext # @UnusedImport


@ -28,6 +28,10 @@ class build_files(Command):
self.files = {}
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
for dest,prog,command in self.distribution.files:
destfile = os.path.join(self.build_temp,dest)
@ -48,6 +52,12 @@ class build_files(Command):
log.info("Done.\n")
def has_ctools(self):
return self.distribution.has_ctools()
sub_commands = [('build_ctools', has_ctools)] + \
Command.sub_commands


@ -6,14 +6,14 @@ Created on 20 oct. 2012
import os.path
from distutils.command.build_scripts import build_scripts as ori_build_scripts,\
first_line_re
from distutils.command.build_scripts import build_scripts as ori_build_scripts
from distutils.util import convert_path
from distutils import log, sysconfig
from distutils.dep_util import newer
from stat import ST_MODE
import re
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
class build_scripts(ori_build_scripts):


@ -4,23 +4,24 @@ Created on 10 mars 2015
@author: coissac
'''
from sphinx.setup_command import BuildDoc as ori_build_sphinx # @UnresolvedImport
try:
from sphinx.setup_command import BuildDoc as ori_build_sphinx # @UnresolvedImport
class build_sphinx(ori_build_sphinx):
'''Build Sphinx documentation in html, epub and man formats
'''
class build_sphinx(ori_build_sphinx):
'''
Build Sphinx documentation in html, epub and man formats
'''
description = __doc__
def run(self):
self.builder='html'
self.finalize_options()
ori_build_sphinx.run(self)
self.builder='epub'
self.finalize_options()
ori_build_sphinx.run(self)
self.builder='man'
self.finalize_options()
ori_build_sphinx.run(self)
description = __doc__
def run(self):
self.builder='html'
self.finalize_options()
ori_build_sphinx.run(self)
self.builder='epub'
self.finalize_options()
ori_build_sphinx.run(self)
self.builder='man'
self.finalize_options()
ori_build_sphinx.run(self)
except ImportError:
pass


@ -4,11 +4,16 @@ Created on 6 oct. 2014
@author: coissac
'''
# try:
# from setuptools.command.install import install as install_ori
# except ImportError:
# from distutils.command.install import install as install_ori
from distutils.command.install import install as install_ori
class install(install_ori):
def __init__(self,dist):
install_ori.__init__(self, dist)
self.sub_commands.insert(0, ('build',lambda self: True))
# self.sub_commands.insert(0, ('build',lambda self: True))
self.sub_commands.append(('install_sphinx',lambda self: self.distribution.serenity))


@ -3,6 +3,12 @@ Created on 20 oct. 2012
@author: coissac
'''
# try:
# from setuptools.command.install_scripts import install_scripts as ori_install_scripts
# except ImportError:
# from distutils.command.install_scripts import install_scripts as ori_install_scripts
from distutils.command.install_scripts import install_scripts as ori_install_scripts
import os.path
@ -18,12 +24,10 @@ class install_scripts(ori_install_scripts):
def install_public_link(self):
self.mkpath(self.public_dir)
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
log.info("exporting file %s -> %s", file,os.path.join(self.public_dir,
os.path.split(file)[1]
))
log.info("exporting file %s -> %s", file,os.path.join(self.public_dir,
os.path.split(file)[1]
))
if not self.dry_run:
dest = os.path.join(self.public_dir,
os.path.split(file)[1]
)


@ -43,4 +43,19 @@ class install_sphinx(Command):
self.copy_file(os.path.join(epub),
os.path.join(self.install_doc,os.path.split(epub)[1]))
def get_outputs(self):
directory=os.path.join(self.install_doc,'html')
files = [os.path.join(self.install_doc,'html', f)
for dp, dn, filenames in os.walk(directory) for f in filenames] # @UnusedVariable
directory=os.path.join(self.build_dir,'man')
files.extend(os.path.join(self.install_doc,'man','man1', f)
for dp, dn, filenames in os.walk(directory) for f in filenames) # @UnusedVariable
directory=os.path.join(self.build_dir,'epub')
files.extend(os.path.join(self.install_doc, f)
for dp, dn, filenames in os.walk(directory) # @UnusedVariable
for f in glob.glob(os.path.join(dp, '*.epub')) )
return files


@ -49,7 +49,7 @@ class littlebigman(build_exe):
shell=True,
stdout=subprocess.PIPE)
little = p.communicate()[0]
return little
return little.decode('latin1')
def run(self):
build_exe.run(self)


@ -41,6 +41,10 @@ class pidname(build_exe):
else:
self.executables = []
# self.build_cexe = os.path.join(os.path.dirname(self.build_cexe),'cbinaries')
# self.mkpath(self.build_cexe)
def run(self):
if is_mac_system():


@ -9,8 +9,11 @@ import os.path
import glob
import sys
# try:
# from setuptools.extension import Extension
# except ImportError:
# from distutils.extension import Extension
from distutils.core import setup as ori_setup
from distutils.extension import Extension
from obidistutils.serenity.checkpackage import install_requirements,\
@ -22,6 +25,7 @@ from obidistutils.serenity.rerun import rerun_with_anothe_python
from distutils import log
from obidistutils.dist import Distribution
from obidistutils.serenity import is_serenity
def findPackage(root,base=None):
@ -62,7 +66,10 @@ def findCython(root,base=None,pyrexs=None):
cfiles = [x for x in cfiles if x[-2:]==".c"]
pyrexs[-1].sources.extend(cfiles)
pyrexs[-1].include_dirs.extend(incdir)
pyrexs[-1].extra_compile_args.extend(['-msse2','-Wno-unused-function'])
pyrexs[-1].extra_compile_args.extend(['-msse2',
'-Wno-unused-function',
'-Wmissing-braces',
'-Wchar-subscripts'])
except IOError:
pass
@ -78,8 +85,8 @@ def rootname(x):
def prepare_commands():
from obidistutils.command.build import build
from obidistutils.command.littlebigman import littlebigman
# from obidistutils.command.serenity import serenity
from obidistutils.command.build_cexe import build_cexe
from obidistutils.command.build_sphinx import build_sphinx
from obidistutils.command.build_ext import build_ext
from obidistutils.command.build_ctools import build_ctools
from obidistutils.command.build_files import build_files
@ -89,8 +96,11 @@ def prepare_commands():
from obidistutils.command.install import install
from obidistutils.command.pidname import pidname
from obidistutils.command.sdist import sdist
COMMANDS = {'build':build,
# 'serenity':serenity,
'littlebigman':littlebigman,
'pidname':pidname,
'build_ctools':build_ctools,
@ -98,12 +108,22 @@ def prepare_commands():
'build_cexe':build_cexe,
'build_ext': build_ext,
'build_scripts':build_scripts,
'build_sphinx':build_sphinx,
'install_scripts':install_scripts,
'install_sphinx':install_sphinx,
'install':install,
'sdist':sdist}
# try:
# from setuptools.commands import egg_info
# COMMANDS['egg_info']=egg_info
# except ImportError:
# pass
try:
from obidistutils.command.build_sphinx import build_sphinx
COMMANDS['build_sphinx']=build_sphinx
except ImportError:
pass
return COMMANDS
@ -139,19 +159,21 @@ def setup(**attrs):
del attrs['requirements']
except KeyError:
pass
enforce_good_python(minversion, maxversion, fork)
if is_serenity():
if (install_requirements(requirementfile)):
rerun_with_anothe_python(sys.executable,minversion,maxversion,fork)
enforce_good_python(minversion, maxversion, fork)
if (install_requirements(requirementfile)):
rerun_with_anothe_python(sys.executable,minversion,maxversion,fork)
try:
check_requirements(requirementfile)
except RequirementError as e :
log.error(e)
sys.exit(1)
try:
check_requirements(requirementfile)
except RequirementError as e :
log.error(e)
sys.exit(1)
if 'distclass' not in attrs:
attrs['distclass']=Distribution
@ -193,5 +215,12 @@ def setup(**attrs):
if 'ext_modules' not in attrs:
attrs['ext_modules'] = EXTENTION
# try:
# from setuptools.core import setup as ori_setup
# except ImportError:
# from distutils.core import setup as ori_setup
from distutils.core import setup as ori_setup
ori_setup(**attrs)


@ -4,6 +4,11 @@ Created on 20 oct. 2012
@author: coissac
'''
# try:
# from setuptools.dist import Distribution as ori_Distribution
# except ImportError:
# from distutils.dist import Distribution as ori_Distribution
from distutils.dist import Distribution as ori_Distribution
class Distribution(ori_Distribution):
@ -29,6 +34,14 @@ class Distribution(ori_Distribution):
"By default the name is PACKAGE-VERSION"
))
def run_commands(self):
"""Run each command that was seen on the setup script command line.
Uses the list of commands found and cache of command objects
created by 'get_command_obj()'.
"""
# self.run_command('littlebigman')
ori_Distribution.run_commands(self)
def has_executables(self):
return self.executables is not None and self.executables


@ -11,8 +11,7 @@ import re
from distutils.errors import DistutilsError
import tempfile
import importlib
import imp
from importlib.util import spec_from_file_location # @UnresolvedImport
import zipimport
import argparse
@ -106,3 +105,8 @@ def serenity_mode(package,version):
return args.serenity
def getVersion(source,main,version):
path = os.path.join(source,main,'%s.py' % version)
spec = spec_from_file_location('version',path)
return spec.loader.load_module().version.strip()


@ -0,0 +1,36 @@
'''
Created on 22 janv. 2016
@author: coissac
'''
import sys
from urllib import request
import os.path
from obidistutils.serenity.util import get_serenity_dir
from obidistutils.serenity.rerun import rerun_with_anothe_python
from obidistutils.serenity.checkpython import is_a_virtualenv_python
getpipurl="https://bootstrap.pypa.io/get-pip.py"
def bootstrap():
getpipfile=os.path.join(get_serenity_dir(),"get-pip.py")
with request.urlopen(getpipurl) as getpip:
with open(getpipfile,"wb") as out:
for l in getpip:
out.write(l)
python = sys.executable
if is_a_virtualenv_python():
command= "%s %s" % (python,getpipfile)
else:
command= "%s %s --user" % (python,getpipfile)
os.system(command)
rerun_with_anothe_python(python)


@ -7,8 +7,13 @@ Created on 2 oct. 2014
import re
import os
import pip # @UnresolvedImport
from pip.utils import get_installed_distributions # @UnresolvedImport
try:
import pip # @UnresolvedImport
from pip.utils import get_installed_distributions # @UnresolvedImport
except ImportError:
from .bootstrappip import bootstrap
bootstrap()
from distutils.version import StrictVersion # @UnusedImport
from distutils.errors import DistutilsError
from distutils import log
@ -83,6 +88,8 @@ def install_requirements(requirementfile='requirements.txt'):
log.info(" Installing requirement : %s" % x)
pip_install_package(x)
install_something=True
if x[0:3]=='pip':
return True
return install_something
@ -134,7 +141,9 @@ def get_package_requirement(package,requirementfile='requirements.txt'):
def pip_install_package(package,directory=None,upgrade=True):
log.info('installing %s in directory %s' % (package,str(directory)))
if directory is not None:
log.info(' installing %s in directory %s' % (package,str(directory)))
if 'http_proxy' in os.environ and 'https_proxy' not in os.environ:
os.environ['https_proxy']=os.environ['http_proxy']
@ -144,8 +153,8 @@ def pip_install_package(package,directory=None,upgrade=True):
if upgrade:
args.append('--upgrade')
if 'http_proxy' in os.environ:
args.append('--proxy=%s' % os.environ['http_proxy'])
if 'https_proxy' in os.environ:
args.append('--proxy=%s' % os.environ['https_proxy'])
if directory is not None:
args.append('--target=%s' % directory)


@ -37,7 +37,7 @@ def is_python_version(path=None,minversion='3.4',maxversion=None):
stdout=subprocess.PIPE)
pythonversion=str(p.communicate()[0],'utf8').strip()
pythonversion = StrictVersion(pythonversion)
return ( pythonversion >=StrictVersion(minversion)
and ( maxversion is None
or pythonversion < StrictVersion(maxversion))
@ -91,9 +91,9 @@ def is_a_virtualenv_python(path=None):
'''
if path is None:
rep = sys.base_exec_prefix == sys.exec_prefix
rep = sys.base_exec_prefix != sys.exec_prefix
else:
command = """'%s' -c 'import sys; print(sys.base_exec_prefix == sys.exec_prefix)'""" % path
command = """'%s' -c 'import sys; print(sys.base_exec_prefix != sys.exec_prefix)'""" % path
p = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE)


@ -9,10 +9,10 @@ import sys
import venv
from distutils.errors import DistutilsError
from obidistutils.serenity.globals import local_virtualenv # @UnusedImport
from obidistutils.serenity.checkpython import which_virtualenv,\
is_python_version, \
is_a_virtualenv_python
from .globals import local_virtualenv # @UnusedImport
from .checkpython import which_virtualenv,\
is_python_version, \
is_a_virtualenv_python
@ -39,6 +39,7 @@ def serenity_virtualenv(envname,package,version,minversion='3.4',maxversion=None
maxversion=maxversion) and
is_a_virtualenv_python(python))
#
# The virtualenv already exist but it is not ok
#
@ -58,7 +59,7 @@ def serenity_virtualenv(envname,package,version,minversion='3.4',maxversion=None
clear=True,
symlinks=False,
with_pip=True)
# check the newly created virtualenv
return serenity_virtualenv(envname,package,version)

doc/.gitignore

@ -1,3 +1,5 @@
/build/
/doxygen/
/build_dir.txt
/.DS_Store
/.gitignore


@ -57,7 +57,7 @@ html:
@echo "Generating Doxygen documentation..."
doxygen Doxyfile
@echo "Doxygen documentation generated. \n"
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
$(SPHINXBUILD) -b html -c ./ $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."


@ -1,16 +0,0 @@
__version__ = '4.0.0'
def setup(app):
# We can't do the import at the module scope as setup.py has to be able to
# import this file to read __version__ without hitting any syntax errors
# from both Python 2 & Python 3.
# By the time this function is called, the directives code will have been
# converted with 2to3 if appropriate
from . import directives
directives.setup(app)

Binary file not shown.

Binary file not shown.


@ -1,88 +0,0 @@
from ..renderer.rst.doxygen.base import RenderContext
from ..renderer.rst.doxygen import format_parser_error
from ..parser import ParserError, FileIOError
from docutils import nodes
from docutils.parsers import rst
class WarningHandler(object):
def __init__(self, state, context):
self.state = state
self.context = context
def warn(self, raw_text, rendered_nodes=None):
raw_text = self.format(raw_text)
if rendered_nodes is None:
rendered_nodes = [nodes.paragraph("", "", nodes.Text(raw_text))]
return [
nodes.warning("", *rendered_nodes),
self.state.document.reporter.warning(raw_text, line=self.context['lineno'])
]
def format(self, text):
return text.format(**self.context)
def create_warning(project_info, state, lineno, **kwargs):
tail = ''
if project_info:
tail = 'in doxygen xml output for project "{project}" from directory: {path}'.format(
project=project_info.name(),
path=project_info.project_path()
)
context = dict(
lineno=lineno,
tail=tail,
**kwargs
)
return WarningHandler(state, context)
class BaseDirective(rst.Directive):
def __init__(self, root_data_object, renderer_factory_creator_constructor, finder_factory,
project_info_factory, filter_factory, target_handler_factory, parser_factory, *args):
rst.Directive.__init__(self, *args)
self.directive_args = list(args) # Convert tuple to list to allow modification.
self.root_data_object = root_data_object
self.renderer_factory_creator_constructor = renderer_factory_creator_constructor
self.finder_factory = finder_factory
self.project_info_factory = project_info_factory
self.filter_factory = filter_factory
self.target_handler_factory = target_handler_factory
self.parser_factory = parser_factory
def render(self, node_stack, project_info, options, filter_, target_handler, mask_factory):
"Standard render process used by subclasses"
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
options,
target_handler
)
try:
renderer_factory = renderer_factory_creator.create_factory(
node_stack,
self.state,
self.state.document,
filter_,
target_handler,
)
except ParserError as e:
return format_parser_error("doxygenclass", e.error, e.filename, self.state,
self.lineno, True)
except FileIOError as e:
return format_parser_error("doxygenclass", e.error, e.filename, self.state, self.lineno)
context = RenderContext(node_stack, mask_factory, self.directive_args)
object_renderer = renderer_factory.create_renderer(context)
return object_renderer.render()

Binary file not shown.


@ -1,123 +0,0 @@
from ..renderer.rst.doxygen.base import RenderContext
from ..renderer.rst.doxygen.mask import NullMaskFactory
from ..directive.base import BaseDirective
from ..project import ProjectError
from .base import create_warning
from docutils.parsers.rst.directives import unchanged_required, flag
from docutils.parsers import rst
class BaseFileDirective(BaseDirective):
"""Base class handle the main work when given the appropriate file and project info to work
from.
"""
# We use inheritance here rather than a separate object and composition, because so much
# information is present in the Directive class from the docutils framework that we'd have to
# pass way too much stuff to a helper object to be reasonable.
def handle_contents(self, file_, project_info):
finder = self.finder_factory.create_finder(project_info)
finder_filter = self.filter_factory.create_file_finder_filter(file_)
matches = []
finder.filter_(finder_filter, matches)
if len(matches) > 1:
warning = create_warning(None, self.state, self.lineno, file=file_,
directivename=self.directive_name)
return warning.warn('{directivename}: Found multiple matches for file "{file} {tail}')
elif not matches:
warning = create_warning(None, self.state, self.lineno, file=file_,
directivename=self.directive_name)
return warning.warn('{directivename}: Cannot find file "{file} {tail}')
target_handler = self.target_handler_factory.create_target_handler(
self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_file_filter(file_, self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
target_handler
)
node_list = []
for node_stack in matches:
renderer_factory = renderer_factory_creator.create_factory(
node_stack,
self.state,
self.state.document,
filter_,
target_handler,
)
mask_factory = NullMaskFactory()
context = RenderContext(node_stack, mask_factory, self.directive_args)
object_renderer = renderer_factory.create_renderer(context)
node_list.extend(object_renderer.render())
return node_list
class DoxygenFileDirective(BaseFileDirective):
directive_name = 'doxygenfile'
required_arguments = 0
optional_arguments = 2
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
"""Get the file from the argument and the project info from the factory."""
file_ = self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno)
return warning.warn('doxygenfile: %s' % e)
return self.handle_contents(file_, project_info)
class AutoDoxygenFileDirective(BaseFileDirective):
directive_name = 'autodoxygenfile'
required_arguments = 1
option_spec = {
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
"""Get the file from the argument and extract the associated project info for the named
project given that it is an auto-project.
"""
file_ = self.arguments[0]
try:
project_info = self.project_info_factory.retrieve_project_info_for_auto(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno)
return warning.warn('autodoxygenfile: %s' % e)
return self.handle_contents(file_, project_info)

Binary file not shown.


@ -1,115 +0,0 @@
from ..renderer.rst.doxygen.base import RenderContext
from ..renderer.rst.doxygen.mask import NullMaskFactory
from ..renderer.rst.doxygen import format_parser_error
from ..directive.base import BaseDirective
from ..project import ProjectError
from ..parser import ParserError, FileIOError
from .base import create_warning
from docutils.parsers import rst
from docutils.parsers.rst.directives import unchanged_required, flag
class BaseIndexDirective(BaseDirective):
"""Base class handle the main work when given the appropriate project info to work from.
"""
# We use inheritance here rather than a separate object and composition, because so much
# information is present in the Directive class from the docutils framework that we'd have to
# pass way too much stuff to a helper object to be reasonable.
def handle_contents(self, project_info):
try:
finder = self.finder_factory.create_finder(project_info)
except ParserError as e:
return format_parser_error(self.name, e.error, e.filename, self.state,
self.lineno, True)
except FileIOError as e:
return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno)
data_object = finder.root()
target_handler = self.target_handler_factory.create_target_handler(
self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_index_filter(self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
target_handler
)
renderer_factory = renderer_factory_creator.create_factory(
[data_object],
self.state,
self.state.document,
filter_,
target_handler,
)
mask_factory = NullMaskFactory()
context = RenderContext([data_object, self.root_data_object], mask_factory, self.directive_args)
object_renderer = renderer_factory.create_renderer(context)
try:
node_list = object_renderer.render()
except ParserError as e:
return format_parser_error(self.name, e.error, e.filename, self.state,
self.lineno, True)
except FileIOError as e:
return format_parser_error(self.name, e.error, e.filename, self.state, self.lineno)
return node_list
class DoxygenIndexDirective(BaseIndexDirective):
required_arguments = 0
optional_arguments = 2
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
"""Extract the project info and pass it to the helper method"""
try:
project_info = self.project_info_factory.create_project_info(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno)
return warning.warn('doxygenindex: %s' % e)
return self.handle_contents(project_info)
class AutoDoxygenIndexDirective(BaseIndexDirective):
required_arguments = 0
final_argument_whitespace = True
option_spec = {
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
"""Extract the project info from the auto project info store and pass it to the helper
method
"""
try:
project_info = self.project_info_factory.retrieve_project_info_for_auto(self.options)
except ProjectError as e:
warning = create_warning(None, self.state, self.lineno)
return warning.warn('autodoxygenindex: %s' % e)
return self.handle_contents(project_info)

Binary file not shown.

File diff suppressed because it is too large.

Binary file not shown.


@ -1,3 +0,0 @@
class BreatheError(Exception):
pass

Binary file not shown.

Binary file not shown.


@ -1,45 +0,0 @@
class FakeParentNode(object):
node_type = "fakeparent"
class Finder(object):
def __init__(self, root, item_finder_factory):
self._root = root
self.item_finder_factory = item_finder_factory
def filter_(self, filter_, matches):
"""Adds all nodes which match the filter into the matches list"""
item_finder = self.item_finder_factory.create_finder(self._root)
item_finder.filter_([FakeParentNode()], filter_, matches)
def root(self):
return self._root
class FinderFactory(object):
def __init__(self, parser, item_finder_factory_creator):
self.parser = parser
self.item_finder_factory_creator = item_finder_factory_creator
def create_finder(self, project_info):
root = self.parser.parse(project_info)
item_finder_factory = self.item_finder_factory_creator.create_factory(project_info)
return Finder(root, item_finder_factory)
def create_finder_from_root(self, root, project_info):
item_finder_factory = self.item_finder_factory_creator.create_factory(project_info)
return Finder(root, item_finder_factory)

Binary file not shown.


@ -1 +0,0 @@


@ -1,18 +0,0 @@
class ItemFinder(object):
def __init__(self, project_info, data_object, item_finder_factory):
self.data_object = data_object
self.item_finder_factory = item_finder_factory
self.project_info = project_info
def stack(element, list_):
"""Stack an element on to the start of a list and return as a new list"""
# Copy list first so we have a new list to insert into
output = list_[:]
output.insert(0, element)
return output

Binary file not shown.


@ -1,75 +0,0 @@
from .base import ItemFinder, stack
class DoxygenTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
"""Find nodes which match the filter. Doesn't test this node, only its children"""
node_stack = stack(self.data_object, ancestors)
compound_finder = self.item_finder_factory.create_finder(self.data_object.compounddef)
compound_finder.filter_(node_stack, filter_, matches)
class CompoundDefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
"""Finds nodes which match the filter and continues checks to children"""
node_stack = stack(self.data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
for sectiondef in self.data_object.sectiondef:
finder = self.item_finder_factory.create_finder(sectiondef)
finder.filter_(node_stack, filter_, matches)
for innerclass in self.data_object.innerclass:
finder = self.item_finder_factory.create_finder(innerclass)
finder.filter_(node_stack, filter_, matches)
class SectionDefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
"""Find nodes which match the filter. Doesn't test this node, only its children"""
node_stack = stack(self.data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
for memberdef in self.data_object.memberdef:
finder = self.item_finder_factory.create_finder(memberdef)
finder.filter_(node_stack, filter_, matches)
class MemberDefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
data_object = self.data_object
node_stack = stack(data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
if data_object.kind == 'enum':
for value in data_object.enumvalue:
value_stack = stack(value, node_stack)
if filter_.allow(value_stack):
matches.append(value_stack)
class RefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
node_stack = stack(self.data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)


@ -1,55 +0,0 @@
from . import index as indexfinder
from . import compound as compoundfinder
class CreateCompoundTypeSubFinder(object):
def __init__(self, parser_factory, matcher_factory):
self.parser_factory = parser_factory
self.matcher_factory = matcher_factory
def __call__(self, project_info, *args):
compound_parser = self.parser_factory.create_compound_parser(project_info)
return indexfinder.CompoundTypeSubItemFinder(self.matcher_factory, compound_parser,
project_info, *args)
class DoxygenItemFinderFactory(object):
def __init__(self, finders, project_info):
self.finders = finders
self.project_info = project_info
def create_finder(self, data_object):
return self.finders[data_object.node_type](self.project_info, data_object, self)
class DoxygenItemFinderFactoryCreator(object):
def __init__(self, parser_factory, filter_factory):
self.parser_factory = parser_factory
self.filter_factory = filter_factory
def create_factory(self, project_info):
finders = {
"doxygen": indexfinder.DoxygenTypeSubItemFinder,
"compound": CreateCompoundTypeSubFinder(self.parser_factory, self.filter_factory),
"member": indexfinder.MemberTypeSubItemFinder,
"doxygendef": compoundfinder.DoxygenTypeSubItemFinder,
"compounddef": compoundfinder.CompoundDefTypeSubItemFinder,
"sectiondef": compoundfinder.SectionDefTypeSubItemFinder,
"memberdef": compoundfinder.MemberDefTypeSubItemFinder,
"ref": compoundfinder.RefTypeSubItemFinder,
}
return DoxygenItemFinderFactory(finders, project_info)

Binary file not shown.


@ -1,79 +0,0 @@
from .base import ItemFinder, stack
class DoxygenTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
"""Find nodes which match the filter. Doesn't test this node, only its children"""
compounds = self.data_object.get_compound()
node_stack = stack(self.data_object, ancestors)
for compound in compounds:
compound_finder = self.item_finder_factory.create_finder(compound)
compound_finder.filter_(node_stack, filter_, matches)
class CompoundTypeSubItemFinder(ItemFinder):
def __init__(self, filter_factory, compound_parser, *args):
ItemFinder.__init__(self, *args)
self.filter_factory = filter_factory
self.compound_parser = compound_parser
def filter_(self, ancestors, filter_, matches):
"""Finds nodes which match the filter and continues checks to children
Requires parsing the xml files referenced by the children for which we use the compound
parser and continue at the top level of that pretending that this node is the parent of the
top level node of the compound file.
"""
node_stack = stack(self.data_object, ancestors)
# Match against compound object
if filter_.allow(node_stack):
matches.append(node_stack)
# Descend to member children
members = self.data_object.get_member()
member_matches = []
for member in members:
member_finder = self.item_finder_factory.create_finder(member)
member_finder.filter_(node_stack, filter_, member_matches)
results = []
# If there are members in this compound that match the criteria
# then load up the file for this compound and get the member data objects
if member_matches:
file_data = self.compound_parser.parse(self.data_object.refid)
finder = self.item_finder_factory.create_finder(file_data)
for member_stack in member_matches:
ref_filter = self.filter_factory.create_id_filter('memberdef', member_stack[0].refid)
finder.filter_(node_stack, ref_filter, matches)
else:
# Read in the xml file referenced by the compound and descend into that as well
file_data = self.compound_parser.parse(self.data_object.refid)
finder = self.item_finder_factory.create_finder(file_data)
finder.filter_(node_stack, filter_, matches)
class MemberTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_, matches):
node_stack = stack(self.data_object, ancestors)
# Match against member object
if filter_.allow(node_stack):
matches.append(node_stack)

Binary file not shown.


@ -1,118 +0,0 @@
import breathe.parser.doxygen.index
import breathe.parser.doxygen.compound
class ParserError(Exception):
def __init__(self, error, filename):
Exception.__init__(self, error)
self.error = error
self.filename = filename
class FileIOError(Exception):
def __init__(self, error, filename):
Exception.__init__(self, error)
self.error = error
self.filename = filename
class Parser(object):
def __init__(self, cache, path_handler, file_state_cache):
self.cache = cache
self.path_handler = path_handler
self.file_state_cache = file_state_cache
class DoxygenIndexParser(Parser):
def __init__(self, cache, path_handler, file_state_cache):
Parser.__init__(self, cache, path_handler, file_state_cache)
def parse(self, project_info):
filename = self.path_handler.resolve_path(
project_info.project_path(),
"index.xml"
)
self.file_state_cache.update(filename)
try:
# Try to get from our cache
return self.cache[filename]
except KeyError:
# If that fails, parse it afresh
try:
result = breathe.parser.doxygen.index.parse(filename)
self.cache[filename] = result
return result
except breathe.parser.doxygen.index.ParseError as e:
raise ParserError(e, filename)
except breathe.parser.doxygen.index.FileIOError as e:
raise FileIOError(e, filename)
class DoxygenCompoundParser(Parser):
def __init__(self, cache, path_handler, file_state_cache, project_info):
Parser.__init__(self, cache, path_handler, file_state_cache)
self.project_info = project_info
def parse(self, refid):
filename = self.path_handler.resolve_path(
self.project_info.project_path(),
"%s.xml" % refid
)
self.file_state_cache.update(filename)
try:
# Try to get from our cache
return self.cache[filename]
except KeyError:
# If that fails, parse it afresh
try:
result = breathe.parser.doxygen.compound.parse(filename)
self.cache[filename] = result
return result
except breathe.parser.doxygen.compound.ParseError as e:
raise ParserError(e, filename)
except breathe.parser.doxygen.compound.FileIOError as e:
raise FileIOError(e, filename)
class CacheFactory(object):
def create_cache(self):
# Return basic dictionary as cache
return {}
class DoxygenParserFactory(object):
def __init__(self, cache, path_handler, file_state_cache):
self.cache = cache
self.path_handler = path_handler
self.file_state_cache = file_state_cache
def create_index_parser(self):
return DoxygenIndexParser(self.cache, self.path_handler, self.file_state_cache)
def create_compound_parser(self, project_info):
return DoxygenCompoundParser(
self.cache,
self.path_handler,
self.file_state_cache,
project_info
)

Binary file not shown.


@ -1,964 +0,0 @@
#!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
from xml.dom import Node
from xml.parsers.expat import ExpatError
from . import compoundsuper as supermod
from .compoundsuper import MixedContainer
class DoxygenTypeSub(supermod.DoxygenType):
node_type = "doxygendef"
def __init__(self, version=None, compounddef=None):
supermod.DoxygenType.__init__(self, version, compounddef)
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
node_type = "compounddef"
def __init__(self, kind=None, prot=None, id=None, compoundname='', title='',
basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None,
incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None,
innerclass=None, innernamespace=None, innerpage=None, innergroup=None,
templateparamlist=None, sectiondef=None, briefdescription=None,
detaileddescription=None, inheritancegraph=None, collaborationgraph=None,
programlisting=None, location=None, listofallmembers=None):
supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title,
basecompoundref, derivedcompoundref, includes, includedby,
incdepgraph, invincdepgraph, innerdir, innerfile,
innerclass, innernamespace, innerpage, innergroup,
templateparamlist, sectiondef, briefdescription,
detaileddescription, inheritancegraph, collaborationgraph,
programlisting, location, listofallmembers)
supermod.compounddefType.subclass = compounddefTypeSub
# end class compounddefTypeSub
class listofallmembersTypeSub(supermod.listofallmembersType):
node_type = "listofallmembers"
def __init__(self, member=None):
supermod.listofallmembersType.__init__(self, member)
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
node_type = "memberref"
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
class compoundRefTypeSub(supermod.compoundRefType):
node_type = "compoundref"
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None,
content_=None):
supermod.compoundRefType.__init__(self, mixedclass_, content_)
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
class reimplementTypeSub(supermod.reimplementType):
node_type = "reimplement"
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.reimplementType.__init__(self, mixedclass_, content_)
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
class incTypeSub(supermod.incType):
node_type = "inc"
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.incType.__init__(self, mixedclass_, content_)
supermod.incType.subclass = incTypeSub
# end class incTypeSub
class refTypeSub(supermod.refType):
node_type = "ref"
def __init__(self, node_name, prot=None, refid=None, valueOf_='', mixedclass_=None,
content_=None):
supermod.refType.__init__(self, mixedclass_, content_)
self.node_name = node_name
supermod.refType.subclass = refTypeSub
class refTextTypeSub(supermod.refTextType):
node_type = "reftex"
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None,
content_=None):
supermod.refTextType.__init__(self, mixedclass_, content_)
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
class sectiondefTypeSub(supermod.sectiondefType):
node_type = "sectiondef"
def __init__(self, kind=None, header='', description=None, memberdef=None):
supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
supermod.sectiondefType.subclass = sectiondefTypeSub
# end class sectiondefTypeSub
class memberdefTypeSub(supermod.memberdefType):
node_type = "memberdef"
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None,
readable=None, prot=None, explicit=None, new=None, final=None, writable=None,
add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None,
inline=None, settable=None, id=None, templateparamlist=None, type_=None,
definition='', argsstring='', name='', read='', write='', bitfield='',
reimplements=None, reimplementedby=None, param=None, enumvalue=None,
initializer=None, exceptions=None, briefdescription=None, detaileddescription=None,
inbodydescription=None, location=None, references=None, referencedby=None):
supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt,
readable, prot, explicit, new, final, writable, add, static,
remove, sealed, mutable, gettable, inline, settable, id,
templateparamlist, type_, definition, argsstring, name,
read, write, bitfield, reimplements, reimplementedby, param,
enumvalue, initializer, exceptions, briefdescription,
detaileddescription, inbodydescription, location,
references, referencedby)
self.parameterlist = supermod.docParamListType.factory()
self.parameterlist.kind = "param"
def buildChildren(self, child_, nodeName_):
supermod.memberdefType.buildChildren(self, child_, nodeName_)
if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'param':
# Get latest param
param = self.param[-1]
# If it doesn't have a description we're done
if not param.briefdescription:
return
# Construct our own param list from the descriptions stored inline
# with the parameters
paramdescription = param.briefdescription
paramname = supermod.docParamName.factory()
# Add parameter name
obj_ = paramname.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '',
param.declname)
paramname.content_.append(obj_)
paramnamelist = supermod.docParamNameList.factory()
paramnamelist.parametername.append(paramname)
paramlistitem = supermod.docParamListItem.factory()
paramlistitem.parameternamelist.append(paramnamelist)
# Add parameter description
paramlistitem.parameterdescription = paramdescription
self.parameterlist.parameteritem.append(paramlistitem)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'detaileddescription':
if not self.parameterlist.parameteritem:
# No items in our list
return
# Assume supermod.memberdefType.buildChildren has already built the
# description object, we just want to slot our parameterlist in at
# a reasonable point
if not self.detaileddescription:
# Create one if it doesn't exist
self.detaileddescription = supermod.descriptionType.factory()
detaileddescription = self.detaileddescription
para = supermod.docParaType.factory()
para.parameterlist.append(self.parameterlist)
obj_ = detaileddescription.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', para)
index = 0
detaileddescription.content_.insert(index, obj_)
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
class descriptionTypeSub(supermod.descriptionType):
node_type = "description"
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None,
content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
class enumvalueTypeSub(supermod.enumvalueType):
node_type = "enumvalue"
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None,
detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
self.initializer = None
def buildChildren(self, child_, nodeName_):
# Get text from <name> child and put it in self.name
if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'name':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
self.name = valuestr_
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'briefdescription':
obj_ = supermod.descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'detaileddescription':
obj_ = supermod.descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'initializer':
childobj_ = supermod.linkedTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone,
'initializer', childobj_)
self.set_initializer(obj_)
self.content_.append(obj_)
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
class templateparamlistTypeSub(supermod.templateparamlistType):
node_type = "templateparamlist"
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
node_type = "param"
def __init__(self, type_=None, declname='', defname='', array='', defval=None,
briefdescription=None):
supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class linkedTextTypeSub(supermod.linkedTextType):
node_type = "linkedtext"
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
class graphTypeSub(supermod.graphType):
node_type = "graph"
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
class nodeTypeSub(supermod.nodeType):
node_type = "node"
def __init__(self, id=None, label='', link=None, childnode=None):
supermod.nodeType.__init__(self, id, label, link, childnode)
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
class childnodeTypeSub(supermod.childnodeType):
node_type = "childnode"
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
class linkTypeSub(supermod.linkType):
node_type = "link"
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
class listingTypeSub(supermod.listingType):
node_type = "listing"
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
node_type = "codeline"
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
class highlightTypeSub(supermod.highlightType):
node_type = "highlight"
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
class referenceTypeSub(supermod.referenceType):
node_type = "reference"
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='',
mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
node_type = "location"
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None,
valueOf_=''):
supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
class docSect1TypeSub(supermod.docSect1Type):
node_type = "docsect1"
def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None,
content_=None):
supermod.docSect1Type.__init__(self, mixedclass_, content_)
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
class docSect2TypeSub(supermod.docSect2Type):
node_type = "docsect2"
def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None,
content_=None):
supermod.docSect2Type.__init__(self, mixedclass_, content_)
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
class docSect3TypeSub(supermod.docSect3Type):
node_type = "docsect3"
def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None,
content_=None):
supermod.docSect3Type.__init__(self, mixedclass_, content_)
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
class docSect4TypeSub(supermod.docSect4Type):
node_type = "docsect4"
def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None,
content_=None):
supermod.docSect4Type.__init__(self, mixedclass_, content_)
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
class docInternalTypeSub(supermod.docInternalType):
node_type = "docinternal"
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_)
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
class docInternalS1TypeSub(supermod.docInternalS1Type):
node_type = "docinternals1"
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
class docInternalS2TypeSub(supermod.docInternalS2Type):
node_type = "docinternals2"
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
class docInternalS3TypeSub(supermod.docInternalS3Type):
node_type = "docinternals3"
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
class docInternalS4TypeSub(supermod.docInternalS4Type):
node_type = "docinternals4"
def __init__(self, para=None, mixedclass_=None, content_=None):
supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
class docURLLinkSub(supermod.docURLLink):
node_type = "docurllink"
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docURLLink.__init__(self, mixedclass_, content_)
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
class docAnchorTypeSub(supermod.docAnchorType):
node_type = "docanchor"
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docAnchorType.__init__(self, mixedclass_, content_)
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
class docFormulaTypeSub(supermod.docFormulaType):
node_type = "docformula"
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docFormulaType.__init__(self, mixedclass_, content_)
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
class docIndexEntryTypeSub(supermod.docIndexEntryType):
node_type = "docindexentry"
def __init__(self, primaryie='', secondaryie=''):
supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
class docListTypeSub(supermod.docListType):
node_type = "doclist"
def __init__(self, listitem=None, subtype=""):
self.node_subtype = "itemized"
if subtype is not "":
self.node_subtype = subtype
supermod.docListType.__init__(self, listitem)
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
class docListItemTypeSub(supermod.docListItemType):
node_type = "doclistitem"
def __init__(self, para=None):
supermod.docListItemType.__init__(self, para)
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
class docSimpleSectTypeSub(supermod.docSimpleSectType):
node_type = "docsimplesect"
def __init__(self, kind=None, title=None, para=None):
supermod.docSimpleSectType.__init__(self, kind, title, para)
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
class docVarListEntryTypeSub(supermod.docVarListEntryType):
node_type = "docvarlistentry"
def __init__(self, term=None):
supermod.docVarListEntryType.__init__(self, term)
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
class docRefTextTypeSub(supermod.docRefTextType):
node_type = "docreftext"
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None,
content_=None):
supermod.docRefTextType.__init__(self, mixedclass_, content_)
self.para = []
def buildChildren(self, child_, nodeName_):
supermod.docRefTextType.buildChildren(self, child_, nodeName_)
if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'para':
obj_ = supermod.docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
class docTableTypeSub(supermod.docTableType):
node_type = "doctable"
def __init__(self, rows=None, cols=None, row=None, caption=None):
supermod.docTableType.__init__(self, rows, cols, row, caption)
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
class docRowTypeSub(supermod.docRowType):
node_type = "docrow"
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry)
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
class docEntryTypeSub(supermod.docEntryType):
node_type = "docentry"
def __init__(self, thead=None, para=None):
supermod.docEntryType.__init__(self, thead, para)
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
class docHeadingTypeSub(supermod.docHeadingType):
node_type = "docheading"
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docHeadingType.__init__(self, mixedclass_, content_)
def buildChildren(self, child_, nodeName_):
supermod.docHeadingType.buildChildren(self, child_, nodeName_)
# Account for styled content in the heading. This might need to be expanded to include other
# nodes: the xsd suggests that headings can have a lot of different children, but we
# really don't expect most of them to come up.
if child_.nodeType == Node.ELEMENT_NODE and (
nodeName_ == 'bold' or
nodeName_ == 'emphasis' or
nodeName_ == 'computeroutput' or
nodeName_ == 'subscript' or
nodeName_ == 'superscript' or
nodeName_ == 'center' or
nodeName_ == 'small'):
obj_ = supermod.docMarkupType.factory()
obj_.build(child_)
obj_.type_ = nodeName_
self.content_.append(obj_)
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
class docImageTypeSub(supermod.docImageType):
node_type = "docimage"
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='',
mixedclass_=None, content_=None):
supermod.docImageType.__init__(self, mixedclass_, content_)
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
class docDotFileTypeSub(supermod.docDotFileType):
node_type = "docdocfile"
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docDotFileType.__init__(self, mixedclass_, content_)
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
class docTocItemTypeSub(supermod.docTocItemType):
node_type = "doctocitem"
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docTocItemType.__init__(self, mixedclass_, content_)
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
class docTocListTypeSub(supermod.docTocListType):
node_type = "doctoclist"
def __init__(self, tocitem=None):
supermod.docTocListType.__init__(self, tocitem)
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
class docLanguageTypeSub(supermod.docLanguageType):
node_type = "doclanguage"
def __init__(self, langid=None, para=None):
supermod.docLanguageType.__init__(self, langid, para)
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
class docParamListTypeSub(supermod.docParamListType):
node_type = "docparamlist"
def __init__(self, kind=None, parameteritem=None):
supermod.docParamListType.__init__(self, kind, parameteritem)
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
node_type = "docparamlistitem"
def __init__(self, parameternamelist=None, parameterdescription=None):
supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
class docParamNameListSub(supermod.docParamNameList):
node_type = "docparamnamelist"
def __init__(self, parametername=None):
supermod.docParamNameList.__init__(self, parametername)
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
class docParamNameSub(supermod.docParamName):
node_type = "docparamname"
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
supermod.docParamName.__init__(self, mixedclass_, content_)
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
class docXRefSectTypeSub(supermod.docXRefSectType):
node_type = "docxrefsect"
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
class docCopyTypeSub(supermod.docCopyType):
node_type = "doccopy"
def __init__(self, link=None, para=None, sect1=None, internal=None):
supermod.docCopyType.__init__(self, link, para, sect1, internal)
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
class docCharTypeSub(supermod.docCharType):
node_type = "docchar"
def __init__(self, char=None, valueOf_=''):
supermod.docCharType.__init__(self, char)
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
class verbatimTypeSub(object):
"""
New node type. Structure is largely pillaged from other nodes in order to
match the set.
"""
node_type = "verbatim"
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.text = ""
def factory(*args, **kwargs):
return verbatimTypeSub(*args, **kwargs)
factory = staticmethod(factory)
def buildAttributes(self, attrs):
pass
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.text += child_.nodeValue
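# --- Illustrative usage sketch (not part of the generated bindings) ---
# A hedged example of how the verbatim node above gathers its text content; the XML
# fragment and the helper name below are made up purely for the demonstration.
def _demo_verbatim_node():
    from xml.dom import minidom  # minidom is also used by parse() further down
    fragment = minidom.parseString("<verbatim>embed:rst\nSome reStructuredText\n</verbatim>")
    node = verbatimTypeSub.factory()
    node.build(fragment.documentElement)
    # buildChildren() concatenates every text child into node.text
    return node.text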
class docParaTypeSub(supermod.docParaType):
node_type = "docpara"
def __init__(self, char=None, valueOf_=''):
supermod.docParaType.__init__(self, char)
self.parameterlist = []
self.simplesects = []
self.content = []
self.programlisting = []
self.images = []
def buildChildren(self, child_, nodeName_):
supermod.docParaType.buildChildren(self, child_, nodeName_)
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "ref":
obj_ = supermod.docRefTextType.factory()
obj_.build(child_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'parameterlist':
obj_ = supermod.docParamListType.factory()
obj_.build(child_)
self.parameterlist.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'simplesect':
obj_ = supermod.docSimpleSectType.factory()
obj_.build(child_)
self.simplesects.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'programlisting':
obj_ = supermod.listingType.factory()
obj_.build(child_)
self.programlisting.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'image':
obj_ = supermod.docImageType.factory()
obj_.build(child_)
self.images.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and (
nodeName_ == 'bold' or
nodeName_ == 'emphasis' or
nodeName_ == 'computeroutput' or
nodeName_ == 'subscript' or
nodeName_ == 'superscript' or
nodeName_ == 'center' or
nodeName_ == 'small'):
obj_ = supermod.docMarkupType.factory()
obj_.build(child_)
obj_.type_ = nodeName_
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'verbatim':
childobj_ = verbatimTypeSub.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone,
'verbatim', childobj_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'formula':
childobj_ = docFormulaTypeSub.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone,
'formula', childobj_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "itemizedlist":
obj_ = supermod.docListType.factory(subtype="itemized")
obj_.build(child_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "orderedlist":
obj_ = supermod.docListType.factory(subtype="ordered")
obj_.build(child_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'heading':
obj_ = supermod.docHeadingType.factory()
obj_.build(child_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'ulink':
obj_ = supermod.docURLLink.factory()
obj_.build(child_)
self.content.append(obj_)
supermod.docParaType.subclass = docParaTypeSub
# end class docParaTypeSub
class docMarkupTypeSub(supermod.docMarkupType):
node_type = "docmarkup"
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
supermod.docMarkupType.__init__(self, valueOf_, mixedclass_, content_)
self.type_ = None
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '',
child_.nodeValue)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'ref':
childobj_ = supermod.docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref',
childobj_)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
supermod.docMarkupType.subclass = docMarkupTypeSub
# end class docMarkupTypeSub
class docTitleTypeSub(supermod.docTitleType):
node_type = "doctitle"
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
supermod.docTitleType.__init__(self, valueOf_, mixedclass_, content_)
self.type_ = None
supermod.docTitleType.subclass = docTitleTypeSub
# end class docTitleTypeSub
class ParseError(Exception):
pass
class FileIOError(Exception):
pass
def parse(inFilename):
try:
doc = minidom.parse(inFilename)
except IOError as e:
raise FileIOError(e)
except ExpatError as e:
raise ParseError(e)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
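# --- Illustrative usage sketch ---
# A hedged example of calling parse() above; the file name is hypothetical and the
# two custom exceptions simply wrap the underlying IO and XML errors.
def _demo_parse(xml_path="doxygen/xml/classDemo.xml"):
    try:
        return parse(xml_path)
    except FileIOError as err:
        print("Could not read %s: %s" % (xml_path, err))
    except ParseError as err:
        print("Could not parse %s: %s" % (xml_path, err))
    return None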

File diff suppressed because it is too large.


@@ -1,63 +0,0 @@
#!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
from xml.parsers.expat import ExpatError
from . import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):
node_type = "doxygen"
def __init__(self, version=None, compound=None):
supermod.DoxygenType.__init__(self, version, compound)
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class CompoundTypeSub(supermod.CompoundType):
node_type = "compound"
def __init__(self, kind=None, refid=None, name='', member=None):
supermod.CompoundType.__init__(self, kind, refid, name, member)
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
class MemberTypeSub(supermod.MemberType):
node_type = "member"
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
class ParseError(Exception):
pass
class FileIOError(Exception):
pass
def parse(inFilename):
try:
doc = minidom.parse(inFilename)
except IOError as e:
raise FileIOError(e)
except ExpatError as e:
raise ParseError(e)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj

Binary file not shown.


@@ -1,362 +0,0 @@
#!/usr/bin/env python
#
# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
#
import sys
import getopt
from xml.dom import minidom
from xml.dom import Node
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper:
def format_string(self, input_data, input_name=''):
return input_data
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
def format_float(self, input_data, input_name=''):
return '%f' % input_data
def format_double(self, input_data, input_name=''):
return '%e' % input_data
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
class _MemberSpec(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type(self): return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
#
# Data representation classes.
#
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, version=None, compound=None):
self.version = version
if compound is None:
self.compound = []
else:
self.compound = compound
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
else:
return DoxygenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_compound(self): return self.compound
def set_compound(self, compound): self.compound = compound
def add_compound(self, value): self.compound.append(value)
def insert_compound(self, index, value): self.compound[index] = value
def get_version(self): return self.version
def set_version(self, version): self.version = version
def hasContent_(self):
if (
self.compound is not None
):
return True
else:
return False
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'compound':
obj_ = CompoundType.factory()
obj_.build(child_)
self.compound.append(obj_)
# end class DoxygenType
class CompoundType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, refid=None, name=None, member=None):
self.kind = kind
self.refid = refid
self.name = name
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if CompoundType.subclass:
return CompoundType.subclass(*args_, **kwargs_)
else:
return CompoundType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'member':
obj_ = MemberType.factory()
obj_.build(child_)
self.member.append(obj_)
# end class CompoundType
class MemberType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, refid=None, name=None):
self.kind = kind
self.refid = refid
self.name = name
def factory(*args_, **kwargs_):
if MemberType.subclass:
return MemberType.subclass(*args_, **kwargs_)
else:
return MemberType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def hasContent_(self):
if (
self.name is not None
):
return True
else:
return False
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
# end class MemberType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
-s Use the SAX parser, not the minidom parser.
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def parse(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
namespacedef_='')
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('from index import *\n\n')
sys.stdout.write('rootObj = doxygenindex(\n')
rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex")
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')


@@ -1,87 +0,0 @@
AUTOCFG_TEMPLATE = r"""
PROJECT_NAME = "{project_name}"
OUTPUT_DIRECTORY = {output_dir}
GENERATE_LATEX = NO
GENERATE_MAN = NO
GENERATE_RTF = NO
CASE_SENSE_NAMES = NO
INPUT = {input}
ENABLE_PREPROCESSING = YES
QUIET = YES
JAVADOC_AUTOBRIEF = YES
JAVADOC_AUTOBRIEF = NO
GENERATE_HTML = NO
GENERATE_XML = YES
ALIASES = "rst=\verbatim embed:rst"
ALIASES += "endrst=\endverbatim"
""".strip()
class ProjectData(object):
"Simple handler for the files and project_info for each project"
def __init__(self, auto_project_info, files):
self.auto_project_info = auto_project_info
self.files = files
class AutoDoxygenProcessHandle(object):
def __init__(self, path_handler, run_process, write_file, project_info_factory):
self.path_handler = path_handler
self.run_process = run_process
self.write_file = write_file
self.project_info_factory = project_info_factory
def generate_xml(self, app):
project_files = {}
# First collect together all the files which need to be doxygen processed for each project
for project_name, file_structure in app.config.breathe_projects_source.items():
folder = file_structure[0]
contents = file_structure[1]
auto_project_info = self.project_info_factory.create_auto_project_info(
project_name, folder)
project_files[project_name] = ProjectData(auto_project_info, contents)
# Iterate over the projects and generate doxygen xml output for the files for each one into
# a directory in the Sphinx build area
for project_name, data in project_files.items():
project_path = self.process(data.auto_project_info, data.files)
project_info = data.auto_project_info.create_project_info(project_path)
self.project_info_factory.store_project_info_for_auto(project_name, project_info)
def process(self, auto_project_info, files):
name = auto_project_info.name()
cfgfile = "%s.cfg" % name
full_paths = map(lambda x: auto_project_info.abs_path_to_source_file(x), files)
cfg = AUTOCFG_TEMPLATE.format(
project_name=name,
output_dir=name,
input=" ".join(full_paths)
)
build_dir = self.path_handler.join(
auto_project_info.build_dir(),
"breathe",
"doxygen"
)
self.write_file(build_dir, cfgfile, cfg)
self.run_process(['doxygen', cfgfile], cwd=build_dir)
return self.path_handler.join(build_dir, name, "xml")
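# --- Illustrative wiring sketch ---
# A hedged example of assembling AutoDoxygenProcessHandle by hand with stand-in
# collaborators; the real objects are supplied by the Breathe/Sphinx setup code and
# the callables below are assumptions made only for the demonstration.
def _demo_handle(project_info_factory):
    import os
    import subprocess

    def write_file(directory, filename, content):
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(os.path.join(directory, filename), "w") as f:
            f.write(content)

    return AutoDoxygenProcessHandle(
        path_handler=os.path,
        run_process=lambda cmd, cwd: subprocess.check_call(cmd, cwd=cwd),
        write_file=write_file,
        project_info_factory=project_info_factory,
    )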

Binary file not shown.


@@ -1,306 +0,0 @@
from .exception import BreatheError
import os
class ProjectError(BreatheError):
pass
class NoDefaultProjectError(ProjectError):
pass
class AutoProjectInfo(object):
"""Created as a temporary step in the automatic xml generation process"""
def __init__(
self,
name,
source_path,
build_dir,
reference,
source_dir,
config_dir,
domain_by_extension,
domain_by_file_pattern,
match
):
self._name = name
self._source_path = source_path
self._build_dir = build_dir
self._reference = reference
self._source_dir = source_dir
self._config_dir = config_dir
self._domain_by_extension = domain_by_extension
self._domain_by_file_pattern = domain_by_file_pattern
self._match = match
def name(self):
return self._name
def build_dir(self):
return self._build_dir
def abs_path_to_source_file(self, file_):
"""
Returns the full path to the provided file, assuming that the provided path is relative to the
project's conf.py directory as specified in the breathe_projects_source config variable.
"""
# os.path.join does the appropriate handling if _source_path is an absolute path
return os.path.join(self._config_dir, self._source_path, file_)
def create_project_info(self, project_path):
"""Creates a proper ProjectInfo object based on the information in this AutoProjectInfo"""
return ProjectInfo(
self._name,
project_path,
self._source_path,
self._reference,
self._source_dir,
self._config_dir,
self._domain_by_extension,
self._domain_by_file_pattern,
self._match
)
class ProjectInfo(object):
def __init__(
self,
name,
path,
source_path,
reference,
source_dir,
config_dir,
domain_by_extension,
domain_by_file_pattern,
match
):
self._name = name
self._project_path = path
self._source_path = source_path
self._reference = reference
self._source_dir = source_dir
self._config_dir = config_dir
self._domain_by_extension = domain_by_extension
self._domain_by_file_pattern = domain_by_file_pattern
self._match = match
def name(self):
return self._name
def project_path(self):
return self._project_path
def source_path(self):
return self._source_path
def relative_path_to_xml_file(self, file_):
"""
Returns relative path from Sphinx documentation top-level source directory to the specified
file assuming that the specified file is a path relative to the doxygen xml output
directory.
"""
# os.path.join does the appropriate handling if _project_path is an absolute path
full_xml_project_path = os.path.join(self._config_dir, self._project_path, file_)
return os.path.relpath(
full_xml_project_path,
self._source_dir
)
def sphinx_abs_path_to_file(self, file_):
"""
Prepends os.path.sep to the value returned by relative_path_to_file.
This is to match Sphinx's concept of an absolute path which starts from the top-level source
directory of the project.
"""
return os.path.sep + self.relative_path_to_xml_file(file_)
def reference(self):
return self._reference
def domain_for_file(self, file_):
domain = ""
extension = file_.split(".")[-1]
try:
domain = self._domain_by_extension[extension]
except KeyError:
pass
for pattern, pattern_domain in self._domain_by_file_pattern.items():
if self._match(file_, pattern):
domain = pattern_domain
return domain
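# --- Illustrative usage sketch ---
# A hedged example of building a ProjectInfo by hand; every path and mapping below is
# an assumption for the demonstration (the directives normally obtain these objects
# through ProjectInfoFactory).
def _demo_project_info():
    from fnmatch import fnmatch
    info = ProjectInfo(
        "demo",                     # name
        "_build/doxygen/demo/xml",  # path to the doxygen xml output
        "../src",                   # source path
        "demo",                     # reference
        "docs",                     # sphinx top-level source dir
        "docs",                     # conf.py dir
        {"h": "cpp", "py": "py"},   # domain_by_extension
        {"*.test.h": "cpp"},        # domain_by_file_pattern
        fnmatch,
    )
    rel = info.relative_path_to_xml_file("classDemo.xml")
    return rel, info.domain_for_file("demo.h")  # relative path and the 'cpp' domain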
class ProjectInfoFactory(object):
def __init__(self, source_dir, build_dir, config_dir, match):
self.source_dir = source_dir
self.build_dir = build_dir
self.config_dir = config_dir
self.match = match
self.projects = {}
self.default_project = None
self.domain_by_extension = {}
self.domain_by_file_pattern = {}
self.project_count = 0
self.project_info_store = {}
self.project_info_for_auto_store = {}
self.auto_project_info_store = {}
def update(
self,
projects,
default_project,
domain_by_extension,
domain_by_file_pattern,
projects_source,
build_dir
):
self.projects = projects
self.default_project = default_project
self.domain_by_extension = domain_by_extension
self.domain_by_file_pattern = domain_by_file_pattern
self.projects_source = projects_source
# If the breathe config has a non-empty value for build_dir then use that, otherwise
# stick with the default
if build_dir:
self.build_dir = build_dir
def default_path(self):
if not self.default_project:
raise NoDefaultProjectError(
"No breathe_default_project config setting to fall back on "
"for directive with no 'project' or 'path' specified."
)
try:
return self.projects[self.default_project]
except KeyError:
raise ProjectError(
("breathe_default_project value '%s' does not seem to be a valid key for the "
"breathe_projects dictionary") % self.default_project
)
def create_project_info(self, options):
name = ""
if "project" in options:
try:
path = self.projects[options["project"]]
name = options["project"]
except KeyError:
raise ProjectError("Unable to find project '%s' in breathe_projects dictionary"
% options["project"])
elif "path" in options:
path = options["path"]
else:
path = self.default_path()
try:
return self.project_info_store[path]
except KeyError:
reference = name
if not name:
name = "project%s" % self.project_count
reference = path
self.project_count += 1
project_info = ProjectInfo(
name,
path,
"NoSourcePath",
reference,
self.source_dir,
self.config_dir,
self.domain_by_extension,
self.domain_by_file_pattern,
self.match
)
self.project_info_store[path] = project_info
return project_info
def store_project_info_for_auto(self, name, project_info):
"""Stores the project info by name for later extraction by the auto directives.
Stored separately from the non-auto project info objects as they should never overlap.
"""
self.project_info_for_auto_store[name] = project_info
def retrieve_project_info_for_auto(self, options):
"""Retrieves the project info by name for later extraction by the auto directives.
Looks for the 'project' entry in the options dictionary. This is a less than ideal API but
it is designed to match the use of 'create_project_info' above for which it makes much more
sense.
"""
name = options.get('project', self.default_project)
if name is None:
raise NoDefaultProjectError(
"No breathe_default_project config setting to fall back on "
"for directive with no 'project' or 'path' specified."
)
return self.project_info_for_auto_store[name]
def create_auto_project_info(self, name, source_path):
key = source_path
try:
return self.auto_project_info_store[key]
except KeyError:
reference = name
if not name:
name = "project%s" % self.project_count
reference = source_path
self.project_count += 1
auto_project_info = AutoProjectInfo(
name,
source_path,
self.build_dir,
reference,
self.source_dir,
self.config_dir,
self.domain_by_extension,
self.domain_by_file_pattern,
self.match
)
self.auto_project_info_store[key] = auto_project_info
return auto_project_info

Binary file not shown.

Binary file not shown.


@@ -1,2 +0,0 @@


@@ -1,383 +0,0 @@
from .base import Renderer, RenderContext
from . import index as indexrenderer
from . import compound as compoundrenderer
from docutils import nodes
import textwrap
class RstContentCreator(object):
def __init__(self, list_type, dedent):
self.list_type = list_type
self.dedent = dedent
def __call__(self, text):
# Remove the first line which is "embed:rst[:leading-asterisk]"
text = "\n".join(text.split(u"\n")[1:])
# Remove starting whitespace
text = self.dedent(text)
# Inspired by autodoc.py in Sphinx
result = self.list_type()
for line in text.split("\n"):
result.append(line, "<breathe>")
return result
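# --- Illustrative usage sketch ---
# A hedged example of the content creator above with plain Python stand-ins: a small
# list subclass plays the role of the docutils ViewList normally passed as list_type,
# and textwrap.dedent strips the leading whitespace.
def _demo_rst_content_creator():
    class SimpleList(list):
        def append(self, line, source=None):
            list.append(self, line)

    creator = RstContentCreator(SimpleList, textwrap.dedent)
    text = u"embed:rst\n    Some *rst* text\n    on two lines\n"
    return creator(text)  # ['Some *rst* text', 'on two lines', '']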
class UnicodeRenderer(Renderer):
def render(self):
# Skip any nodes that are pure whitespace
# Probably need a better way to do this as currently we're only doing it
# to skip whitespace between higher-level nodes, but this will also
# skip any pure whitespace entries in actual content nodes
#
# We counter that second issue slightly by allowing through single white spaces
#
if self.data_object.strip():
return [self.node_factory.Text(self.data_object)]
elif self.data_object == unicode(" "):
return [self.node_factory.Text(self.data_object)]
else:
return []
class NullRenderer(Renderer):
def __init__(self):
pass
def render(self):
return []
class DoxygenToRstRendererFactory(object):
def __init__(
self,
node_type,
renderers,
renderer_factory_creator,
node_factory,
project_info,
state,
document,
rst_content_creator,
filter_,
target_handler,
domain_directive_factory
):
self.node_type = node_type
self.node_factory = node_factory
self.project_info = project_info
self.renderers = renderers
self.renderer_factory_creator = renderer_factory_creator
self.state = state
self.document = document
self.rst_content_creator = rst_content_creator
self.filter_ = filter_
self.target_handler = target_handler
self.domain_directive_factory = domain_directive_factory
def create_renderer(
self,
context
):
parent_data_object = context.node_stack[1]
data_object = context.node_stack[0]
if not self.filter_.allow(context.node_stack):
return NullRenderer()
child_renderer_factory = self.renderer_factory_creator.create_child_factory(
self.project_info,
data_object,
self
)
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
Renderer = self.renderers[node_type]
common_args = [
self.project_info,
context,
child_renderer_factory,
self.node_factory,
self.state,
self.document,
self.target_handler,
self.domain_directive_factory
]
if node_type == "docmarkup":
creator = self.node_factory.inline
if data_object.type_ == "emphasis":
creator = self.node_factory.emphasis
elif data_object.type_ == "computeroutput":
creator = self.node_factory.literal
elif data_object.type_ == "bold":
creator = self.node_factory.strong
elif data_object.type_ == "superscript":
creator = self.node_factory.superscript
elif data_object.type_ == "subscript":
creator = self.node_factory.subscript
elif data_object.type_ == "center":
print("Warning: does not currently handle 'center' text display")
elif data_object.type_ == "small":
print("Warning: does not currently handle 'small' text display")
return Renderer(
creator,
*common_args
)
if node_type == "verbatim":
return Renderer(
self.rst_content_creator,
*common_args
)
if node_type == "compound":
kind = data_object.kind
if kind in ["file", "dir", "page", "example", "group"]:
return Renderer(indexrenderer.FileRenderer, *common_args)
class_ = indexrenderer.CompoundTypeSubRenderer
# For compound node types Renderer is CreateCompoundTypeSubRenderer
# as defined below. This could be cleaner
return Renderer(
class_,
*common_args
)
if node_type == "memberdef":
if data_object.kind in ("function", "slot") or (data_object.kind == 'friend' and data_object.argsstring):
Renderer = compoundrenderer.FuncMemberDefTypeSubRenderer
elif data_object.kind == "enum":
Renderer = compoundrenderer.EnumMemberDefTypeSubRenderer
elif data_object.kind == "typedef":
Renderer = compoundrenderer.TypedefMemberDefTypeSubRenderer
elif data_object.kind == "variable":
Renderer = compoundrenderer.VariableMemberDefTypeSubRenderer
elif data_object.kind == "define":
Renderer = compoundrenderer.DefineMemberDefTypeSubRenderer
if node_type == "param":
return Renderer(
parent_data_object.node_type != "templateparamlist",
*common_args
)
if node_type == "docsimplesect":
if data_object.kind == "par":
Renderer = compoundrenderer.ParDocSimpleSectTypeSubRenderer
return Renderer(
*common_args
)
class CreateCompoundTypeSubRenderer(object):
def __init__(self, parser_factory):
self.parser_factory = parser_factory
def __call__(self, class_, project_info, *args):
compound_parser = self.parser_factory.create_compound_parser(project_info)
return class_(compound_parser, project_info, *args)
class CreateRefTypeSubRenderer(object):
def __init__(self, parser_factory):
self.parser_factory = parser_factory
def __call__(self, project_info, *args):
compound_parser = self.parser_factory.create_compound_parser(project_info)
return compoundrenderer.RefTypeSubRenderer(compound_parser, project_info, *args)
class DoxygenToRstRendererFactoryCreator(object):
def __init__(
self,
node_factory,
parser_factory,
domain_directive_factory,
rst_content_creator,
project_info
):
self.node_factory = node_factory
self.parser_factory = parser_factory
self.domain_directive_factory = domain_directive_factory
self.rst_content_creator = rst_content_creator
self.project_info = project_info
def create_factory(self, node_stack, state, document, filter_, target_handler):
data_object = node_stack[0]
renderers = {
"doxygen" : indexrenderer.DoxygenTypeSubRenderer,
"compound" : CreateCompoundTypeSubRenderer(self.parser_factory),
"doxygendef" : compoundrenderer.DoxygenTypeSubRenderer,
"compounddef" : compoundrenderer.CompoundDefTypeSubRenderer,
"sectiondef" : compoundrenderer.SectionDefTypeSubRenderer,
"memberdef" : compoundrenderer.MemberDefTypeSubRenderer,
"enumvalue" : compoundrenderer.EnumvalueTypeSubRenderer,
"linkedtext" : compoundrenderer.LinkedTextTypeSubRenderer,
"description" : compoundrenderer.DescriptionTypeSubRenderer,
"param" : compoundrenderer.ParamTypeSubRenderer,
"docreftext" : compoundrenderer.DocRefTextTypeSubRenderer,
"docheading" : compoundrenderer.DocHeadingTypeSubRenderer,
"docpara" : compoundrenderer.DocParaTypeSubRenderer,
"docmarkup" : compoundrenderer.DocMarkupTypeSubRenderer,
"docparamlist" : compoundrenderer.DocParamListTypeSubRenderer,
"docparamlistitem" : compoundrenderer.DocParamListItemSubRenderer,
"docparamnamelist" : compoundrenderer.DocParamNameListSubRenderer,
"docparamname" : compoundrenderer.DocParamNameSubRenderer,
"docsect1" : compoundrenderer.DocSect1TypeSubRenderer,
"docsimplesect" : compoundrenderer.DocSimpleSectTypeSubRenderer,
"doctitle" : compoundrenderer.DocTitleTypeSubRenderer,
"docformula" : compoundrenderer.DocForumlaTypeSubRenderer,
"docimage" : compoundrenderer.DocImageTypeSubRenderer,
"docurllink" : compoundrenderer.DocURLLinkSubRenderer,
"listing" : compoundrenderer.ListingTypeSubRenderer,
"codeline" : compoundrenderer.CodeLineTypeSubRenderer,
"highlight" : compoundrenderer.HighlightTypeSubRenderer,
"templateparamlist" : compoundrenderer.TemplateParamListRenderer,
"inc" : compoundrenderer.IncTypeSubRenderer,
"ref" : CreateRefTypeSubRenderer(self.parser_factory),
"verbatim" : compoundrenderer.VerbatimTypeSubRenderer,
"mixedcontainer" : compoundrenderer.MixedContainerRenderer,
"unicode" : UnicodeRenderer,
"doclist": compoundrenderer.DocListTypeSubRenderer,
"doclistitem": compoundrenderer.DocListItemTypeSubRenderer,
}
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
return DoxygenToRstRendererFactory(
"root",
renderers,
self,
self.node_factory,
self.project_info,
state,
document,
self.rst_content_creator,
filter_,
target_handler,
self.domain_directive_factory
)
def create_child_factory( self, project_info, data_object, parent_renderer_factory ):
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
return DoxygenToRstRendererFactory(
node_type,
parent_renderer_factory.renderers,
self,
self.node_factory,
parent_renderer_factory.project_info,
parent_renderer_factory.state,
parent_renderer_factory.document,
self.rst_content_creator,
parent_renderer_factory.filter_,
parent_renderer_factory.target_handler,
parent_renderer_factory.domain_directive_factory
)
# FactoryFactoryFactory. Ridiculous but necessary.
class DoxygenToRstRendererFactoryCreatorConstructor(object):
def __init__(
self,
node_factory,
parser_factory,
domain_directive_factory,
rst_content_creator
):
self.node_factory = node_factory
self.parser_factory = parser_factory
self.domain_directive_factory = domain_directive_factory
self.rst_content_creator = rst_content_creator
def create_factory_creator(self, project_info, document, options, target_handler):
return DoxygenToRstRendererFactoryCreator(
self.node_factory,
self.parser_factory,
self.domain_directive_factory,
self.rst_content_creator,
project_info,
)
def format_parser_error(name, error, filename, state, lineno, do_unicode_warning):
warning = '%s: Unable to parse xml file "%s". ' % (name, filename)
explanation = 'Reported error: %s. ' % error
unicode_explanation_text = ""
unicode_explanation = []
if do_unicode_warning:
unicode_explanation_text = textwrap.dedent("""
Parsing errors are often due to unicode errors associated with the encoding of the original
source files. Doxygen propagates invalid characters from the input source files to the
output xml.""").strip().replace("\n", " ")
unicode_explanation = [nodes.paragraph("", "", nodes.Text(unicode_explanation_text))]
return [nodes.warning("",
nodes.paragraph("", "", nodes.Text(warning)),
nodes.paragraph("", "", nodes.Text(explanation)),
*unicode_explanation
),
state.document.reporter.warning(warning + explanation + unicode_explanation_text, line=lineno)
]


@@ -1,127 +0,0 @@
class Renderer(object):
def __init__(self,
project_info,
context,
renderer_factory,
node_factory,
state,
document,
target_handler,
domain_directive_factory,
):
self.project_info = project_info
self.context = context
self.data_object = context.node_stack[0]
self.renderer_factory = renderer_factory
self.node_factory = node_factory
self.state = state
self.document = document
self.target_handler = target_handler
self.domain_directive_factory = domain_directive_factory
if self.context.domain == '':
self.context.domain = self.get_domain()
def get_domain(self):
"""Returns the domain for the current node."""
def get_filename(node):
"""Returns the name of a file where the declaration represented by node is located."""
try:
return node.location.file
except AttributeError:
return None
node_stack = self.context.node_stack
node = node_stack[0]
# An enumvalue node doesn't have location, so use its parent node for detecting the domain instead.
if type(node) == unicode or node.node_type == "enumvalue":
node = node_stack[1]
filename = get_filename(node)
if not filename and node.node_type == "compound":
file_data = self.compound_parser.parse(node.refid)
filename = get_filename(file_data.compounddef)
return self.project_info.domain_for_file(filename) if filename else ''
def get_fully_qualified_name(self):
names = []
node_stack = self.context.node_stack
node = node_stack[0]
if node.node_type == 'enumvalue':
names.append(node.name)
# Skip the name of the containing enum because it is not a part of the fully qualified name.
node_stack = node_stack[2:]
# If the node is a namespace, use its name because namespaces are skipped in the main loop.
if node.node_type == 'compound' and node.kind == 'namespace':
names.append(node.name)
for node in node_stack:
if node.node_type == 'ref' and len(names) == 0:
return node.valueOf_
if (node.node_type == 'compound' and node.kind not in ['file', 'namespace']) or \
node.node_type == 'memberdef':
# We skip the 'file' entries because the file name doesn't form part of the
# qualified name for the identifier. We skip the 'namespace' entries because if we
# find an object through the namespace 'compound' entry in the index.xml then we'll
# also have the 'compounddef' entry in our node stack and we'll get it from that. We
# need the 'compounddef' entry because if we find the object through the 'file'
# entry in the index.xml file then we need to get the namespace name from somewhere
names.insert(0, node.name)
if (node.node_type == 'compounddef' and node.kind == 'namespace'):
# Nested namespaces include their parent namespace(s) in compoundname. ie,
# compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in
# namespace 'foo'. We need full compoundname because node_stack doesn't necessarily
# include parent namespaces and we stop here in case it does.
names.insert(0, node.compoundname)
break
return '::'.join(names)
def create_template_node(self, decl):
"""Creates a node for the ``template <...>`` part of the declaration."""
if not decl.templateparamlist:
return None
context = self.context.create_child_context(decl.templateparamlist)
renderer = self.renderer_factory.create_renderer(context)
nodes = [self.node_factory.Text("template <")]
nodes.extend(renderer.render())
nodes.append(self.node_factory.Text(">"))
signode = self.node_factory.desc_signature()
signode.extend(nodes)
return signode
def run_domain_directive(self, kind, names):
domain_directive = self.renderer_factory.domain_directive_factory.create(
self.context.domain, [kind, names] + self.context.directive_args[2:])
# Translate Breathe's no-link option into the standard noindex option.
if 'no-link' in self.context.directive_args[2]:
domain_directive.options['noindex'] = True
nodes = domain_directive.run()
# Filter out outer class names if we are rendering a member as a part of a class content.
signode = nodes[1].children[0]
if len(names) > 0 and self.context.child:
signode.children = [n for n in signode.children if not n.tagname == 'desc_addname']
return nodes
class RenderContext(object):
def __init__(self, node_stack, mask_factory, directive_args, domain='', child=False):
self.node_stack = node_stack
self.mask_factory = mask_factory
self.directive_args = directive_args
self.domain = domain
self.child = child
def create_child_context(self, data_object):
node_stack = self.node_stack[:]
node_stack.insert(0, self.mask_factory.mask(data_object))
return RenderContext(node_stack, self.mask_factory, self.directive_args, self.domain, True)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,115 +0,0 @@
from .base import Renderer
class DoxygenTypeSubRenderer(Renderer):
def render(self):
nodelist = []
# Process all the compound children
for compound in self.data_object.get_compound():
context = self.context.create_child_context(compound)
compound_renderer = self.renderer_factory.create_renderer(context)
nodelist.extend(compound_renderer.render())
return nodelist
class CompoundRenderer(Renderer):
"""Base class for CompoundTypeSubRenderer and RefTypeSubRenderer."""
def __init__(self, compound_parser, render_empty_node, *args):
self.compound_parser = compound_parser
self.render_empty_node = render_empty_node
Renderer.__init__(self, *args)
def create_doxygen_target(self):
"""Can be overridden to create a target node which uses the doxygen refid information
which can be used for creating links between internal doxygen elements.
The default implementation should suffice most of the time.
"""
refid = "%s%s" % (self.project_info.name(), self.data_object.refid)
return self.target_handler.create_target(refid)
def render_signature(self, file_data, doxygen_target):
# Defer to domains specific directive.
name, kind = self.get_node_info(file_data)
self.context.directive_args[1] = [self.get_fully_qualified_name()]
nodes = self.run_domain_directive(kind, self.context.directive_args[1])
node = nodes[1]
signode, contentnode = node.children
# The cpp domain in Sphinx doesn't support structs at the moment, so change the text from "class "
# to the correct kind which can be "class " or "struct ".
signode[0] = self.node_factory.desc_annotation(kind + ' ', kind + ' ')
# Check if there is template information and format it as desired
template_signode = self.create_template_node(file_data.compounddef)
if template_signode:
node.insert(0, template_signode)
node.children[0].insert(0, doxygen_target)
return nodes, contentnode
def render(self):
# Read in the corresponding xml file and process
file_data = self.compound_parser.parse(self.data_object.refid)
parent_context = self.context.create_child_context(file_data)
data_renderer = self.renderer_factory.create_renderer(parent_context)
rendered_data = data_renderer.render()
if not rendered_data and not self.render_empty_node:
return []
file_data = parent_context.node_stack[0]
new_context = parent_context.create_child_context(file_data.compounddef)
nodes, contentnode = self.render_signature(file_data, self.create_doxygen_target())
if file_data.compounddef.includes:
for include in file_data.compounddef.includes:
context = new_context.create_child_context(include)
renderer = self.renderer_factory.create_renderer(context)
contentnode.extend(renderer.render())
contentnode.extend(rendered_data)
return nodes
class CompoundTypeSubRenderer(CompoundRenderer):
def __init__(self, compound_parser, *args):
CompoundRenderer.__init__(self, compound_parser, True, *args)
def get_node_info(self, file_data):
return self.data_object.name, self.data_object.kind
class FileRenderer(CompoundTypeSubRenderer):
def render_signature(self, file_data, doxygen_target):
# Build targets for linking
targets = []
targets.extend(doxygen_target)
title_signode = self.node_factory.desc_signature()
title_signode.extend(targets)
# Set up the title
name, kind = self.get_node_info(file_data)
title_signode.append(self.node_factory.emphasis(text=kind))
title_signode.append(self.node_factory.Text(" "))
title_signode.append(self.node_factory.desc_name(text=name))
contentnode = self.node_factory.desc_content()
node = self.node_factory.desc()
node.document = self.state.document
node['objtype'] = kind
node.append(title_signode)
node.append(contentnode)
return [node], contentnode


@@ -1,62 +0,0 @@
"""
Masks
=====
Masks are related to filters. Filters can block the processing of particular parts of the xml
hierarchy but they can only work on node level. If the part of the xml hierarchy that you want to
filter out is read in as an instance of one of the classes in parser/doxygen/*.py then you can use
the filters. However, if you want to filter out an attribute from one of the nodes (and some of the
xml child nodes are read in as attributes on their parents) then you can't use a filter.
We introduce masks to fulfil this need. The masks are designed to be applied to a particular
node type and to limit the access to particular attributes on the node. For example, the
NoParameterNamesMask wraps a node and returns all its standard attributes but returns None for the
'declname' and 'defname' attributes.
Currently the Mask functionality is only used for the text signature rendering for doing function
matching.
"""
class NoParameterNamesMask(object):
def __init__(self, data_object):
self.data_object = data_object
def __getattr__(self, attr):
if attr in ['declname', 'defname', 'defval']:
return None
return getattr(self.data_object, attr)
class MaskFactory(object):
def __init__(self, lookup):
self.lookup = lookup
def mask(self, data_object):
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
if node_type in self.lookup:
Mask = self.lookup[node_type]
return Mask(data_object)
return data_object
class NullMaskFactory(object):
def mask(self, data_object):
return data_object
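# --- Illustrative usage sketch ---
# A hedged example of masking a made-up parameter node so that its name-related
# attributes are hidden from rendering; DemoParam is a stand-in for a parsed 'param'
# node, not a real parser class.
def _demo_masks():
    class DemoParam(object):
        node_type = "param"
        declname = "count"
        defval = "0"
        type_ = "int"

    factory = MaskFactory({"param": NoParameterNamesMask})
    masked = factory.mask(DemoParam())
    return masked.type_, masked.declname  # ('int', None)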


@@ -1,40 +0,0 @@
class TargetHandler(object):
def __init__(self, project_info, node_factory, document):
self.project_info = project_info
self.node_factory = node_factory
self.document = document
def create_target(self, id_):
"""Creates a target node and registers it with the document and returns it in a list"""
target = self.node_factory.target(ids=[id_], names=[id_])
try:
self.document.note_explicit_target(target)
except Exception:
# TODO: We should really return a docutils warning node here
print("Warning: Duplicate target detected: %s" % id_)
return [target]
class NullTargetHandler(object):
def create_target(self, refid):
return []
class TargetHandlerFactory(object):
def __init__(self, node_factory):
self.node_factory = node_factory
def create_target_handler(self, options, project_info, document):
if "no-link" in options:
return NullTargetHandler()
return TargetHandler(project_info, self.node_factory, document)


@@ -36,7 +36,7 @@ extensions = [
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'breathe',
# 'breathe',
]
# Add any paths that contain templates here, relative to this directory.
@@ -51,7 +51,7 @@ source_suffix = '.rst'
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = 'source/index'
# General information about the project.
project = u'OBITools3'
@@ -292,7 +292,7 @@ texinfo_documents = [
#texinfo_no_detailmenu = False
#Breathe configuration
sys.path.append( "../breathe/" )
breathe_projects = { "OBITools3": "../doxygen/xml/" }
sys.path.append( "breathe/" )
breathe_projects = { "OBITools3": "doxygen/xml/" }
breathe_default_project = "OBITools3"


@@ -1,4 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore


@@ -2,19 +2,19 @@
The OBItools3 Data Management System (OBIDMS)
*********************************************
A complete DNA Metabarcoding experiment rely on several kinds of data.
A complete DNA metabarcoding experiment relies on several kinds of data.
- The sequence data resulting of the PCR products sequencing,
- The sequence data resulting from the sequencing of the PCR products,
- The description of the samples including all their metadata,
- One or several refence database used for the taxonomical annotation
- One or several taxonomies.
- One or several reference databases used for the taxonomic annotation,
- One or several taxonomy databases.
Up to now each of these categories of data were stored in separate
files an nothing obliged to keep them together.
Up to now, each of these categories of data were stored in separate
files, and nothing made it mandatory to keep them together.
The `Data Management System` (DMS) of OBITools3 can be considered
as a basic database system.
The `Data Management System` (DMS) of OBITools3 can be viewed as a basic
database system.
OBIDMS UML
@@ -25,11 +25,21 @@ OBIDMS UML
:download:`html version of the OBIDMS UML file <UML/ObiDMS_UML.class.violet.html>`
An OBIDMS directory consists of :
* OBIDMS column files
* OBIDMS release files
* OBIDMS dictionary files
* one OBIDMS history file
An OBIDMS directory contains :
* one `OBIDMS history file <#obidms-history-files>`_
* OBIDMS column directories
OBIDMS column directories
=========================
OBIDMS column directories contain :
* all the different versions of one OBIDMS column, in the form of separate files (`OBIDMS column files <#obidms-column-files>`_)
* one `OBIDMS version file <#obidms-version-files>`_
The directory name is the column attribute with the extension ``.obicol``.
Example: ``count.obicol``
OBIDMS column files
@@ -38,7 +48,7 @@ OBIDMS column files
Each OBIDMS column file contains :
* a header of a size equal to a multiple of PAGESIZE (PAGESIZE being equal to 4096 bytes
on most systems) containing metadata
* one column of data with the same OBIType
* Lines of data with the same `OBIType <types.html#obitypes>`_
Header
@@ -48,27 +58,33 @@ The header of an OBIDMS column contains :
* Endian byte order
* Header size (PAGESIZE multiple)
*
* File status : Open/Closed
* Owner : PID of the process that created the file and is the only one allowed to modify it if it is open
* Number of lines (total or without the header?)
* OBIType
* Date of creation
* Version of the file
* Number of lines of data
* Number of lines of data used
* `OBIType <types.html#obitypes>`_ (type of the data)
* Date of creation of the file
* Version of the OBIDMS column
* The column name
* Optional comments
Data
----
A column of data with the same OBIType.
A line of data corresponds to a vector of elements. Each element is associated with an element name.
Element names are stored in the header. The correspondence between an element and its name is
given by their order in the lists of elements and element names. This structure allows the storage of
dictionary-like data.
Example: In the header, the attribute ``elements_names`` will be associated with the value ``"sample_1;
sample_2;sample_3"``, and a line of data with the type ``OBInt_t`` will be stored as an ``OBInt_t`` vector
of size three e.g. ``5|8|4``.
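As a rough illustration of this layout (a hedged sketch only, not OBITools3 code; the variable
names are made up), decoding such a line in Python could look like::

    element_names = "sample_1;sample_2;sample_3"        # read from the header
    line = [5, 8, 4]                                     # one OBInt_t vector of size three
    as_dict = dict(zip(element_names.split(";"), line))  # {'sample_1': 5, 'sample_2': 8, 'sample_3': 4}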
Mandatory columns
-----------------
Some columns must exist in an OBIDMS directory :
* sequence identifiers column (type *OBIStr_t*)
* sequence identifiers column (type ``OBIStr_t``)
File name
@@ -83,8 +99,7 @@ Example : ``count@3.odc``
Modifications
-------------
An OBIDMS column file can only be modified by the process that created it, if its status is set to Open. Those informations are
contained in the `header <#header>`_.
An OBIDMS column file can only be modified by the process that created it, and while its status is set to Open.
When a process wants to modify an OBIDMS column file that is closed, it must first clone it. Cloning creates a new version of the
file that belongs to the process, i.e., only that process can modify that file, as long as its status is set to Open. Once the process
@ -94,6 +109,8 @@ again.
That means that one column is stored in one file (if there is only one version)
or in several files (if there are several versions), and that there is one file per version.
All the versions of one column are stored in one directory.
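This rule can be summed up by a small check, assuming the status flag and the owner PID are read from the column header (names are hypothetical)::

    #include <stdbool.h>
    #include <sys/types.h>    /* pid_t    */
    #include <unistd.h>       /* getpid() */

    typedef struct {
        bool  is_open;    /* file status stored in the header    */
        pid_t owner;      /* PID of the process that created it  */
    } column_status;      /* illustrative only                   */

    /* True if the calling process may modify the column file in place;
     * otherwise the column must first be cloned into a new version. */
    static bool can_modify_in_place(const column_status *status)
    {
        return status->is_open && status->owner == getpid();
    }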
Versioning
----------
@ -101,22 +118,22 @@ Versioning
The first version of a column file is numbered 0, and each new version increments that
number by 1.
The number of the latest version of an OBIDMS column is stored in an `OBIDMS release file <formats.html#obidms-release-files>`_.
The number of the latest version of an OBIDMS column is stored in the `OBIDMS version file <#obidms-version-files>`_ of its directory.
OBIDMS release files
OBIDMS version files
====================
Each OBIDMS column is associated with an OBIDMS release file that contains the number of the latest
Each OBIDMS column is associated with an OBIDMS version file in its directory, which contains the number of the latest
version of the column.
File name
---------
OBIDMS release files are named with the attribute associated to the data contained in the column, and
have the extension ``.odr``.
OBIDMS version files are named with the attribute associated with the data contained in the column, and
have the extension ``.odv``.
Example : ``count.odr``
Example : ``count.odv``
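A sketch of how a process could derive the next column file name from the version file is shown below; it assumes the ``.odv`` file stores the number as plain text, which is not stated in this document::

    #include <stdio.h>

    /* Read the latest version number of 'attribute' from its .odv file
     * and build the name of the next column file (e.g. "count@4.odc").
     * Error handling is minimal; everything here is illustrative. */
    static int next_column_file(const char *attribute, char *out, size_t out_size)
    {
        char  path[256];
        int   latest = -1;    /* so that the first version is numbered 0 */
        FILE *f;

        snprintf(path, sizeof(path), "%s.odv", attribute);
        f = fopen(path, "r");
        if (f != NULL)
        {
            if (fscanf(f, "%d", &latest) != 1)
                latest = -1;
            fclose(f);
        }
        /* each new version increments the latest number by 1 */
        return snprintf(out, out_size, "%s@%d.odc", attribute, latest + 1);
    }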
OBIDMS views
@ -140,19 +157,4 @@ operations ever done in the OBIDMS directory and the views in between them :
:width: 150 px
:align: center
OBIType header file
========================
.. doxygenfile:: obitypes.h
OBIIntColumn header file
========================
.. doxygenfile:: obiintcolumn.h
OBIColumn header file
=====================
.. doxygenfile:: obicolumn.h

Binary file not shown (image changed: 63 KiB before, 67 KiB after).

File diff suppressed because it is too large.

@ -2,28 +2,28 @@
Container types
===============
Containers allow to manage collection of values of homogeneous type.
Containers make it possible to manage collections of values of a homogeneous type.
Three container types exist.
A container is an immutable structure once it has been locked.
Consequently just insert procedure are needed
Consequently, only insertion procedures are needed.
Lists
-----
Correspond to an ordered collection of values belonging an elementary type.
Correspond to an ordered collection of values belonging to an elementary type.
At its creation
At its creation, ...
Sets
----
Correspond to an unordered collection of values belonging an elementary type.
Correspond to an unordered collection of values belonging to an elementary type.
Dictionaries
------------
Dictionaries allow to associate a `key` to a `value`. Values can be retrieved through its associated key.
Values must belong an elementary type and keys must be *OBIStr_t*.
Dictionaries associate a `key` with a `value`. Values can be retrieved through their associated key.
Values must belong to an elementary type and keys must be *OBIStr_t*.
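A toy C version of such a dictionary, holding ``OBIInt_t`` values with plain strings standing in for *OBIStr_t* keys, could look like this (the fixed capacity and all names are illustrative)::

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    typedef int32_t OBIInt_t;

    typedef struct {
        const char *keys[64];
        OBIInt_t    values[64];
        size_t      count;
        bool        locked;    /* once locked, the container is immutable */
    } int_dict;

    /* Only an insertion procedure is needed; it refuses to modify a
     * locked (and therefore immutable) dictionary. */
    static bool int_dict_insert(int_dict *d, const char *key, OBIInt_t value)
    {
        if (d->locked || d->count >= 64)
            return false;
        d->keys[d->count]   = key;
        d->values[d->count] = value;
        d->count++;
        return true;
    }

    /* Values are retrieved through their associated key. */
    static bool int_dict_get(const int_dict *d, const char *key, OBIInt_t *value)
    {
        for (size_t i = 0; i < d->count; i++)
            if (strcmp(d->keys[i], key) == 0)
            {
                *value = d->values[i];
                return true;
            }
        return false;
    }

Once ``locked`` is set, ``int_dict_insert`` refuses any further modification, which matches the rule that a container is immutable once locked.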


@ -2,8 +2,8 @@
Data in OBITools3
#################
The OBITools3 inaugure a new way to manage DNA metabarcoding data.
They rely on a `Data management System` (DMS) that can be considered as
The OBITools3 introduce a new way to manage DNA metabarcoding data.
They rely on a `Data Management System` (DMS) that can be viewed as
a simplified database system.


@ -12,7 +12,7 @@ Atomic types
========= ========= ============ ==============================
integer int32_t OBIInt_t a signed integer value
float double OBIFloat_t a floating value
boolean ? OBIBool_t a boolean true/false value
boolean bool OBIBool_t a boolean true/false value
char char OBIChar_t a character
index size_t OBIIdx_t an index in a data structure
========= ========= ============ ==============================
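Read as C typedefs, the table above could translate into the following (illustrative; the authoritative definitions are expected to live in ``obitypes.h``)::

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef int32_t OBIInt_t;      /* a signed integer value        */
    typedef double  OBIFloat_t;    /* a floating value              */
    typedef bool    OBIBool_t;     /* a boolean true/false value    */
    typedef char    OBIChar_t;     /* a character                   */
    typedef size_t  OBIIdx_t;      /* an index in a data structure  */

Note that a plain ``bool`` cannot hold the NA value ``2`` used for ``OBIBool_t`` in the special values section, so the actual definition may use a wider type.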


@ -63,8 +63,14 @@ Issue tracking
==============
Issue tracking is done using `GitLab <https://about.gitlab.com/>`_ at http://git.metabarcoding.org/.
Creating a branch should always lead to the creation of a label that refers to it in GitLab.
Tickets should always be labelled with the branches for which they are relevant.
Tickets should always be labeled with the branches for which they are relevant.
*************
Documentation
*************
Public C functions are documented in the header files, and private C functions in the source files.
**************
@ -86,7 +92,7 @@ C99 :
* Object layer
* OBITools3 library
`Python 3 <https://www.python.org/>`_ :
`Python 3.5 <https://www.python.org/>`_ :
* Top layer code (scripts)
For the documentation, `Sphinx <http://sphinx-doc.org/>`_ should be used for both the original
@ -99,8 +105,22 @@ in the Sphinx documentation using `Breathe <https://breathe.readthedocs.org/en/l
Naming conventions
******************
.. todo::
Look for common naming conventions
Struct, Enum: ``Title_case``
Enum members, macros, constants: ``ALL_CAPS``
Functions, local variables: ``lower_case``
Public functions: ``obi_lower_case``
Functions that shouldn't be called directly: ``_lower_case`` (``_`` prefix)
Global variables: ``g_lower_case`` (``g_`` prefix)
Pointers: ``pointer_ptr`` (``_ptr`` suffix)
.. note::
Underscores are used to delimit 'words'.
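A short, made-up C fragment illustrating these conventions (every identifier here is invented for the example)::

    #define MAX_NAME_LENGTH 128                     /* macro / constant: ALL_CAPS          */

    typedef enum { STATUS_OPEN, STATUS_CLOSED } File_status;   /* enum: Title_case, members: ALL_CAPS */

    typedef struct {                                /* struct: Title_case                  */
        File_status  status;
        char         name[MAX_NAME_LENGTH];
        char        *owner_ptr;                     /* pointer: _ptr suffix                */
    } Obidms_column;

    int g_column_count = 0;                         /* global variable: g_ prefix          */

    /* Not meant to be called directly: _ prefix. */
    static int _is_open(Obidms_column *column_ptr)
    {
        return column_ptr->status == STATUS_OPEN;
    }

    /* Public function: obi_ prefix; locals in lower_case. */
    int obi_close_column(Obidms_column *column_ptr)
    {
        int error_code = 0;

        if (_is_open(column_ptr))
            column_ptr->status = STATUS_CLOSED;
        return error_code;
    }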
*****************


@ -11,7 +11,6 @@ OBITools3 documentation
Programming guidelines <guidelines>
Data structures <data>
Pistes de reflexion <pistes>
Indices and tables


@ -1,21 +0,0 @@
###################
Pistes de reflexion
###################
******************************
What we want to be able to do
******************************
* Handle missing values
* Modify a column while it is being written (mmap)
* Append values at the end of a column file while it is being written (mmap)
*
******
Miscellaneous
******
* If the order of a column is changed, it is rewritten (no index).
* Use of semaphores for reading


@ -0,0 +1,55 @@
==============
Special values
==============
NA values
=========
All OBITypes have an associated NA (Not Available) value.
NA values are implemented by specifying an explicit NA value for each type,
following the R conventions as closely as possible:
* For the type ``OBIInt_t``, the NA value is ``INT_MIN``.
* For the type ``OBIBool_t``, the NA value is ``2``.
* For the types ``OBIIdx_t`` and ``OBITaxid_t``, the NA value is ``SIZE_MAX``.
* For the type ``OBIChar_t``, the NA value is ``\0``.
* For the type ``OBIFloat_t``::
    typedef union
    {
        double       value;
        unsigned int word[2];
    } ieee_double;

    /* Indices of the high and low words of the double; WORDS_BIGENDIAN is
       assumed to be defined by the build system on big-endian platforms. */
    #ifdef WORDS_BIGENDIAN
    static const int hw = 0, lw = 1;
    #else
    static const int hw = 1, lw = 0;
    #endif

    static double NA_value(void)
    {
        volatile ieee_double x;

        x.word[hw] = 0x7ff00000;   /* exponent pattern of a NaN     */
        x.word[lw] = 1954;         /* payload used by R for its NA  */
        return x.value;
    }
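The NA values listed for the other types are plain constants and could be written as follows (the macro names are assumptions, not necessarily those used in ``obitypes.h``)::

    #include <limits.h>    /* INT_MIN  */
    #include <stdint.h>    /* SIZE_MAX */

    #define OBIInt_NA   (INT_MIN)
    #define OBIIdx_NA   (SIZE_MAX)    /* also used for OBITaxid_t        */
    #define OBIChar_NA  ('\0')
    #define OBIBool_NA  (2)           /* neither false (0) nor true (1)  */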
Minimum and maximum values for ``OBIInt_t``
===========================================
* Maximum value : ``INT_MAX``
* Minimum value : ``INT_MIN(-1?)``
Infinity values for the type ``OBIFloat_t``
===========================================
* Positive infinity : ``INFINITY`` (should be defined in ``<math.h>``)
* Negative infinity : ``-INFINITY``
NaN value for the type ``OBIFloat_t``
=====================================
* NaN (Not a Number) value : ``NAN`` (should be defined in ``<math.h>`` but probably needs to be tested)
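Since the availability of ``NAN`` is implementation-dependent, a small test program along these lines could be used to check it (illustrative only)::

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
    #if defined(NAN) && defined(INFINITY)
        printf("isnan(NAN)       = %d\n", isnan(NAN));
        printf("isinf(INFINITY)  = %d\n", isinf(INFINITY));
        printf("isinf(-INFINITY) = %d\n", isinf(-INFINITY));
    #else
        puts("NAN and/or INFINITY are not defined by this <math.h>");
    #endif
        return 0;
    }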


@ -6,14 +6,10 @@ OBITypes
.. image:: ./UML/OBITypes_UML.png
:download:`html version of the OBITypes UML file <UML/OBITypes_UML.class.violet.html>`
.. note::
All OBITypes have an associated NA (Not Available) value.
We have currently two ideas for implementing NA values:
- By specifying an explicit NA value for each type
- By adding to each column of an OBIDMS a bit vector
indicating if the value is defined or not.
.. image:: ./UML/Obicolumn_classes_UML.png
:download:`html version of the OBIDMS classes UML file <UML/Obicolumn_classes_UML.class.violet.html>`
.. toctree::
@ -21,5 +17,4 @@ OBITypes
The elementary types <elementary>
The containers <containers>
Special values <specialvalues>

doc/sphinx/build_dir.txt (new file)

@ -0,0 +1 @@
build/lib.macosx-10.6-intel-3.5

python/obi.py (new file)

@ -0,0 +1,50 @@
#!/usr/local/bin/python3.4
'''
obi -- shortdesc
obi is a description
It defines classes_and_methods
@author: user_name
@copyright: 2014 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
default_config = { 'software' : "The OBITools",
'log' : False,
'loglevel' : 'INFO',
'progress' : True,
'defaultdms' : None
}
root_config_name='obi'
from obitools3.apps.config import getConfiguration # @UnresolvedImport
from obitools3.version import version
__all__ = []
__version__ = version
__date__ = '2014-09-28'
__updated__ = '2014-09-28'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
if __name__ =="__main__":
config = getConfiguration(root_config_name,
default_config)
config[root_config_name]['module'].run(config)


@ -0,0 +1,3 @@
#cython: language_level=3
cpdef buildArgumentParser(str configname, str softname)


@ -0,0 +1,61 @@
#cython: language_level=3
'''
Created on 27 mars 2016
@author: coissac
'''
import argparse
import sys
from .command import getCommandsList
class ObiParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
cpdef buildArgumentParser(str configname,
str softname):
parser = ObiParser()
parser.add_argument('--version', dest='%s:version' % configname,
action='store_true',
default=False,
help='Print the version of %s' % softname)
parser.add_argument('--log', dest='%s:log' % configname,
action='store',
type=str,
default=None,
help='Create a logfile')
parser.add_argument('--no-progress', dest='%s:progress' % configname,
action='store_false',
default=None,
help='Do not print the progress bar during analyzes')
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='additional help')
commands = getCommandsList()
for c in commands:
module = commands[c]
if hasattr(module, "run"):
if hasattr(module, "__title__"):
sub = subparsers.add_parser(c,help=module.__title__)
else:
sub = subparsers.add_parser(c)
if hasattr(module, "addOptions"):
module.addOptions(sub)
sub.set_defaults(**{'%s:module' % configname : module})
return parser

Some files were not shown because too many files have changed in this diff.