author     Fernando Mercês <fernando@mentebinaria.com.br>  2014-12-12 11:31:06 -0200
committer  Fernando Mercês <fernando@mentebinaria.com.br>  2014-12-12 11:31:06 -0200
commit     aff7e147de507f3026de8d703ac166cb6214e55f (patch)
tree       3c799ab52ea4ad487587aac675fef6ef45828829
parent     e8205d9e9cf8f0e3c64cb6d3675d107a8e036077 (diff)
first commit
-rwxr-xr-x  AIVDM_Encoder.py  311
-rw-r--r--  AiS_TX.grc  1369
-rwxr-xr-x  AiS_TX.py  135
-rw-r--r--  README  13
-rw-r--r--  gr-aistx/CMakeLists.txt  153
-rw-r--r--  gr-aistx/GPL  621
-rw-r--r--  gr-aistx/README  24
-rw-r--r--  gr-aistx/apps/CMakeLists.txt  25
-rw-r--r--  gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake  138
-rw-r--r--  gr-aistx/cmake/Modules/FindCppUnit.cmake  36
-rw-r--r--  gr-aistx/cmake/Modules/FindGnuradioCore.cmake  26
-rw-r--r--  gr-aistx/cmake/Modules/FindGruel.cmake  26
-rw-r--r--  gr-aistx/cmake/Modules/GrMiscUtils.cmake  210
-rw-r--r--  gr-aistx/cmake/Modules/GrPlatform.cmake  46
-rw-r--r--  gr-aistx/cmake/Modules/GrPython.cmake  227
-rw-r--r--  gr-aistx/cmake/Modules/GrSwig.cmake  229
-rw-r--r--  gr-aistx/cmake/Modules/GrTest.cmake  133
-rw-r--r--  gr-aistx/cmake/cmake_uninstall.cmake.in  32
-rw-r--r--  gr-aistx/docs/CMakeLists.txt  35
-rw-r--r--  gr-aistx/docs/README.AISTX  11
-rw-r--r--  gr-aistx/docs/doxygen/CMakeLists.txt  52
-rw-r--r--  gr-aistx/docs/doxygen/Doxyfile.in  1504
-rw-r--r--  gr-aistx/docs/doxygen/Doxyfile.swig_doc.in  1514
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/__init__.py  82
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/base.py  219
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/doxyindex.py  237
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/generated/__init__.py  7
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/generated/compound.py  503
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py  8342
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/generated/index.py  77
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py  523
-rw-r--r--  gr-aistx/docs/doxygen/doxyxml/text.py  56
-rw-r--r--  gr-aistx/docs/doxygen/other/group_defs.dox  7
-rw-r--r--  gr-aistx/docs/doxygen/other/main_page.dox  10
-rw-r--r--  gr-aistx/docs/doxygen/swig_doc.py  255
-rw-r--r--  gr-aistx/grc/AISTX_Build_Frame.xml  47
-rw-r--r--  gr-aistx/grc/AISTX_DebugME.xml  32
-rw-r--r--  gr-aistx/grc/AISTX_nrz_to_nrzi.xml  15
-rw-r--r--  gr-aistx/grc/CMakeLists.txt  23
-rw-r--r--  gr-aistx/include/AISTX/Build_Frame.h  60
-rw-r--r--  gr-aistx/include/AISTX/CMakeLists.txt  28
-rw-r--r--  gr-aistx/include/AISTX/DebugME.h  53
-rw-r--r--  gr-aistx/include/AISTX/api.h  33
-rw-r--r--  gr-aistx/include/AISTX/nrz_to_nrzi.h  54
-rw-r--r--  gr-aistx/lib/Build_Frame_impl.cc  469
-rw-r--r--  gr-aistx/lib/Build_Frame_impl.h  65
-rw-r--r--  gr-aistx/lib/CMakeLists.txt  67
-rw-r--r--  gr-aistx/lib/DebugME_impl.cc  98
-rw-r--r--  gr-aistx/lib/DebugME_impl.h  51
-rw-r--r--  gr-aistx/lib/nrz_to_nrzi_impl.cc  108
-rw-r--r--  gr-aistx/lib/nrz_to_nrzi_impl.h  51
-rw-r--r--  gr-aistx/lib/qa_AISTX.cc  36
-rw-r--r--  gr-aistx/lib/qa_AISTX.h  38
-rw-r--r--  gr-aistx/lib/test_AISTX.cc  43
-rw-r--r--  gr-aistx/python/CMakeLists.txt  46
-rw-r--r--  gr-aistx/python/__init__.py  54
-rwxr-xr-x  gr-aistx/python/qa_Build_Frame.py  39
-rwxr-xr-x  gr-aistx/python/qa_DebugME.py  39
-rwxr-xr-x  gr-aistx/python/qa_nrz_to_nrzi.py  39
-rw-r--r--  gr-aistx/swig/AISTX_swig.i  24
-rw-r--r--  gr-aistx/swig/CMakeLists.txt  61
-rw-r--r--  unpacker.c  86
62 files changed, 18947 insertions(+), 0 deletions(-)
diff --git a/AIVDM_Encoder.py b/AIVDM_Encoder.py
new file mode 100755
index 0000000..645ffb9
--- /dev/null
+++ b/AIVDM_Encoder.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python
+#
+# This script is part of the AIS BlackToolkit.
+# AIVDM_Encoder.py allows you to generate arbitrary AIVDM payloads. The main AIS message types are supported.
+#
+# Copyright 2013-2014 -- Embyte & Pastus
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# Usage examples:
+# $ ./AIVDM_Encoder.py --type=1 --vsize=30x10 | xargs -IA ./unpacker A 1 A
+# $ ./AIVDM_Encoder.py --type=1 --vsize=30x10 | xargs -IX ./AiS_TX.py --payload=X --channel=A
+#
+
+import sys
+
+# Adapted from gpsd-3.9's driver_ais.c
+def encode_string(string):
+ vocabolary = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_ !\"#$%&'()*+,-./0123456789:;<=>?"
+ encoded_string = ""
+ for c in string.upper():
+ index = vocabolary.find(c)
+ encoded_string += '{0:b}'.format(index).rjust(6,'0')
+ return encoded_string
+
+# NB: we apply a mask to tell Python how long our representation is (otherwise, for negative integers, it cannot take the two's complement).
+def compute_long_lat (__long, __lat):
+ _long = '{0:b}'.format(int(round(__long*600000)) & 0b1111111111111111111111111111).rjust(28,'0')
+ _lat = '{0:b}'.format(int(round(__lat*600000)) & 0b111111111111111111111111111).rjust(27,'0')
+ return (_long, _lat)
+
+def compute_long_lat22 (__long, __lat):
+ _long = '{0:b}'.format(int(round(__long*600)) & 0b111111111111111111).rjust(18,'0')
+ _lat = '{0:b}'.format(int(round(__lat*600)) & 0b11111111111111111).rjust(17,'0')
+ return (_long, _lat)
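+
+# Note on the masking in compute_long_lat()/compute_long_lat22() above (illustrative
+# values, not part of the protocol): Python integers are arbitrary-precision, so
+# AND-ing with an all-ones mask of the field width yields the two's-complement bit
+# pattern of a negative value, e.g.:
+#   '{0:b}'.format(-1 & 0b1111).rjust(4,'0')   ->  '1111'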
+
+def encode_1(__mmsi, __speed, __long, __lat, __course, __ts):
+ _type = '{0:b}'.format(1).rjust(6,'0') # 1
+ _repeat = "00" # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+ _status = '{0:b}'.format(15).rjust(4,'0') # status not defined
+ _rot = '{0:b}'.format(128).rjust(8,'0') # rate of turn not defined
+
+ _speed = '{0:b}'.format(int(round(__speed*10))).rjust(10,'0') # Speed over ground is in 0.1-knot resolution from 0 to 102 knots. Value 1023 indicates speed is not available; value 1022 indicates 102.2 knots or higher.
+ _accurancy = '0' # > 10m
+
+ (_long, _lat) = compute_long_lat(__long, __lat)
+
+ _course = '{0:b}'.format(int(round(__course*10))).rjust(12,'0') # 0.1 resolution. Course over ground will be 3600 (0xE10) if that data is not available.
+ _true_heading = '1'*9 # 511 (N/A)
+ _ts = '{0:b}'.format(__ts).rjust(6,'0') # Second of UTC timestamp.
+
+ _flags = '0'*6
+ # '00': manufacturer NaN
+ # '000': spare
+ # '0': Raim flag
+
+ _rstatus = '0'*19 # ??
+ # '11100000000000000110' : Radio status
+
+ return _type+_repeat+_mmsi+_status+_rot+_speed+_accurancy+_long+_lat+_course+_true_heading+_ts+_flags+_rstatus
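+
+# Sanity check (illustrative): the concatenated field widths above
+# (6+2+30+4+8+10+1+28+27+12+9+6+6+19) sum to the 168 bits of an ITU-R M.1371
+# type 1 position report, e.g. with the defaults used in main():
+#   assert len(encode_1(247320162, 0.1, 9.72357833333333, 45.6910166666667, 83.4, 38)) == 168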
+
+
+def encode_4(__mmsi, __speed, __long, __lat, __course, __ts):
+ _type = '{0:b}'.format(4).rjust(6,'0') # 4
+ _repeat = "00" # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+
+ _hour = '{0:b}'.format(24).rjust(5,'0')
+ _min = _sec = '{0:b}'.format(60).rjust(6,'0')
+
+
+ _accurancy = '1' # <= 10m
+ (_long, _lat) = compute_long_lat(__long, __lat)
+
+ _device = '0001' # GPS
+ _flags = '0'*12
+ # '0': transmission control for packet 24
+ # '000000000': spare
+ # '0': Raim flag
+
+ _rstatus = '0'*19 # ??
+ # '11100000000000000110' : Radio status
+
+ return _type+_repeat+_mmsi+'0'*23+_hour+_min+_sec+_accurancy+_long+_lat+_device+'0'*11+_rstatus
+
+def encode_14(__mmsi, __msg):
+ _type = '{0:b}'.format(14).rjust(6,'0') # 14
+ _repeat = "00" # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+ _spare = "00" # spare bit
+ _msg = encode_string(__msg)
+# _padding = '0'*(168-6-2-30-2-len(_msg))
+ return _type+_repeat+_mmsi+_spare+_msg
+
+
+def encode_18(__mmsi, __speed, __long, __lat, __course, __ts):
+ _type = '{0:b}'.format(18).rjust(6,'0') # 18
+ _repeat = "00" # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+ _reserved = '0'*8
+ _speed = '{0:b}'.format(int(round(__speed*10))).rjust(10,'0') # Speed over ground is in 0.1-knot resolution from 0 to 102 knots. Value 1023 indicates speed is not available; value 1022 indicates 102.2 knots or higher.
+ _accurancy = '0' # > 10m
+
+ (_long, _lat) = compute_long_lat(__long, __lat)
+
+ _course = '{0:b}'.format(int(round(__course*10))).rjust(12,'0') # 0.1 resolution. Course over ground will be 3600 (0xE10) if that data is not available.
+ _true_heading = '1'*9 # 511 (N/A)
+ _ts = '{0:b}'.format(__ts).rjust(6,'0') # Second of UTC timestamp.
+
+ _flags = '001011100'
+ # '00': Regional reserved
+ # '1': CS mode (carrier sense Class B)
+ # '0' Display flag
+ # '1': DSC
+ # '1': Band Flag
+ # '1': M22 Flag
+ # '0': Assigned 0 -> Autonomous mode
+ # '0': Raim flag
+
+ _rstatus = '11100000000000000110' # ??
+ # '11100000000000000110' : Radio status
+
+ return _type+_repeat+_mmsi+_reserved+_speed+_accurancy+_long+_lat+_course+_true_heading+_ts+_flags+_rstatus
+
+def encode_20(__mmsi, __offset, __slots, __timeout, __increment):
+ _type = '{0:b}'.format(20).rjust(6,'0') #
+ _repeat = '00' # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+
+ _offset = '{0:b}'.format(__offset).rjust(12,'0')
+ _slots = '{0:b}'.format(__slots).rjust(4,'0')
+ _timeout = '{0:b}'.format(__timeout).rjust(3,'0')
+ _increment = '{0:b}'.format(__increment).rjust(11,'0')
+
+ return _type+_repeat+_mmsi+'00'+_offset+_slots+_timeout+_increment+'00'
+
+
+def encode_22(__mmsi, __channel_a, __channel_b, __ne_lon, __ne_lat, __sw_lon, __sw_lat):
+ _type = '{0:b}'.format(22).rjust(6,'0') #
+ _repeat = '00' # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+
+ _channel_a = '{0:b}'.format(__channel_a).rjust(12,'0') # 2087
+ _channel_b = '{0:b}'.format(__channel_b).rjust(12,'0') # 2088
+
+ _txrxmode = '0'*4 #'{0:b}'.format(__channel_b).rjust(4,'0') # 0,1,2 (1 and 2 disable one TX)
+ _power = '0' # high ('1' = low)
+
+ (_ne_lon, _ne_lat) = compute_long_lat22(__ne_lon, __ne_lat)
+ (_sw_lon, _sw_lat) = compute_long_lat22(__sw_lon, __sw_lat)
+
+ _zonesize = '{0:b}'.format(4).rjust(3,'0') # default
+
+ return _type+_repeat+_mmsi+'00'+_channel_a+_channel_b+_txrxmode+_power+_ne_lon+_ne_lat+_sw_lon+_sw_lat+'000'+_zonesize+'0'*23
+
+
+def encode_23(__mmsi, __ne_lon, __ne_lat, __sw_lon, __sw_lat, __interval_time, __quiet_time):
+ _type = '{0:b}'.format(23).rjust(6,'0') #
+ _repeat = '00' # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+
+ (_ne_lon, _ne_lat) = compute_long_lat22(__ne_lon, __ne_lat)
+ (_sw_lon, _sw_lat) = compute_long_lat22(__sw_lon, __sw_lat)
+
+ _station_type = '0000' # Target all stations (class A, B, ...)
+ _ship_type = '0'*8 # Target all ships/cargos
+
+ _txrxmode = '0'*2 #'{0:b}'.format(__channel_b).rjust(4,'0') # 0,1,2 (1 and 2 disable one TX) - NB: 2 bits in message 23, 4 bits in message 22
+
+ _interval = '{0:b}'.format(__interval_time).rjust(4,'0')
+ _quiet = '{0:b}'.format(__quiet_time).rjust(4,'0')
+
+ return _type+_repeat+_mmsi+'00'+_ne_lon+_ne_lat+_sw_lon+_sw_lat+_station_type+_ship_type+'0'*22+_txrxmode+_interval+_quiet+'0'*6
+
+
+
+def encode_24(__mmsi, __part, __vname="NAN", __callsign="NAN", __vsize="90x14", __vtype=60):
+ _type = '{0:b}'.format(24).rjust(6,'0') # 24
+ _repeat = "00" # repeat (directive to an AIS transceiver that this message should be rebroadcast.)
+ _mmsi = '{0:b}'.format(__mmsi).rjust(30,'0') # 30 bits (247320162)
+ if __part == "A":
+ _part = "00"
+ _vname = encode_string(__vname)
+ _padding = '0'*(156-6-2-30-2-len(_vname)) # 160 bits per RFC -> 4 bits padding added in Build_Frame_impl.cc
+ return _type+_repeat+_mmsi+_part+_vname+_padding
+
+ else:
+ _part = "01"
+ _vtype = '{0:b}'.format(__vtype).rjust(8,'0') # 60 = passengers
+ _vendorID = "0"*42 # vendor ID
+
+ _tmp = encode_string(__callsign)
+ _callsign = _tmp + "0"*(42-len(_tmp)) # 7 six-bit characters
+
+ _hl=int(__vsize[:__vsize.find("x")])/2 # AIS antenna in the middle of the boat
+ _hw=int(__vsize[__vsize.find("x")+1:])/2
+ _half_length='{0:b}'.format(_hl).rjust(9,'0')
+ _half_width='{0:b}'.format(_hw).rjust(6,'0')
+
+ return _type+_repeat+_mmsi+_part+_vtype+_vendorID+_callsign+_half_length+_half_length+_half_width+_half_width+"000000"
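+
+# Example of the sizing logic above (illustrative): with the default --vsize=90x14
+# the antenna is assumed amidships, so the four dimension fields carry 45/45
+# (to bow/stern, 9 bits each) and 7/7 (to port/starboard, 6 bits each); the part B
+# field widths (6+2+30+2+8+42+42+9+9+6+6+6) again total 168 bits.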
+
+
+
+def main():
+ from optparse import OptionParser
+
+ desc="""Use this tool to generate the binary payload of an AIVDM sentence."""
+
+ parser = OptionParser(description=desc)
+
+ parser.add_option("--type", help="""Type:
+ 1 = Position Report Class A;
+ 4 = Base Station Report;
+ 14 = Safety-Related Broadcast Message;
+ 18 = Standard Class B CS Position Report;
+ 20 = Data Link Management Message (ref. RFC);
+ 21 = Aid-to-Navigation Report;
+ 22 = Channel Management;
+ 23 = Group Assignment Command;
+ 24 = Static Data Report""")
+
+ parser.add_option("--sart_msg",help="14. SART alarm message, default = SART ACTIVE", default="SART ACTIVE")
+
+ parser.add_option("--mmsi", help="""MMSI, default = 247320162.
+ 970010000 for SART device""",
+ default=247320162)
+ parser.add_option("--speed", help="18. Speed (knot), default = 0.1", default=0.1)
+ parser.add_option("--long", help="18. Longitude, default = 9.72357833333333", default=9.72357833333333)
+ parser.add_option("--lat", help="18. Latitude, default = 45.6910166666667", default=45.6910166666667)
+ parser.add_option("--course", help="18. Course, default = 83.4", default=83.4)
+ parser.add_option("--ts", help="18. Timestamp (sec), default = 38", default=38)
+ parser.add_option("--fatdmaoffset", help="20. Offset, default = 0", default=0)
+ parser.add_option("--fatdmaslots", help="20. Slot, default = 0", default=0)
+ parser.add_option("--fatdmatimeout", help="20. Timeout, default = 0", default=0)
+ parser.add_option("--fatdmaincrement", help="20. Increment, default = 0", default=0)
+
+ parser.add_option("--v_AtoN",help="21. Specify that the AtoN is virtual, default = real.", action="store_true")
+ parser.add_option("--aid_type", help="21. Type of AtoN (light, bouye)", default=1)
+ parser.add_option("--aid_name", help="21. Name of AtoN", default="@@@@@@@@@@@@@@@@@@@@")
+
+ parser.add_option("--channel_a", help="22. Specify channel frequency for A, default = 2087 (87B = 161.975 MHz). Ref ITU-R M.1084", default=2087)
+ parser.add_option("--channel_b", help="22. Specify channel frequency for B, default = 2088 (88B = 162.025 MHz). Ref ITU-R M.1084", default=2088)
+ parser.add_option("--ne_lon", help="22/23. Specify NE corner's longitude of rectangular jurisdiction area, default = 9.9", default=9.9)
+ parser.add_option("--ne_lat", help="22/23. Specify NE corner's latitude of rectangular jurisdiction area, default = 45.8", default=45.8)
+ parser.add_option("--sw_lon", help="22/23. Specify SW corner's longitude of rectangular jurisdiction area, default = 9.5", default=9.5)
+ parser.add_option("--sw_lat", help="22/23. Specify SW corner's latitude of rectangular jurisdiction area, default = 45.5", default=45.5)
+
+ parser.add_option("--interval", help="23. Commands the respective stations to the reporting interval, default = 1 (10 minutes)", default=1)
+ parser.add_option("--quiet", help="23. Force the respective stations to quiet interval, default = 15 minutes)", default=15)
+
+ parser.add_option("--part", help="24. Message part (A/B), default = A", default="A")
+ parser.add_option("--vname", help="24A. Vessel Name (UPPER CASE), default = NaN", default="NAN")
+ parser.add_option("--callsign", help="24B. Call Sign (UPPER CASE, max 7 chars.), default = KC9CAF", default="KC9CAF")
+ parser.add_option("--vtype", help="""24B. Type of ship and cargo type: 60 Passenger, 70 Cargo, 80 Tanker, 3x Special, 5x Carrying dangerous goods, harmful substances or marine pollutants:
+ - 35 Engaged in military operations ;-)
+ - 51 Search and rescue vessels
+ - 55 Law enforcement vessels
+ """, default=60)
+ parser.add_option("--vsize", help="24B/21. Vessel Size (multiple of 2), default = 90x14", default="90x14")
+
+
+ (options, args) = parser.parse_args()
+ if not options.type:
+ parser.error("Sentence type not specified: -h for help.")
+
+ payload = ""
+
+ if options.type == "1":
+ payload = encode_1(int(options.mmsi), float(options.speed), float(options.long), float(options.lat), float(options.course), int(options.ts))
+
+ elif options.type == "4":
+ payload = encode_4(int(options.mmsi), float(options.speed), float(options.long), float(options.lat), float(options.course), int(options.ts))
+
+ elif options.type == "14":
+ payload = encode_14(int(options.mmsi), options.sart_msg)
+
+ elif options.type == "20":
+ payload = encode_20(int(options.mmsi), int(options.fatdmaoffset), int(options.fatdmaslots), int(options.fatdmatimeout), int(options.fatdmaincrement))
+
+ elif options.type == "18":
+ payload = encode_18(int(options.mmsi), float(options.speed), float(options.long), float(options.lat), float(options.course), int(options.ts))
+
+ elif options.type == "21":
+ if options.v_AtoN == True: __virtual = '1'
+ else: __virtual = '0'
+ payload = encode_21(int(options.mmsi), int(options.aid_type), options.aid_name, float(options.long), float(options.lat), options.vsize, __virtual)
+
+ elif options.type == "22":
+ payload = encode_22(int(options.mmsi), int(options.channel_a), int(options.channel_b), float(options.ne_lon), float(options.ne_lat), float(options.sw_lon), float(options.sw_lat))
+
+ elif options.type == "23":
+ payload = encode_23(int(options.mmsi), float(options.ne_lon), float(options.ne_lat), float(options.sw_lon), float(options.sw_lat), int(options.interval), int(options.quiet))
+
+ elif options.type == "24":
+ if options.part=="A":
+ payload = encode_24(int(options.mmsi), "A", __vname=options.vname.upper())
+ else:
+ payload = encode_24(int(options.mmsi), "B", __callsign=options.callsign.upper(), __vsize=options.vsize, __vtype=int(options.vtype))
+
+ else:
+ parser.error("Sentence type not supported: -h for help.")
+
+ print payload
+
+
+if __name__ == "__main__":
+ main()
diff --git a/AiS_TX.grc b/AiS_TX.grc
new file mode 100644
index 0000000..a4cbe0c
--- /dev/null
+++ b/AiS_TX.grc
@@ -0,0 +1,1369 @@
+<?xml version='1.0' encoding='ASCII'?>
+<flow_graph>
+ <timestamp>Fri Nov 14 14:27:09 2014</timestamp>
+ <block>
+ <key>options</key>
+ <param>
+ <key>id</key>
+ <value>top_block</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>title</key>
+ <value></value>
+ </param>
+ <param>
+ <key>author</key>
+ <value></value>
+ </param>
+ <param>
+ <key>description</key>
+ <value></value>
+ </param>
+ <param>
+ <key>window_size</key>
+ <value>1280, 1024</value>
+ </param>
+ <param>
+ <key>generate_options</key>
+ <value>wx_gui</value>
+ </param>
+ <param>
+ <key>category</key>
+ <value>Custom</value>
+ </param>
+ <param>
+ <key>run_options</key>
+ <value>prompt</value>
+ </param>
+ <param>
+ <key>run</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>max_nouts</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>realtime_scheduling</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(-1, 0)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>analog_sig_source_x</key>
+ <param>
+ <key>id</key>
+ <value>analog_sig_source_x_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>samp_rate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>waveform</key>
+ <value>analog.GR_SIN_WAVE</value>
+ </param>
+ <param>
+ <key>freq</key>
+ <value>-25000</value>
+ </param>
+ <param>
+ <key>amp</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>offset</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(253, 184)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blks2_selector</key>
+ <param>
+ <key>id</key>
+ <value>blks2_selector_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>num_inputs</key>
+ <value>3</value>
+ </param>
+ <param>
+ <key>num_outputs</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>input_index</key>
+ <value>channel_select</value>
+ </param>
+ <param>
+ <key>output_index</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(953, 164)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>variable</key>
+ <param>
+ <key>id</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>326531</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(173, 0)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>variable</key>
+ <param>
+ <key>id</key>
+ <value>bit_rate</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>9600</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(281, -1)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>digital_gmsk_mod</key>
+ <param>
+ <key>id</key>
+ <value>digital_gmsk_mod_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>samples_per_symbol</key>
+ <value>int(samp_rate/bit_rate)</value>
+ </param>
+ <param>
+ <key>bt</key>
+ <value>0.4</value>
+ </param>
+ <param>
+ <key>verbose</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>log</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(254, 103)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_multiply_xx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_multiply_xx_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>num_inputs</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(464, 148)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_multiply_const_vxx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_multiply_const_vxx_0_1</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>const</key>
+ <value>0.9</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(681, 160)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_multiply_const_vxx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_multiply_const_vxx_0_1_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>const</key>
+ <value>0.9</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(697, 376)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_add_xx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_add_xx_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>num_inputs</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(803, 212)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_multiply_const_vxx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_multiply_const_vxx_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>const</key>
+ <value>0.45</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(611, 208)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_multiply_const_vxx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_multiply_const_vxx_0_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>const</key>
+ <value>0.45</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(605, 301)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>blocks_multiply_xx</key>
+ <param>
+ <key>id</key>
+ <value>blocks_multiply_xx_0_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>num_inputs</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>vlen</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(458, 364)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>digital_gmsk_mod</key>
+ <param>
+ <key>id</key>
+ <value>digital_gmsk_mod_0_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>samples_per_symbol</key>
+ <value>int(samp_rate/bit_rate)</value>
+ </param>
+ <param>
+ <key>bt</key>
+ <value>0.4</value>
+ </param>
+ <param>
+ <key>verbose</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>log</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(249, 321)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>analog_sig_source_x</key>
+ <param>
+ <key>id</key>
+ <value>analog_sig_source_x_0_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>samp_rate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>waveform</key>
+ <value>analog.GR_SIN_WAVE</value>
+ </param>
+ <param>
+ <key>freq</key>
+ <value>25000</value>
+ </param>
+ <param>
+ <key>amp</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>offset</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(250, 401)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>variable</key>
+ <param>
+ <key>id</key>
+ <value>channel_select</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(368, -1)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>uhd_usrp_sink</key>
+ <param>
+ <key>id</key>
+ <value>uhd_usrp_sink_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>fc32</value>
+ </param>
+ <param>
+ <key>otw</key>
+ <value></value>
+ </param>
+ <param>
+ <key>stream_args</key>
+ <value></value>
+ </param>
+ <param>
+ <key>dev_addr</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sync</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_rate</key>
+ <value>0.0</value>
+ </param>
+ <param>
+ <key>num_mboards</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>clock_source0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source1</key>
+ <value>gpsdo</value>
+ </param>
+ <param>
+ <key>sd_spec1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>nchan</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>samp_rate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>center_freq0</key>
+ <value>uhd.tune_request_t(162000000, 19000000)</value>
+ </param>
+ <param>
+ <key>gain0</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant0</key>
+ <value>TX/RX</value>
+ </param>
+ <param>
+ <key>bw0</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq1</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain1</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw1</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant8</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant9</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant10</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant11</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant12</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant13</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant14</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant15</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant16</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant17</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant18</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant19</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant20</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant21</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant22</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant23</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant24</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant25</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant26</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant27</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant28</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant29</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant30</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>gain31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant31</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(932, 305)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>AISTX_Build_Frame</key>
+ <param>
+ <key>id</key>
+ <value>AISTX_Build_Frame_0</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>sentence</key>
+ <value>000100000001101100011001110111011011110000000000000000000000011000111100111100100000101100100000101101000110011010001010010100010000000001000000000000000000000000000000</value>
+ </param>
+ <param>
+ <key>repeat</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>enable_NRZI</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(0, 97)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <block>
+ <key>AISTX_Build_Frame</key>
+ <param>
+ <key>id</key>
+ <value>AISTX_Build_Frame_1</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>sentence</key>
+ <value>010100000001101100011001110111011011110000000000000001011110000000000000</value>
+ </param>
+ <param>
+ <key>repeat</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>enable_NRZI</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(1, 317)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ </block>
+ <connection>
+ <source_block_id>digital_gmsk_mod_0</source_block_id>
+ <sink_block_id>blocks_multiply_xx_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>analog_sig_source_x_0</source_block_id>
+ <sink_block_id>blocks_multiply_xx_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>analog_sig_source_x_0_0</source_block_id>
+ <sink_block_id>blocks_multiply_xx_0_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>digital_gmsk_mod_0_0</source_block_id>
+ <sink_block_id>blocks_multiply_xx_0_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_xx_0_0</source_block_id>
+ <sink_block_id>blocks_multiply_const_vxx_0_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_xx_0</source_block_id>
+ <sink_block_id>blocks_multiply_const_vxx_0_1</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_xx_0</source_block_id>
+ <sink_block_id>blocks_multiply_const_vxx_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blks2_selector_0</source_block_id>
+ <sink_block_id>uhd_usrp_sink_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_add_xx_0</source_block_id>
+ <sink_block_id>blks2_selector_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>2</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_const_vxx_0_1</source_block_id>
+ <sink_block_id>blks2_selector_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_const_vxx_0_1_0</source_block_id>
+ <sink_block_id>blks2_selector_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_xx_0_0</source_block_id>
+ <sink_block_id>blocks_multiply_const_vxx_0_1_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_const_vxx_0</source_block_id>
+ <sink_block_id>blocks_add_xx_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>blocks_multiply_const_vxx_0_0</source_block_id>
+ <sink_block_id>blocks_add_xx_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>AISTX_Build_Frame_0</source_block_id>
+ <sink_block_id>digital_gmsk_mod_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>AISTX_Build_Frame_1</source_block_id>
+ <sink_block_id>digital_gmsk_mod_0_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+</flow_graph>
diff --git a/AiS_TX.py b/AiS_TX.py
new file mode 100755
index 0000000..5ea5deb
--- /dev/null
+++ b/AiS_TX.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+#
+# This script is part of the AIS BlackToolkit.
+# AiS_TX.py implements a software-based AIS transmitter in accordance with the specification (ITU-R M.1371-4).
+#
+# A fully functional GnuRadio installation is required, including our AIS Frame Builder block, namely gr-aistx.
+#
+# Tested on:
+# GnuRadio 3.6.5.1
+# Debian 7.1.0 wheezy
+# GNU C++ version 4.7.3; Boost_104900
+# UHD_003.005.003-0-unknown
+# Ettus USRP B100 Version 2
+#
+# Copyright 2013-2014 -- Embyte & Pastus
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# Usage example:
+# $ ./AIVDM_Encoder.py --type=1 --mmsi=970010000 --lat=45.6910 --long=9.7235 | xargs -IX ./AiS_TX.py --payload=X --channel=A
+#
+
+from gnuradio import blocks
+from gnuradio import digital
+from gnuradio import eng_notation
+from gnuradio import gr
+from gnuradio import uhd
+from gnuradio.eng_option import eng_option
+from gnuradio.gr import firdes
+from grc_gnuradio import wxgui as grc_wxgui
+from optparse import OptionParser
+import AISTX
+import time
+import wx
+
+class top_block(grc_wxgui.top_block_gui):
+
+ def __init__(self, p, c, pw, ff, sr, br):
+ grc_wxgui.top_block_gui.__init__(self, title="Top Block")
+ _icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
+ self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
+
+ ##################################################
+ # Variables
+ ##################################################
+ self.samp_rate = samp_rate = sr
+ self.channel_select = channel_select = c
+ self.bit_rate = bit_rate = br
+
+ ##################################################
+ # Blocks
+ ##################################################
+ self.uhd_usrp_sink_0 = uhd.usrp_sink(
+ device_addr="",
+ stream_args=uhd.stream_args(
+ cpu_format="fc32",
+ channels=range(1),
+ ),
+ )
+ self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
+ self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request_t(161975000+50000*c,ff), 0)
+ self.uhd_usrp_sink_0.set_gain(pw, 0) # apply the requested TX power (--power, default -10 dB)
+ self.uhd_usrp_sink_0.set_antenna("TX/RX", 0)
+ self.digital_gmsk_mod_0 = digital.gmsk_mod(
+ samples_per_symbol=int(samp_rate/bit_rate),
+ bt=0.4,
+ verbose=False,
+ log=False,
+ )
+ self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vcc((0.9, ))
+ self.AISTX_Build_Frame_0 = AISTX.Build_Frame(p, False, True)
+
+ ##################################################
+ # Connections
+ ##################################################
+ self.connect((self.AISTX_Build_Frame_0, 0), (self.digital_gmsk_mod_0, 0))
+ self.connect((self.digital_gmsk_mod_0, 0), (self.blocks_multiply_const_vxx_0, 0))
+ self.connect((self.blocks_multiply_const_vxx_0, 0), (self.uhd_usrp_sink_0, 0))
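+
+ # With the defaults (samp_rate=326531, bit_rate=9600) samples_per_symbol works
+ # out to int(326531/9600) = 34, and the channel index c tunes the sink to
+ # 161975000 + 50000*c Hz: 161.975 MHz for AIS channel A (c=0) and 162.025 MHz
+ # for channel B (c=1), matching the --channel option parsed below.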
+
+
+ def get_samp_rate(self):
+ return self.samp_rate
+
+ def set_samp_rate(self, samp_rate):
+ self.samp_rate = samp_rate
+ self.uhd_usrp_sink_0.set_samp_rate(self.samp_rate)
+
+ def get_channel_select(self):
+ return self.channel_select
+
+ def set_channel_select(self, channel_select):
+ self.channel_select = channel_select
+ self.analog_sig_source_x_0.set_frequency(-25000+50000*self.channel_select)
+
+ def get_bit_rate(self):
+ return self.bit_rate
+
+ def set_bit_rate(self, bit_rate):
+ self.bit_rate = bit_rate
+
+if __name__ == '__main__':
+
+ desc="""GnuRadio-Based AIS Transmitter. Copyright Embyte & Pastus 2013-2014."""
+
+ parser = OptionParser(option_class=eng_option, usage="%prog: [options]", description=desc)
+
+ parser.add_option("--payload", help="""Specify the message payload to transmit
+ (e.g., crafted via AIVDM_Encoder)""")
+ parser.add_option("--channel", help="""Specify the AIS channel:
+ - A: 161.975 MHz (87B)
+ - B: 162.025 MHz (88B)""")
+ parser.add_option("--power", help="""Specify the transmisson power, between -12dB and +12dB (default is -10dB)""", type="int", default = -10)
+ parser.add_option("--filter_frequency", help="""Specify the filter frequency (default is 19MHz)""", type="int", default = 19000000)
+ parser.add_option("--sampling_rate", help="""Specify the sampling rate (default is 326.531KHz)""", type="int", default = 326531)
+ parser.add_option("--bit_rate", help="""Specify the bit rate (default is 9600 baud)""", type="int", default = 9600)
+
+ (options, args) = parser.parse_args()
+
+ if not options.payload:
+ parser.error("Payload not specified: -h for help.")
+
+ if not options.channel:
+ parser.error("Channel not specified: -h for help.")
+
+ if options.channel!="A" and options.channel!="B":
+ parser.error("Channel accepts value A or B: -h for help")
+
+ channel_ID = 0 if options.channel=="A" else 1
+
+ tb = top_block(p=options.payload, c=channel_ID, pw=options.power, ff=options.filter_frequency, sr=options.sampling_rate, br=options.bit_rate)
+ tb.Run(True)
+
diff --git a/README b/README
new file mode 100644
index 0000000..37469dd
--- /dev/null
+++ b/README
@@ -0,0 +1,13 @@
+
+Thanks for downloading the AIS BlackToolkit! This directory contains:
+
+The AIS transmitter in the form of a GRC flowgraph a1e4b54db83fb895a3c94493126bb318 AiS_TX.grc
+The AIS transmitter in the form of a script-kiddie script 52540b46f316ee460f4684a262132d36 AiS_TX.py
+An AIVDM encoder supporting the main message types 056ac09822b342da296617ba7b4055dd AIVDM_Encoder.py
+The AIS Frame Builder block for GnuRadio c8b7018386f64dd725fde119f406f574 gr-aistx.tgz
+An NMEA encoder (binary to ASCII) 2534dc06c21fe53f45bd2abab96bb6a7 unpacker.c
+
+Remember to install GnuRadio and gr-aistx first. A short description is included in each file's header.
+
+Good luck!
+
diff --git a/gr-aistx/CMakeLists.txt b/gr-aistx/CMakeLists.txt
new file mode 100644
index 0000000..2f7abf6
--- /dev/null
+++ b/gr-aistx/CMakeLists.txt
@@ -0,0 +1,153 @@
+# Copyright 2011,2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+
+########################################################################
+# Project setup
+########################################################################
+cmake_minimum_required(VERSION 2.6)
+project(gr-AISTX CXX C)
+enable_testing()
+
+#select the release build type by default to get optimization flags
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE "Release")
+ message(STATUS "Build type not specified: defaulting to release.")
+endif(NOT CMAKE_BUILD_TYPE)
+set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "")
+
+list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules)
+
+########################################################################
+# Compiler specific setup
+########################################################################
+if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32)
+ #http://gcc.gnu.org/wiki/Visibility
+ add_definitions(-fvisibility=hidden)
+endif()
+
+########################################################################
+# Find boost
+########################################################################
+if(UNIX AND EXISTS "/usr/lib64")
+ list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix
+endif(UNIX AND EXISTS "/usr/lib64")
+set(Boost_ADDITIONAL_VERSIONS
+ "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39"
+ "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44"
+ "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49"
+ "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54"
+ "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59"
+ "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64"
+ "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69"
+)
+find_package(Boost "1.35" COMPONENTS filesystem system)
+
+if(NOT Boost_FOUND)
+ message(FATAL_ERROR "Boost required to compile AISTX")
+endif()
+
+########################################################################
+# Install directories
+########################################################################
+include(GrPlatform) #define LIB_SUFFIX
+set(GR_RUNTIME_DIR bin)
+set(GR_LIBRARY_DIR lib${LIB_SUFFIX})
+set(GR_INCLUDE_DIR include/AISTX)
+set(GR_DATA_DIR share)
+set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME})
+set(GR_DOC_DIR ${GR_DATA_DIR}/doc)
+set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME})
+set(GR_CONF_DIR etc)
+set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d)
+set(GR_LIBEXEC_DIR libexec)
+set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME})
+set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks)
+
+########################################################################
+# Find gnuradio build dependencies
+########################################################################
+find_package(Gruel)
+find_package(GnuradioCore)
+find_package(CppUnit)
+
+# To run a more advanced search for GNU Radio and its components and
+# versions, use the following. Add any components required to the list
+# of GR_REQUIRED_COMPONENTS (in all caps) and change "version" to the
+# minimum API compatible version required.
+#
+# set(GR_REQUIRED_COMPONENTS CORE BLOCKS FILTER ...)
+# find_package(Gnuradio "version")
+
+if(NOT GRUEL_FOUND)
+ message(FATAL_ERROR "Gruel required to compile AISTX")
+endif()
+
+if(NOT GNURADIO_CORE_FOUND)
+ message(FATAL_ERROR "GnuRadio Core required to compile AISTX")
+endif()
+
+if(NOT CPPUNIT_FOUND)
+ message(FATAL_ERROR "CppUnit required to compile AISTX")
+endif()
+
+########################################################################
+# Setup the include and linker paths
+########################################################################
+include_directories(
+ ${CMAKE_SOURCE_DIR}/include
+ ${Boost_INCLUDE_DIRS}
+ ${GRUEL_INCLUDE_DIRS}
+ ${CPPUNIT_INCLUDE_DIRS}
+ ${GNURADIO_CORE_INCLUDE_DIRS}
+)
+
+link_directories(
+ ${Boost_LIBRARY_DIRS}
+ ${GRUEL_LIBRARY_DIRS}
+ ${CPPUNIT_LIBRARY_DIRS}
+ ${GNURADIO_CORE_LIBRARY_DIRS}
+)
+
+# Set component parameters
+set(GR_AISTX_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE)
+set(GR_AISTX_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE)
+
+########################################################################
+# Create uninstall target
+########################################################################
+configure_file(
+ ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
+@ONLY)
+
+add_custom_target(uninstall
+ ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
+)
+
+########################################################################
+# Add subdirectories
+########################################################################
+add_subdirectory(include/AISTX)
+add_subdirectory(lib)
+add_subdirectory(swig)
+add_subdirectory(python)
+add_subdirectory(grc)
+add_subdirectory(apps)
+add_subdirectory(docs)
diff --git a/gr-aistx/GPL b/gr-aistx/GPL
new file mode 100644
index 0000000..94a0453
--- /dev/null
+++ b/gr-aistx/GPL
@@ -0,0 +1,621 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
diff --git a/gr-aistx/README b/gr-aistx/README
new file mode 100644
index 0000000..31fb5ec
--- /dev/null
+++ b/gr-aistx/README
@@ -0,0 +1,24 @@
+
+This directory contains a custom GNU Radio block that we call the AIS Frame Builder.
+It is part of the AIS BlackToolkit.
+
+This block generates AIS frames and implements the full AIS stack.
+It is composed of three main components covering, respectively, the
+application/presentation layers, the link layer, and the physical layer,
+as defined in the AIS protocol specification.
+
+Install it as described in the official GNU Radio out-of-tree module documentation, i.e.:
+
+$ mkdir build
+$ cd build
+$ cmake ../
+$ make
+$ sudo make install
+
+Copyright 2013-2014 -- Embyte & Pastus
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
diff --git a/gr-aistx/apps/CMakeLists.txt b/gr-aistx/apps/CMakeLists.txt
new file mode 100644
index 0000000..c837d77
--- /dev/null
+++ b/gr-aistx/apps/CMakeLists.txt
@@ -0,0 +1,25 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+include(GrPython)
+
+GR_PYTHON_INSTALL(
+ PROGRAMS
+ DESTINATION bin
+)
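
As committed, the PROGRAMS list above is empty, so this GR_PYTHON_INSTALL call installs
nothing from apps/. A minimal sketch of how an application script would be listed once one
exists (the script name below is illustrative, not part of this commit):

    GR_PYTHON_INSTALL(
        PROGRAMS
        ais_tx_app.py
        DESTINATION bin
    )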
diff --git a/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake b/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake
new file mode 100644
index 0000000..7ce4c49
--- /dev/null
+++ b/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake
@@ -0,0 +1,138 @@
+# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords> <multi_value_keywords> args...)
+#
+# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for
+# parsing the arguments given to that macro or function.
+# It processes the arguments and defines a set of variables which hold the
+# values of the respective options.
+#
+# The <options> argument contains all options for the respective macro,
+# i.e. keywords which can be used when calling the macro without any value
+# following, like e.g. the OPTIONAL keyword of the install() command.
+#
+# The <one_value_keywords> argument contains all keywords for this macro
+# which are followed by one value, like e.g. DESTINATION keyword of the
+# install() command.
+#
+# The <multi_value_keywords> argument contains all keywords for this macro
+# which can be followed by more than one value, like e.g. the TARGETS or
+# FILES keywords of the install() command.
+#
+# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the
+# keywords listed in <options>, <one_value_keywords> and
+# <multi_value_keywords> a variable composed of the given <prefix>
+# followed by "_" and the name of the respective keyword.
+# These variables will then hold the respective value from the argument list.
+# For the <options> keywords this will be TRUE or FALSE.
+#
+# All remaining arguments are collected in a variable
+# <prefix>_UNPARSED_ARGUMENTS; this can be checked afterwards to see whether
+# your macro was called with unrecognized parameters.
+#
+# As an example here a my_install() macro, which takes similar arguments as the
+# real install() command:
+#
+# function(MY_INSTALL)
+# set(options OPTIONAL FAST)
+# set(oneValueArgs DESTINATION RENAME)
+# set(multiValueArgs TARGETS CONFIGURATIONS)
+# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+# ...
+#
+# Assume my_install() has been called like this:
+# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub)
+#
+# After the cmake_parse_arguments() call the macro will have set the following
+# variables:
+# MY_INSTALL_OPTIONAL = TRUE
+#   MY_INSTALL_FAST = FALSE (this option was not used when calling my_install())
+# MY_INSTALL_DESTINATION = "bin"
+# MY_INSTALL_RENAME = "" (was not used)
+# MY_INSTALL_TARGETS = "foo;bar"
+# MY_INSTALL_CONFIGURATIONS = "" (was not used)
+#   MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL")
+#
+# You can then continue and process these variables.
+#
+# Keywords terminate lists of values: if another recognized keyword follows
+# directly after a one_value_keyword, it is interpreted as the beginning of
+# the new option rather than as that keyword's value.
+# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would NOT set
+# MY_INSTALL_DESTINATION to "OPTIONAL"; instead MY_INSTALL_DESTINATION would
+# be empty and MY_INSTALL_OPTIONAL would be set to TRUE.
+
+#=============================================================================
+# Copyright 2010 Alexander Neundorf <neundorf@kde.org>
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+
+if(__CMAKE_PARSE_ARGUMENTS_INCLUDED)
+ return()
+endif()
+set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE)
+
+
+function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames)
+ # first set all result variables to empty/FALSE
+ foreach(arg_name ${_singleArgNames} ${_multiArgNames})
+ set(${prefix}_${arg_name})
+ endforeach(arg_name)
+
+ foreach(option ${_optionNames})
+ set(${prefix}_${option} FALSE)
+ endforeach(option)
+
+ set(${prefix}_UNPARSED_ARGUMENTS)
+
+ set(insideValues FALSE)
+ set(currentArgName)
+
+ # now iterate over all arguments and fill the result variables
+ foreach(currentArg ${ARGN})
+ list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword
+ list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword
+ list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword
+
+ if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1)
+ if(insideValues)
+ if("${insideValues}" STREQUAL "SINGLE")
+ set(${prefix}_${currentArgName} ${currentArg})
+ set(insideValues FALSE)
+ elseif("${insideValues}" STREQUAL "MULTI")
+ list(APPEND ${prefix}_${currentArgName} ${currentArg})
+ endif()
+ else(insideValues)
+ list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg})
+ endif(insideValues)
+ else()
+ if(NOT ${optionIndex} EQUAL -1)
+ set(${prefix}_${currentArg} TRUE)
+ set(insideValues FALSE)
+ elseif(NOT ${singleArgIndex} EQUAL -1)
+ set(currentArgName ${currentArg})
+ set(${prefix}_${currentArgName})
+ set(insideValues "SINGLE")
+ elseif(NOT ${multiArgIndex} EQUAL -1)
+ set(currentArgName ${currentArg})
+ set(${prefix}_${currentArgName})
+ set(insideValues "MULTI")
+ endif()
+ endif()
+
+ endforeach(currentArg)
+
+ # propagate the result variables to the caller:
+ foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames})
+ set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE)
+ endforeach(arg_name)
+ set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE)
+
+endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs)
diff --git a/gr-aistx/cmake/Modules/FindCppUnit.cmake b/gr-aistx/cmake/Modules/FindCppUnit.cmake
new file mode 100644
index 0000000..9af308f
--- /dev/null
+++ b/gr-aistx/cmake/Modules/FindCppUnit.cmake
@@ -0,0 +1,36 @@
+# http://www.cmake.org/pipermail/cmake/2006-October/011446.html
+# Modified to use pkg config and use standard var names
+
+#
+# Find the CppUnit includes and library
+#
+# This module defines
+# CPPUNIT_INCLUDE_DIRS, where to find the CppUnit headers (e.g. cppunit/TestCase.h).
+# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit.
+# CPPUNIT_FOUND, If false, do not try to use CppUnit.
+
+INCLUDE(FindPkgConfig)
+PKG_CHECK_MODULES(PC_CPPUNIT "cppunit")
+
+FIND_PATH(CPPUNIT_INCLUDE_DIRS
+ NAMES cppunit/TestCase.h
+ HINTS ${PC_CPPUNIT_INCLUDE_DIR}
+ PATHS
+ /usr/local/include
+ /usr/include
+)
+
+FIND_LIBRARY(CPPUNIT_LIBRARIES
+ NAMES cppunit
+ HINTS ${PC_CPPUNIT_LIBDIR}
+ PATHS
+ ${CPPUNIT_INCLUDE_DIRS}/../lib
+ /usr/local/lib
+ /usr/lib
+)
+
+LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS})
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
+MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
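
A minimal sketch of how a consumer CMakeLists.txt typically uses the variables this module
defines, assuming this cmake/Modules directory is already on CMAKE_MODULE_PATH (the
test_AISTX target name is illustrative):

    find_package(CppUnit)
    if(CPPUNIT_FOUND)
        include_directories(${CPPUNIT_INCLUDE_DIRS})
        # link the unit-test binary against CppUnit
        target_link_libraries(test_AISTX ${CPPUNIT_LIBRARIES})
    endif()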
diff --git a/gr-aistx/cmake/Modules/FindGnuradioCore.cmake b/gr-aistx/cmake/Modules/FindGnuradioCore.cmake
new file mode 100644
index 0000000..3773588
--- /dev/null
+++ b/gr-aistx/cmake/Modules/FindGnuradioCore.cmake
@@ -0,0 +1,26 @@
+INCLUDE(FindPkgConfig)
+PKG_CHECK_MODULES(PC_GNURADIO_CORE gnuradio-core)
+
+FIND_PATH(
+ GNURADIO_CORE_INCLUDE_DIRS
+ NAMES gr_random.h
+ HINTS $ENV{GNURADIO_CORE_DIR}/include/gnuradio
+ ${PC_GNURADIO_CORE_INCLUDEDIR}
+ PATHS /usr/local/include/gnuradio
+ /usr/include/gnuradio
+)
+
+FIND_LIBRARY(
+ GNURADIO_CORE_LIBRARIES
+ NAMES gnuradio-core
+ HINTS $ENV{GNURADIO_CORE_DIR}/lib
+ ${PC_GNURADIO_CORE_LIBDIR}
+ PATHS /usr/local/lib
+ /usr/local/lib64
+ /usr/lib
+ /usr/lib64
+)
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_CORE DEFAULT_MSG GNURADIO_CORE_LIBRARIES GNURADIO_CORE_INCLUDE_DIRS)
+MARK_AS_ADVANCED(GNURADIO_CORE_LIBRARIES GNURADIO_CORE_INCLUDE_DIRS)
diff --git a/gr-aistx/cmake/Modules/FindGruel.cmake b/gr-aistx/cmake/Modules/FindGruel.cmake
new file mode 100644
index 0000000..58dff70
--- /dev/null
+++ b/gr-aistx/cmake/Modules/FindGruel.cmake
@@ -0,0 +1,26 @@
+INCLUDE(FindPkgConfig)
+PKG_CHECK_MODULES(PC_GRUEL gruel)
+
+FIND_PATH(
+ GRUEL_INCLUDE_DIRS
+ NAMES gruel/attributes.h
+ HINTS $ENV{GRUEL_DIR}/include
+ ${PC_GRUEL_INCLUDEDIR}
+ PATHS /usr/local/include
+ /usr/include
+)
+
+FIND_LIBRARY(
+ GRUEL_LIBRARIES
+ NAMES gruel
+ HINTS $ENV{GRUEL_DIR}/lib
+ ${PC_GRUEL_LIBDIR}
+ PATHS /usr/local/lib
+ /usr/local/lib64
+ /usr/lib
+ /usr/lib64
+)
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GRUEL DEFAULT_MSG GRUEL_LIBRARIES GRUEL_INCLUDE_DIRS)
+MARK_AS_ADVANCED(GRUEL_LIBRARIES GRUEL_INCLUDE_DIRS)
diff --git a/gr-aistx/cmake/Modules/GrMiscUtils.cmake b/gr-aistx/cmake/Modules/GrMiscUtils.cmake
new file mode 100644
index 0000000..9331d5d
--- /dev/null
+++ b/gr-aistx/cmake/Modules/GrMiscUtils.cmake
@@ -0,0 +1,210 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE)
+
+########################################################################
+# Set global variable macro.
+# Used for subdirectories to export settings.
+# Example: include and library paths.
+########################################################################
+function(GR_SET_GLOBAL var)
+ set(${var} ${ARGN} CACHE INTERNAL "" FORCE)
+endfunction(GR_SET_GLOBAL)
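
For instance, a subdirectory could export its include path to sibling directories like this
(the variable name and path are illustrative):

    GR_SET_GLOBAL(AISTX_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include)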
+
+########################################################################
+# Set the pre-processor definition if the condition is true.
+# - def the pre-processor definition to set and condition name
+########################################################################
+function(GR_ADD_COND_DEF def)
+ if(${def})
+ add_definitions(-D${def})
+ endif(${def})
+endfunction(GR_ADD_COND_DEF)
+
+########################################################################
+# Check for a header and conditionally set a compile define.
+# - hdr the relative path to the header file
+# - def the pre-processor definition to set
+########################################################################
+function(GR_CHECK_HDR_N_DEF hdr def)
+ include(CheckIncludeFileCXX)
+ CHECK_INCLUDE_FILE_CXX(${hdr} ${def})
+ GR_ADD_COND_DEF(${def})
+endfunction(GR_CHECK_HDR_N_DEF)
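
A one-line usage sketch: probe for a header and add the compile define only when it is found
(the header and define names are illustrative):

    GR_CHECK_HDR_N_DEF(sys/time.h HAVE_SYS_TIME_H)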
+
+########################################################################
+# Include subdirectory macro.
+# Sets the CMake directory variables,
+# includes the subdirectory CMakeLists.txt,
+# resets the CMake directory variables.
+#
+# This macro includes subdirectories rather than adding them
+# so that the subdirectory can affect variables in the level above.
+# This provides a work-around for the lack of convenience libraries.
+# This way a subdirectory can append to the list of library sources.
+########################################################################
+macro(GR_INCLUDE_SUBDIRECTORY subdir)
+ #insert the current directories on the front of the list
+ list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR})
+ list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR})
+
+ #set the current directories to the names of the subdirs
+ set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir})
+ set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir})
+
+ #include the subdirectory CMakeLists to run it
+ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+ include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt)
+
+ #reset the value of the current directories
+ list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR)
+ list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR)
+
+    #pop the subdir names off the front of the list
+ list(REMOVE_AT _cmake_source_dirs 0)
+ list(REMOVE_AT _cmake_binary_dirs 0)
+endmacro(GR_INCLUDE_SUBDIRECTORY)
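
It is invoked like add_subdirectory, but lists appended inside the included directory stay
visible to the caller. A sketch with an illustrative directory name:

    GR_INCLUDE_SUBDIRECTORY(generated)
    # any list(APPEND some_sources ...) done in generated/CMakeLists.txt is visible here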
+
+########################################################################
+# Check if a compiler flag works and conditionally set a compile define.
+# - flag the compiler flag to check for
+# - have the variable to set with result
+########################################################################
+macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have)
+ include(CheckCXXCompilerFlag)
+ CHECK_CXX_COMPILER_FLAG(${flag} ${have})
+ if(${have})
+ add_definitions(${flag})
+ endif(${have})
+endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE)
+
+########################################################################
+# Generates the .la libtool file
+# This appears to generate libtool files that cannot be used by auto*.
+# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest])
+# Notice: there is no COMPONENT option; these will not get distributed.
+########################################################################
+function(GR_LIBTOOL)
+ if(NOT DEFINED GENERATE_LIBTOOL)
+ set(GENERATE_LIBTOOL OFF) #disabled by default
+ endif()
+
+ if(GENERATE_LIBTOOL)
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN})
+
+ find_program(LIBTOOL libtool)
+ if(LIBTOOL)
+ include(CMakeMacroLibtoolFile)
+ CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION})
+ endif(LIBTOOL)
+ endif(GENERATE_LIBTOOL)
+
+endfunction(GR_LIBTOOL)
+
+########################################################################
+# Do standard things to the library target
+# - set target properties
+# - make install rules
+# Also handle gnuradio custom naming conventions w/ extras mode.
+########################################################################
+function(GR_LIBRARY_FOO target)
+ #parse the arguments for component names
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN})
+
+ #set additional target properties
+ set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER})
+
+ #install the generated files like so...
+ install(TARGETS ${target}
+ LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file
+ ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file
+ RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file
+ )
+
+ #extras mode enabled automatically on linux
+ if(NOT DEFINED LIBRARY_EXTRAS)
+ set(LIBRARY_EXTRAS ${LINUX})
+ endif()
+
+ #special extras mode to enable alternative naming conventions
+ if(LIBRARY_EXTRAS)
+
+ #create .la file before changing props
+ GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR})
+
+ #give the library a special name with ultra-zero soversion
+ set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0")
+ set(target_name lib${target}-${LIBVER}.so.0.0.0)
+
+ #custom command to generate symlinks
+ add_custom_command(
+ TARGET ${target}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
+ COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
+ COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install
+ )
+
+ #and install the extra symlinks
+ install(
+ FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
+ ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
+ DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT}
+ )
+
+ endif(LIBRARY_EXTRAS)
+endfunction(GR_LIBRARY_FOO)
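
A hedged sketch of applying it to an out-of-tree library target; it assumes LIBVER,
GR_LIBRARY_DIR and GR_RUNTIME_DIR have already been set by the surrounding build, and the
target and component names are illustrative:

    GR_LIBRARY_FOO(gnuradio-AISTX
        RUNTIME_COMPONENT "aistx_runtime"   # component for the .so/.dll files
        DEVEL_COMPONENT   "aistx_devel"     # component for the .lib/.a files
    )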
+
+########################################################################
+# Create a dummy custom command that depends on other targets.
+# Usage:
+# GR_GEN_TARGET_DEPS(unique_name target_deps <target1> <target2> ...)
+# ADD_CUSTOM_COMMAND(<the usual args> ${target_deps})
+#
+# Custom commands can't depend on targets, but they can depend on executables,
+# and executables can depend on targets; the generated dummy executable bridges the two.
+########################################################################
+function(GR_GEN_TARGET_DEPS name var)
+ file(
+ WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
+ "int main(void){return 0;}\n"
+ )
+ execute_process(
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp
+ )
+ add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp)
+ if(ARGN)
+ add_dependencies(${name} ${ARGN})
+ endif(ARGN)
+
+ if(CMAKE_CROSSCOMPILING)
+ set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross
+ else()
+ set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE)
+ endif()
+endfunction(GR_GEN_TARGET_DEPS)
diff --git a/gr-aistx/cmake/Modules/GrPlatform.cmake b/gr-aistx/cmake/Modules/GrPlatform.cmake
new file mode 100644
index 0000000..a2e4f3b
--- /dev/null
+++ b/gr-aistx/cmake/Modules/GrPlatform.cmake
@@ -0,0 +1,46 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_PLATFORM_CMAKE TRUE)
+
+########################################################################
+# Setup additional defines for OS types
+########################################################################
+if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ set(LINUX TRUE)
+endif()
+
+if(LINUX AND EXISTS "/etc/debian_version")
+ set(DEBIAN TRUE)
+endif()
+
+if(LINUX AND EXISTS "/etc/redhat-release")
+ set(REDHAT TRUE)
+endif()
+
+########################################################################
+# when the library suffix should be 64 (applies to redhat linux family)
+########################################################################
+if(NOT DEFINED LIB_SUFFIX AND REDHAT AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$")
+ set(LIB_SUFFIX 64)
+endif()
+set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix")
diff --git a/gr-aistx/cmake/Modules/GrPython.cmake b/gr-aistx/cmake/Modules/GrPython.cmake
new file mode 100644
index 0000000..efdddf3
--- /dev/null
+++ b/gr-aistx/cmake/Modules/GrPython.cmake
@@ -0,0 +1,227 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_PYTHON_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_PYTHON_CMAKE TRUE)
+
+########################################################################
+# Setup the python interpreter:
+# This allows the user to specify a specific interpreter,
+# or finds the interpreter via the built-in cmake module.
+########################################################################
+#this allows the user to override PYTHON_EXECUTABLE
+if(PYTHON_EXECUTABLE)
+
+ set(PYTHONINTERP_FOUND TRUE)
+
+#otherwise if not set, try to automatically find it
+else(PYTHON_EXECUTABLE)
+
+ #use the built-in find script
+ find_package(PythonInterp)
+
+ #and if that fails use the find program routine
+ if(NOT PYTHONINTERP_FOUND)
+ find_program(PYTHON_EXECUTABLE NAMES python python2.7 python2.6 python2.5)
+ if(PYTHON_EXECUTABLE)
+ set(PYTHONINTERP_FOUND TRUE)
+ endif(PYTHON_EXECUTABLE)
+ endif(NOT PYTHONINTERP_FOUND)
+
+endif(PYTHON_EXECUTABLE)
+
+#make the path to the executable appear in the cmake gui
+set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter")
+
+#make sure we can use -B with python (introduced in 2.6)
+if(PYTHON_EXECUTABLE)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -B -c ""
+ OUTPUT_QUIET ERROR_QUIET
+ RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT
+ )
+ if(PYTHON_HAS_DASH_B_RESULT EQUAL 0)
+ set(PYTHON_DASH_B "-B")
+ endif()
+endif(PYTHON_EXECUTABLE)
+
+########################################################################
+# Check for the existence of a python module:
+# - desc a string description of the check
+# - mod the name of the module to import
+# - cmd an additional command to run
+# - have the result variable to set
+########################################################################
+macro(GR_PYTHON_CHECK_MODULE desc mod cmd have)
+ message(STATUS "")
+ message(STATUS "Python checking for ${desc}")
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "
+#########################################
+try: import ${mod}
+except: exit(-1)
+try: assert ${cmd}
+except: exit(-1)
+#########################################"
+ RESULT_VARIABLE ${have}
+ )
+ if(${have} EQUAL 0)
+ message(STATUS "Python checking for ${desc} - found")
+ set(${have} TRUE)
+ else(${have} EQUAL 0)
+ message(STATUS "Python checking for ${desc} - not found")
+ set(${have} FALSE)
+ endif(${have} EQUAL 0)
+endmacro(GR_PYTHON_CHECK_MODULE)
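
A usage sketch that probes the chosen interpreter for a module and then branches on the
result (the module and result-variable names are illustrative):

    GR_PYTHON_CHECK_MODULE("numpy" numpy True HAVE_PYTHON_NUMPY)
    if(NOT HAVE_PYTHON_NUMPY)
        message(STATUS "numpy not found; python-based tests may be skipped")
    endif()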
+
+########################################################################
+# Sets the python installation directory GR_PYTHON_DIR
+########################################################################
+execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "
+from distutils import sysconfig
+print sysconfig.get_python_lib(plat_specific=True, prefix='')
+" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE
+)
+file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR)
+
+########################################################################
+# Create an always-built target with a unique name
+# Usage: GR_UNIQUE_TARGET(<description> <dependencies list>)
+########################################################################
+function(GR_UNIQUE_TARGET desc)
+ file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib
+unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5]
+print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))"
+ OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE)
+ add_custom_target(${_target} ALL DEPENDS ${ARGN})
+endfunction(GR_UNIQUE_TARGET)
+
+########################################################################
+# Install python sources (also builds and installs byte-compiled python)
+########################################################################
+function(GR_PYTHON_INSTALL)
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN})
+
+ ####################################################################
+ if(GR_PYTHON_INSTALL_FILES)
+ ####################################################################
+ install(${ARGN}) #installs regular python files
+
+ #create a list of all generated files
+ unset(pysrcfiles)
+ unset(pycfiles)
+ unset(pyofiles)
+ foreach(pyfile ${GR_PYTHON_INSTALL_FILES})
+ get_filename_component(pyfile ${pyfile} ABSOLUTE)
+ list(APPEND pysrcfiles ${pyfile})
+
+ #determine if this file is in the source or binary directory
+ file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile})
+ string(LENGTH "${source_rel_path}" source_rel_path_len)
+ file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile})
+ string(LENGTH "${binary_rel_path}" binary_rel_path_len)
+
+ #and set the generated path appropriately
+ if(${source_rel_path_len} GREATER ${binary_rel_path_len})
+ set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path})
+ else()
+ set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path})
+ endif()
+ list(APPEND pycfiles ${pygenfile}c)
+ list(APPEND pyofiles ${pygenfile}o)
+
+ #ensure generation path exists
+ get_filename_component(pygen_path ${pygenfile} PATH)
+ file(MAKE_DIRECTORY ${pygen_path})
+
+ endforeach(pyfile)
+
+ #the command to generate the pyc files
+ add_custom_command(
+ DEPENDS ${pysrcfiles} OUTPUT ${pycfiles}
+ COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles}
+ )
+
+ #the command to generate the pyo files
+ add_custom_command(
+ DEPENDS ${pysrcfiles} OUTPUT ${pyofiles}
+ COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles}
+ )
+
+ #create install rule and add generated files to target list
+ set(python_install_gen_targets ${pycfiles} ${pyofiles})
+ install(FILES ${python_install_gen_targets}
+ DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
+ COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
+ )
+
+
+ ####################################################################
+ elseif(GR_PYTHON_INSTALL_PROGRAMS)
+ ####################################################################
+ file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native)
+
+ foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS})
+ get_filename_component(pyfile_name ${pyfile} NAME)
+ get_filename_component(pyfile ${pyfile} ABSOLUTE)
+ string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe")
+ list(APPEND python_install_gen_targets ${pyexefile})
+
+ get_filename_component(pyexefile_path ${pyexefile} PATH)
+ file(MAKE_DIRECTORY ${pyexefile_path})
+
+ add_custom_command(
+ OUTPUT ${pyexefile} DEPENDS ${pyfile}
+ COMMAND ${PYTHON_EXECUTABLE} -c
+ \"open('${pyexefile}', 'w').write('\#!${pyexe_native}\\n'+open('${pyfile}').read())\"
+ COMMENT "Shebangin ${pyfile_name}"
+ )
+
+ #on windows, python files need an extension to execute
+ get_filename_component(pyfile_ext ${pyfile} EXT)
+ if(WIN32 AND NOT pyfile_ext)
+ set(pyfile_name "${pyfile_name}.py")
+ endif()
+
+ install(PROGRAMS ${pyexefile} RENAME ${pyfile_name}
+ DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
+ COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
+ )
+ endforeach(pyfile)
+
+ endif()
+
+ GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets})
+
+endfunction(GR_PYTHON_INSTALL)
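+# Illustrative usage (editor's sketch; the destination shown is an assumption
+# based on how an out-of-tree module such as gr-aistx typically installs its
+# python package):
+#   GR_PYTHON_INSTALL(
+#       FILES __init__.py
+#       DESTINATION ${GR_PYTHON_DIR}/AISTX
+#   )
+# This installs the listed sources plus their byte-compiled .pyc/.pyo companions.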
+
+########################################################################
+# Write the python helper script that generates byte code files
+########################################################################
+file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py "
+import sys, py_compile
+files = sys.argv[1:]
+srcs, gens = files[:len(files)/2], files[len(files)/2:]
+for src, gen in zip(srcs, gens):
+ py_compile.compile(file=src, cfile=gen, doraise=True)
+")
diff --git a/gr-aistx/cmake/Modules/GrSwig.cmake b/gr-aistx/cmake/Modules/GrSwig.cmake
new file mode 100644
index 0000000..6ba5ee3
--- /dev/null
+++ b/gr-aistx/cmake/Modules/GrSwig.cmake
@@ -0,0 +1,229 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_SWIG_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_SWIG_CMAKE TRUE)
+
+include(GrPython)
+
+########################################################################
+# Builds a swig documentation file to be generated into python docstrings
+# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....)
+#
+# Set the following variable to specify extra dependent targets:
+# - GR_SWIG_DOCS_SOURCE_DEPS
+# - GR_SWIG_DOCS_TARGET_DEPS
+########################################################################
+function(GR_SWIG_MAKE_DOCS output_file)
+ find_package(Doxygen)
+ if(DOXYGEN_FOUND)
+
+        #setup the input files variable list, quote formatted
+ set(input_files)
+ unset(INPUT_PATHS)
+ foreach(input_path ${ARGN})
+ if (IS_DIRECTORY ${input_path}) #when input path is a directory
+ file(GLOB input_path_h_files ${input_path}/*.h)
+ else() #otherwise its just a file, no glob
+ set(input_path_h_files ${input_path})
+ endif()
+ list(APPEND input_files ${input_path_h_files})
+ set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"")
+ endforeach(input_path)
+
+ #determine the output directory
+ get_filename_component(name ${output_file} NAME_WE)
+ get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH)
+ set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs)
+ make_directory(${OUTPUT_DIRECTORY})
+
+ #generate the Doxyfile used by doxygen
+ configure_file(
+ ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in
+ ${OUTPUT_DIRECTORY}/Doxyfile
+ @ONLY)
+
+ #Create a dummy custom command that depends on other targets
+ include(GrMiscUtils)
+ GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS})
+
+ #call doxygen on the Doxyfile + input headers
+ add_custom_command(
+ OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml
+ DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps}
+ COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile
+ COMMENT "Generating doxygen xml for ${name} docs"
+ )
+
+ #call the swig_doc script on the xml files
+ add_custom_command(
+ OUTPUT ${output_file}
+ DEPENDS ${input_files} ${OUTPUT_DIRECTORY}/xml/index.xml
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py
+ ${OUTPUT_DIRECTORY}/xml
+ ${output_file}
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen
+ )
+
+ else(DOXYGEN_FOUND)
+ file(WRITE ${output_file} "\n") #no doxygen -> empty file
+ endif(DOXYGEN_FOUND)
+endfunction(GR_SWIG_MAKE_DOCS)
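+# Illustrative usage (editor's sketch; the output file and header path are
+# hypothetical):
+#   GR_SWIG_MAKE_DOCS(${CMAKE_CURRENT_BINARY_DIR}/AISTX_swig_doc.i
+#       ${CMAKE_CURRENT_SOURCE_DIR}/../include)
+# which runs doxygen over the headers and converts the XML output into docstrings.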
+
+########################################################################
+# Build a swig target for the common gnuradio use case. Usage:
+# GR_SWIG_MAKE(target ifile ifile ifile...)
+#
+# Set the following variables before calling:
+# - GR_SWIG_FLAGS
+# - GR_SWIG_INCLUDE_DIRS
+# - GR_SWIG_LIBRARIES
+# - GR_SWIG_SOURCE_DEPS
+# - GR_SWIG_TARGET_DEPS
+# - GR_SWIG_DOC_FILE
+# - GR_SWIG_DOC_DIRS
+########################################################################
+macro(GR_SWIG_MAKE name)
+ set(ifiles ${ARGN})
+
+ #do swig doc generation if specified
+ if (GR_SWIG_DOC_FILE)
+ set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS})
+        set(GR_SWIG_DOCS_TARGET_DEPS ${GR_SWIG_TARGET_DEPS})
+ GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS})
+ list(APPEND GR_SWIG_SOURCE_DEPS ${GR_SWIG_DOC_FILE})
+ endif()
+
+ #append additional include directories
+ find_package(PythonLibs)
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs)
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS})
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR})
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR})
+
+ #determine include dependencies for swig file
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE}
+ ${CMAKE_BINARY_DIR}/get_swig_deps.py
+ "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}"
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ )
+
+ #Create a dummy custom command that depends on other targets
+ include(GrMiscUtils)
+ GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS})
+ set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag)
+ add_custom_command(
+ OUTPUT ${tag_file}
+ DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps}
+ COMMAND ${CMAKE_COMMAND} -E touch ${tag_file}
+ )
+
+ #append the specified include directories
+ include_directories(${GR_SWIG_INCLUDE_DIRS})
+ list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file})
+
+ #setup the swig flags with flags and include directories
+ set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS})
+ foreach(dir ${GR_SWIG_INCLUDE_DIRS})
+ list(APPEND CMAKE_SWIG_FLAGS "-I${dir}")
+ endforeach(dir)
+
+ #set the C++ property on the swig .i file so it builds
+ set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON)
+
+ #setup the actual swig library target to be built
+ include(UseSWIG)
+ SWIG_ADD_MODULE(${name} python ${ifiles})
+ SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES})
+
+endmacro(GR_SWIG_MAKE)
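+# Illustrative usage (editor's sketch; the variable values and library name
+# shown are assumptions, not taken verbatim from this commit):
+#   set(GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
+#   set(GR_SWIG_LIBRARIES gnuradio-AISTX)
+#   GR_SWIG_MAKE(AISTX_swig AISTX_swig.i)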
+
+########################################################################
+# Install swig targets generated by GR_SWIG_MAKE. Usage:
+# GR_SWIG_INSTALL(
+# TARGETS target target target...
+# [DESTINATION destination]
+# [COMPONENT component]
+# )
+########################################################################
+macro(GR_SWIG_INSTALL)
+
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN})
+
+ foreach(name ${GR_SWIG_INSTALL_TARGETS})
+ install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME}
+ DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
+ COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
+ )
+
+ include(GrPython)
+ GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py
+ DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
+ COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
+ )
+
+ GR_LIBTOOL(
+ TARGET ${SWIG_MODULE_${name}_REAL_NAME}
+ DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
+ )
+
+ endforeach(name)
+
+endmacro(GR_SWIG_INSTALL)
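+# Illustrative usage (editor's sketch; the destination is an assumption):
+#   GR_SWIG_INSTALL(TARGETS AISTX_swig DESTINATION ${GR_PYTHON_DIR}/AISTX)
+# installing both the compiled extension module and the generated AISTX_swig.py.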
+
+########################################################################
+# Generate a python file that can determine swig dependencies.
+# Used by the make macro above to determine extra dependencies.
+# When you build C++, CMake figures out the header dependencies.
+# This code essentially performs that logic for swig includes.
+########################################################################
+file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py "
+
+import os, sys, re
+
+include_matcher = re.compile('[#|%]include\\s*[<|\"](.*)[>|\"]')
+include_dirs = sys.argv[2].split(';')
+
+def get_swig_incs(file_path):
+ file_contents = open(file_path, 'r').read()
+    return include_matcher.findall(file_contents)
+
+def get_swig_deps(file_path, level):
+ deps = [file_path]
+ if level == 0: return deps
+ for inc_file in get_swig_incs(file_path):
+ for inc_dir in include_dirs:
+ inc_path = os.path.join(inc_dir, inc_file)
+ if not os.path.exists(inc_path): continue
+ deps.extend(get_swig_deps(inc_path, level-1))
+ return deps
+
+if __name__ == '__main__':
+ ifiles = sys.argv[1].split(';')
+ deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], [])
+ #sys.stderr.write(';'.join(set(deps)) + '\\n\\n')
+ print(';'.join(set(deps)))
+")
diff --git a/gr-aistx/cmake/Modules/GrTest.cmake b/gr-aistx/cmake/Modules/GrTest.cmake
new file mode 100644
index 0000000..6174c03
--- /dev/null
+++ b/gr-aistx/cmake/Modules/GrTest.cmake
@@ -0,0 +1,133 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_TEST_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_TEST_CMAKE TRUE)
+
+########################################################################
+# Add a unit test and setup the environment for a unit test.
+# Takes the same arguments as the ADD_TEST function.
+#
+# Before calling set the following variables:
+# GR_TEST_TARGET_DEPS - built targets for the library path
+# GR_TEST_LIBRARY_DIRS - directories for the library path
+# GR_TEST_PYTHON_DIRS - directories for the python path
+########################################################################
+function(GR_ADD_TEST test_name)
+
+ if(WIN32)
+ #Ensure that the build exe also appears in the PATH.
+ list(APPEND GR_TEST_TARGET_DEPS ${ARGN})
+
+ #In the land of windows, all libraries must be in the PATH.
+ #Since the dependent libraries are not yet installed,
+ #we must manually set them in the PATH to run tests.
+ #The following appends the path of a target dependency.
+ foreach(target ${GR_TEST_TARGET_DEPS})
+ get_target_property(location ${target} LOCATION)
+ if(location)
+ get_filename_component(path ${location} PATH)
+ string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path})
+ list(APPEND GR_TEST_LIBRARY_DIRS ${path})
+ endif(location)
+ endforeach(target)
+
+ #SWIG generates the python library files into a subdirectory.
+ #Therefore, we must append this subdirectory into PYTHONPATH.
+ #Only do this for the python directories matching the following:
+ foreach(pydir ${GR_TEST_PYTHON_DIRS})
+ get_filename_component(name ${pydir} NAME)
+ if(name MATCHES "^(swig|lib|src)$")
+ list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE})
+ endif()
+ endforeach(pydir)
+ endif(WIN32)
+
+ file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir)
+ file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list?
+ file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list?
+
+ set(environs "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}")
+
+ #http://www.cmake.org/pipermail/cmake/2009-May/029464.html
+ #Replaced this add test + set environs code with the shell script generation.
+    #It's nicer to be able to manually run the shell script to diagnose problems.
+ #ADD_TEST(${ARGV})
+ #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}")
+
+ if(UNIX)
+ set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH")
+ #set both LD and DYLD paths to cover multiple UNIX OS library paths
+ list(APPEND libpath "$LD_LIBRARY_PATH" "$DYLD_LIBRARY_PATH")
+ list(APPEND pypath "$PYTHONPATH")
+
+ #replace list separator with the path separator
+ string(REPLACE ";" ":" libpath "${libpath}")
+ string(REPLACE ";" ":" pypath "${pypath}")
+ list(APPEND environs "PATH=${binpath}" "LD_LIBRARY_PATH=${libpath}" "DYLD_LIBRARY_PATH=${libpath}" "PYTHONPATH=${pypath}")
+
+        #generate a shell script that sets the environment and runs the test
+ find_program(SHELL sh)
+ set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh)
+ file(WRITE ${sh_file} "#!${SHELL}\n")
+ #each line sets an environment variable
+ foreach(environ ${environs})
+ file(APPEND ${sh_file} "export ${environ}\n")
+ endforeach(environ)
+ #load the command to run with its arguments
+ foreach(arg ${ARGN})
+ file(APPEND ${sh_file} "${arg} ")
+ endforeach(arg)
+ file(APPEND ${sh_file} "\n")
+
+ #make the shell file executable
+ execute_process(COMMAND chmod +x ${sh_file})
+
+ add_test(${test_name} ${SHELL} ${sh_file})
+
+ endif(UNIX)
+
+ if(WIN32)
+ list(APPEND libpath ${DLL_PATHS} "%PATH%")
+ list(APPEND pypath "%PYTHONPATH%")
+
+ #replace list separator with the path separator (escaped)
+ string(REPLACE ";" "\\;" libpath "${libpath}")
+ string(REPLACE ";" "\\;" pypath "${pypath}")
+ list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}")
+
+ #generate a bat file that sets the environment and runs the test
+ set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat)
+ file(WRITE ${bat_file} "@echo off\n")
+ #each line sets an environment variable
+ foreach(environ ${environs})
+ file(APPEND ${bat_file} "SET ${environ}\n")
+ endforeach(environ)
+ #load the command to run with its arguments
+ foreach(arg ${ARGN})
+ file(APPEND ${bat_file} "${arg} ")
+ endforeach(arg)
+ file(APPEND ${bat_file} "\n")
+
+ add_test(${test_name} ${bat_file})
+ endif(WIN32)
+
+endfunction(GR_ADD_TEST)
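+# Illustrative usage (editor's sketch; the test name, dependencies and paths
+# are assumptions):
+#   set(GR_TEST_TARGET_DEPS gnuradio-AISTX)
+#   set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)
+#   GR_ADD_TEST(qa_Build_Frame ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_Build_Frame.py)
+# This writes qa_Build_Frame_test.sh (or .bat on Windows) and registers it with CTest.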
diff --git a/gr-aistx/cmake/cmake_uninstall.cmake.in b/gr-aistx/cmake/cmake_uninstall.cmake.in
new file mode 100644
index 0000000..9ae1ae4
--- /dev/null
+++ b/gr-aistx/cmake/cmake_uninstall.cmake.in
@@ -0,0 +1,32 @@
+# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F
+
+IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
+ MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"")
+ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
+
+FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
+STRING(REGEX REPLACE "\n" ";" files "${files}")
+FOREACH(file ${files})
+ MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"")
+ IF(EXISTS "$ENV{DESTDIR}${file}")
+ EXEC_PROGRAM(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ IF(NOT "${rm_retval}" STREQUAL 0)
+ MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
+ ENDIF(NOT "${rm_retval}" STREQUAL 0)
+ ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}")
+ EXEC_PROGRAM(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ IF(NOT "${rm_retval}" STREQUAL 0)
+ MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
+ ENDIF(NOT "${rm_retval}" STREQUAL 0)
+ ELSE(EXISTS "$ENV{DESTDIR}${file}")
+ MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
+ ENDIF(EXISTS "$ENV{DESTDIR}${file}")
+ENDFOREACH(file)
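+# Editorial note: following the CMake FAQ linked above, this template is
+# normally wired in from the top-level CMakeLists.txt roughly as follows
+# (a sketch, not part of this file):
+#   configure_file(
+#       "${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
+#       "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake"
+#       @ONLY)
+#   add_custom_target(uninstall
+#       "${CMAKE_COMMAND}" -P "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake")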
diff --git a/gr-aistx/docs/CMakeLists.txt b/gr-aistx/docs/CMakeLists.txt
new file mode 100644
index 0000000..f16fbf6
--- /dev/null
+++ b/gr-aistx/docs/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Setup dependencies
+########################################################################
+find_package(Doxygen)
+
+########################################################################
+# Begin conditional configuration
+########################################################################
+if(ENABLE_DOXYGEN)
+
+########################################################################
+# Add subdirectories
+########################################################################
+add_subdirectory(doxygen)
+
+endif(ENABLE_DOXYGEN)
diff --git a/gr-aistx/docs/README.AISTX b/gr-aistx/docs/README.AISTX
new file mode 100644
index 0000000..90c9c74
--- /dev/null
+++ b/gr-aistx/docs/README.AISTX
@@ -0,0 +1,11 @@
+This is the AISTX-write-a-block package, meant as a guide to building
+out-of-tree packages. To use the AISTX blocks, the Python namespace
+is 'AISTX', which is imported as:
+
+ import AISTX
+
+See the Doxygen documentation for details about the blocks available
+in this package. A quick listing of the details can be found in Python
+after importing by using:
+
+ help(AISTX)
diff --git a/gr-aistx/docs/doxygen/CMakeLists.txt b/gr-aistx/docs/doxygen/CMakeLists.txt
new file mode 100644
index 0000000..1b44799
--- /dev/null
+++ b/gr-aistx/docs/doxygen/CMakeLists.txt
@@ -0,0 +1,52 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Create the doxygen configuration file
+########################################################################
+file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir)
+file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir)
+file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir)
+file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir)
+
+set(HAVE_DOT ${DOXYGEN_DOT_FOUND})
+set(enable_html_docs YES)
+set(enable_latex_docs NO)
+set(enable_xml_docs YES)
+
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
+ ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+@ONLY)
+
+set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html)
+
+########################################################################
+# Make and install doxygen docs
+########################################################################
+add_custom_command(
+ OUTPUT ${BUILT_DIRS}
+ COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "Generating documentation with doxygen"
+)
+
+add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS})
+
+install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR})
diff --git a/gr-aistx/docs/doxygen/Doxyfile.in b/gr-aistx/docs/doxygen/Doxyfile.in
new file mode 100644
index 0000000..0820d91
--- /dev/null
+++ b/gr-aistx/docs/doxygen/Doxyfile.in
@@ -0,0 +1,1504 @@
+# Doxyfile 1.5.7.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "GNU Radio's AISTX Package"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek,
+# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish,
+# Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, Slovene,
+# Spanish, Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names, like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = YES
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 4
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespace are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = NO
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = NO
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = NO
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= NO
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page. This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by
+# doxygen. The layout file controls the global structure of the generated output files
+# in an output format independent way. To create the layout file that represents
+# doxygen's defaults, run doxygen with the -l option. You can optionally specify a
+# file name after the option, if omitted DoxygenLayout.xml will be used as the name
+# of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text "
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = @top_srcdir@ @top_builddir@
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = *.h \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE = @abs_top_builddir@/docs/doxygen/html \
+ @abs_top_builddir@/docs/doxygen/xml \
+ @abs_top_builddir@/docs/doxygen/other/doxypy.py \
+ @abs_top_builddir@/_CPack_Packages \
+ @abs_top_srcdir@/cmake
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.deps/* \
+ */.libs/* \
+ */.svn/* \
+ */CVS/* \
+ */__init__.py \
+ */qa_*.cc \
+ */qa_*.h \
+ */qa_*.py
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS = ad9862 \
+ numpy \
+ *swig* \
+ *Swig* \
+ *my_top_block* \
+ *my_graph* \
+ *app_top_block* \
+ *am_rx_graph* \
+ *_queue_watcher_thread* \
+ *parse* \
+ *MyFrame* \
+ *MyApp* \
+ *PyObject* \
+ *wfm_rx_block* \
+ *_sptr* \
+ *debug* \
+ *wfm_rx_sca_block* \
+ *tv_rx_block* \
+ *wxapt_rx_block* \
+ *example_signal*
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS = *.py=@top_srcdir@/gnuradio-core/doc/other/doxypy.py
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = @enable_html_docs@
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = YES
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
+# are set, an additional index file will be generated that can be used as input for
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
+# HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#namespace">Qt Help Project / Namespace</a>.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#virtual-folders">Qt Help Project / Virtual Folders</a>.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# the top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = YES
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to FRAME, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature. Other possible values
+# for this tag are: HIERARCHIES, which will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list;
+# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which
+# disables this behavior completely. For backwards compatibility with previous
+# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE
+# respectively.
+
+GENERATE_TREEVIEW = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 180
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = @enable_latex_docs@
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
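+
+# Illustrative example only (not part of the generated template): listing a
+# package here, e.g.
+#   EXTRA_PACKAGES = amsmath
+# makes the generated LaTeX header include a corresponding \usepackage line.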
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements; missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = @enable_xml_docs@
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
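+
+# Illustrative example only (hypothetical macro names, not part of the
+# generated template): entries may be plain names or name=definition pairs,
+# for instance
+#   PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS MY_API=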
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT = @HAVE_DOT@
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = NO
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/gr-aistx/docs/doxygen/Doxyfile.swig_doc.in b/gr-aistx/docs/doxygen/Doxyfile.swig_doc.in
new file mode 100644
index 0000000..50b8aa8
--- /dev/null
+++ b/gr-aistx/docs/doxygen/Doxyfile.swig_doc.in
@@ -0,0 +1,1514 @@
+# Doxyfile 1.6.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = @CPACK_PACKAGE_NAME@
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before file names in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names, as on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files
+# it parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend
+# it using this tag. The format is ext=language, where ext is a file
+# extension, and language is one of the parsers supported by doxygen: IDL,
+# Java, Javascript, C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL.
+# For instance, to make doxygen treat .inc files as Fortran files (default is
+# PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS, otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO (the default) the constructors
+# will appear in the respective orders defined by SORT_MEMBER_DOCS and
+# SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS
+# is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
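+
+# Illustrative example only (hypothetical section name, not part of the
+# generated template): text wrapped in "\if internal_notes ... \endif" is
+# only emitted when that section is enabled, e.g.
+#   ENABLED_SECTIONS = internal_notes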
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
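+
+# Illustrative example only (assumes the sources live in a git checkout):
+# doxygen appends the file name to the command, so a filter such as
+#   FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"
+# would report the hash of the last commit touching each file.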
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be
+# parsed by doxygen. The layout file controls the global structure of the
+# generated output files in an output format independent way. To create the
+# layout file that represents doxygen's defaults, run doxygen with the -l
+# option. You can optionally specify a file name after the option; if
+# omitted, DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = @INPUT_PATHS@
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = *.h
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = NO
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET = NO
+
+# When the GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When the GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
+# are set, an additional index file will be generated that can be used as input for
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
+# HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add.
+# For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's
+# filter section matches. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript and DHTML
+# and should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) there is already a search
+# function so this one should typically be disabled.
+
+SEARCHENGINE = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code
+# with syntax highlighting in the LaTeX output. Note that which sources are
+# shown also depends on other settings such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = YES
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, doxygen will not show the graph at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
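The Doxyfile above turns off the HTML, LaTeX, RTF and man outputs and keeps only GENERATE_XML = YES, because the XML it produces is what the doxyxml Python package added in the following files consumes. A minimal sketch of that round trip, assuming doxygen is on the PATH, "Doxyfile" is the file generated from this template, and the XML lands in an xml/ subdirectory (all paths here are illustrative):

# Sketch only: run doxygen on the configured Doxyfile, then read the XML back
# with the doxyxml package defined in the files below. Paths are illustrative.
import subprocess

subprocess.check_call(["doxygen", "Doxyfile"])   # writes the xml/ tree

from doxyxml import DoxyIndex, DoxyFunction

di = DoxyIndex("xml/")                           # parses index.xml plus the per-compound files
for func in di.in_category(DoxyFunction):        # every documented function
    print("%s: %s" % (func.name(), func.brief_description))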
diff --git a/gr-aistx/docs/doxygen/doxyxml/__init__.py b/gr-aistx/docs/doxygen/doxyxml/__init__.py
new file mode 100644
index 0000000..5cd0b3c
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/__init__.py
@@ -0,0 +1,82 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Python interface to contents of doxygen xml documentation.
+
+Example use:
+See the contents of the example folder for the C++ and
+doxygen-generated xml used in this example.
+
+>>> # Parse the doxygen docs.
+>>> import os
+>>> this_dir = os.path.dirname(globals()['__file__'])
+>>> xml_path = this_dir + "/example/xml/"
+>>> di = DoxyIndex(xml_path)
+
+Get a list of all top-level objects.
+
+>>> print([mem.name() for mem in di.members()])
+[u'Aadvark', u'aadvarky_enough', u'main']
+
+Get all functions.
+
+>>> print([mem.name() for mem in di.in_category(DoxyFunction)])
+[u'aadvarky_enough', u'main']
+
+Check if an object is present.
+
+>>> di.has_member(u'Aadvark')
+True
+>>> di.has_member(u'Fish')
+False
+
+Get an item by name and check its properties.
+
+>>> aad = di.get_member(u'Aadvark')
+>>> print(aad.brief_description)
+Models the mammal Aadvark.
+>>> print(aad.detailed_description)
+Sadly the model is incomplete and cannot capture all aspects of an aadvark yet.
+<BLANKLINE>
+This line is uninformative and is only to test line breaks in the comments.
+>>> [mem.name() for mem in aad.members()]
+[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness']
+>>> aad.get_member(u'print').brief_description
+u'Outputs the vital aadvark statistics.'
+
+"""
+
+from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
+
+def _test():
+ import os
+ this_dir = os.path.dirname(globals()['__file__'])
+ xml_path = this_dir + "/example/xml/"
+ di = DoxyIndex(xml_path)
+ # Get the Aadvark class
+ aad = di.get_member('Aadvark')
+ aad.brief_description
+ import doctest
+ return doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
diff --git a/gr-aistx/docs/doxygen/doxyxml/base.py b/gr-aistx/docs/doxygen/doxyxml/base.py
new file mode 100644
index 0000000..e8f026a
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/base.py
@@ -0,0 +1,219 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+A base class for the doxyxml wrappers.
+
+Classes based upon this are used to make more user-friendly interfaces
+to the doxygen xml docs than the generated classes provide.
+"""
+
+import os
+import pdb
+
+from xml.parsers.expat import ExpatError
+
+from generated import compound
+
+
+class Base(object):
+
+ class Duplicate(StandardError):
+ pass
+
+ class NoSuchMember(StandardError):
+ pass
+
+ class ParsingError(StandardError):
+ pass
+
+ def __init__(self, parse_data, top=None):
+ self._parsed = False
+ self._error = False
+ self._parse_data = parse_data
+ self._members = []
+ self._dict_members = {}
+ self._in_category = {}
+ self._data = {}
+ if top is not None:
+ self._xml_path = top._xml_path
+ # Set up holder of references
+ else:
+ top = self
+ self._refs = {}
+ self._xml_path = parse_data
+ self.top = top
+
+ @classmethod
+ def from_refid(cls, refid, top=None):
+ """ Instantiate class from a refid rather than parsing object. """
+        # First check to see if it's already been instantiated.
+ if top is not None and refid in top._refs:
+ return top._refs[refid]
+ # Otherwise create a new instance and set refid.
+ inst = cls(None, top=top)
+ inst.refid = refid
+ inst.add_ref(inst)
+ return inst
+
+ @classmethod
+ def from_parse_data(cls, parse_data, top=None):
+ refid = getattr(parse_data, 'refid', None)
+ if refid is not None and top is not None and refid in top._refs:
+ return top._refs[refid]
+ inst = cls(parse_data, top=top)
+ if refid is not None:
+ inst.refid = refid
+ inst.add_ref(inst)
+ return inst
+
+ def add_ref(self, obj):
+ if hasattr(obj, 'refid'):
+ self.top._refs[obj.refid] = obj
+
+ mem_classes = []
+
+ def get_cls(self, mem):
+ for cls in self.mem_classes:
+ if cls.can_parse(mem):
+ return cls
+ raise StandardError(("Did not find a class for object '%s'." \
+ % (mem.get_name())))
+
+ def convert_mem(self, mem):
+ try:
+ cls = self.get_cls(mem)
+ converted = cls.from_parse_data(mem, self.top)
+ if converted is None:
+ raise StandardError('No class matched this object.')
+ self.add_ref(converted)
+ return converted
+ except StandardError, e:
+ print e
+
+ @classmethod
+ def includes(cls, inst):
+ return isinstance(inst, cls)
+
+ @classmethod
+ def can_parse(cls, obj):
+ return False
+
+ def _parse(self):
+ self._parsed = True
+
+ def _get_dict_members(self, cat=None):
+ """
+ For given category a dictionary is returned mapping member names to
+ members of that category. For names that are duplicated the name is
+ mapped to None.
+ """
+ self.confirm_no_error()
+ if cat not in self._dict_members:
+ new_dict = {}
+ for mem in self.in_category(cat):
+ if mem.name() not in new_dict:
+ new_dict[mem.name()] = mem
+ else:
+ new_dict[mem.name()] = self.Duplicate
+ self._dict_members[cat] = new_dict
+ return self._dict_members[cat]
+
+ def in_category(self, cat):
+ self.confirm_no_error()
+ if cat is None:
+ return self._members
+ if cat not in self._in_category:
+ self._in_category[cat] = [mem for mem in self._members
+ if cat.includes(mem)]
+ return self._in_category[cat]
+
+ def get_member(self, name, cat=None):
+ self.confirm_no_error()
+ # Check if it's in a namespace or class.
+ bits = name.split('::')
+ first = bits[0]
+ rest = '::'.join(bits[1:])
+ member = self._get_dict_members(cat).get(first, self.NoSuchMember)
+ # Raise any errors that are returned.
+ if member in set([self.NoSuchMember, self.Duplicate]):
+ raise member()
+ if rest:
+ return member.get_member(rest, cat=cat)
+ return member
+
+ def has_member(self, name, cat=None):
+ try:
+ mem = self.get_member(name, cat=cat)
+ return True
+ except self.NoSuchMember:
+ return False
+
+ def data(self):
+ self.confirm_no_error()
+ return self._data
+
+ def members(self):
+ self.confirm_no_error()
+ return self._members
+
+ def process_memberdefs(self):
+ mdtss = []
+ for sec in self._retrieved_data.compounddef.sectiondef:
+ mdtss += sec.memberdef
+ # At the moment we lose all information associated with sections.
+ # Sometimes a memberdef is in several sectiondef.
+ # We make sure we don't get duplicates here.
+ uniques = set([])
+ for mem in mdtss:
+ converted = self.convert_mem(mem)
+ pair = (mem.name, mem.__class__)
+ if pair not in uniques:
+ uniques.add(pair)
+ self._members.append(converted)
+
+ def retrieve_data(self):
+ filename = os.path.join(self._xml_path, self.refid + '.xml')
+ try:
+ self._retrieved_data = compound.parse(filename)
+ except ExpatError:
+ print('Error in xml in file %s' % filename)
+ self._error = True
+ self._retrieved_data = None
+
+ def check_parsed(self):
+ if not self._parsed:
+ self._parse()
+
+ def confirm_no_error(self):
+ self.check_parsed()
+ if self._error:
+ raise self.ParsingError()
+
+ def error(self):
+ self.check_parsed()
+ return self._error
+
+ def name(self):
+ # first see if we can do it without processing.
+ if self._parse_data is not None:
+ return self._parse_data.name
+ self.check_parsed()
+ return self._retrieved_data.compounddef.name
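Base is the plug-in point for the wrapper classes that follow: each concrete wrapper defines can_parse() and appends itself to Base.mem_classes, get_cls()/convert_mem() hand every parsed XML element to the first class that accepts it, and from_refid()/add_ref() cache one instance per refid on the top-level object. A minimal sketch of that registration pattern, assuming this package layout (DoxyTypedef and its kind string are purely illustrative; the real subclasses live in doxyindex.py below):

# Illustrative subclass following the registration pattern used in doxyindex.py.
from base import Base

class DoxyTypedef(Base):
    # Hypothetical wrapper; kind strings correspond to doxygen's compound/member kinds.
    kind = 'typedef'

    @classmethod
    def can_parse(cls, obj):
        # Base.get_cls() walks Base.mem_classes and picks the first class whose
        # can_parse() accepts the element parsed from the doxygen XML.
        return getattr(obj, 'kind', None) == cls.kind

Base.mem_classes.append(DoxyTypedef)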
diff --git a/gr-aistx/docs/doxygen/doxyxml/doxyindex.py b/gr-aistx/docs/doxygen/doxyxml/doxyindex.py
new file mode 100644
index 0000000..0132ab8
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/doxyindex.py
@@ -0,0 +1,237 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Classes providing more user-friendly interfaces to the doxygen xml
+docs than the generated classes provide.
+"""
+
+import os
+
+from generated import index
+from base import Base
+from text import description
+
+class DoxyIndex(Base):
+ """
+ Parses a doxygen xml directory.
+ """
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyIndex, self)._parse()
+ self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
+ for mem in self._root.compound:
+ converted = self.convert_mem(mem)
+ # For files we want the contents to be accessible directly
+ # from the parent rather than having to go through the file
+ # object.
+ if self.get_cls(mem) == DoxyFile:
+ if mem.name.endswith('.h'):
+ self._members += converted.members()
+ self._members.append(converted)
+ else:
+ self._members.append(converted)
+
+
+def generate_swig_doc_i(self):
+ """
+ %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
+ Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
+ """
+ pass
+
+
+class DoxyCompMem(Base):
+
+
+ kind = None
+
+ def __init__(self, *args, **kwargs):
+ super(DoxyCompMem, self).__init__(*args, **kwargs)
+
+ @classmethod
+ def can_parse(cls, obj):
+ return obj.kind == cls.kind
+
+ def set_descriptions(self, parse_data):
+ bd = description(getattr(parse_data, 'briefdescription', None))
+ dd = description(getattr(parse_data, 'detaileddescription', None))
+ self._data['brief_description'] = bd
+ self._data['detailed_description'] = dd
+
+class DoxyCompound(DoxyCompMem):
+ pass
+
+class DoxyMember(DoxyCompMem):
+ pass
+
+
+class DoxyFunction(DoxyMember):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'function'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyFunction, self)._parse()
+ self.set_descriptions(self._parse_data)
+ self._data['params'] = []
+ prms = self._parse_data.param
+ for prm in prms:
+ self._data['params'].append(DoxyParam(prm))
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+ params = property(lambda self: self.data()['params'])
+
+Base.mem_classes.append(DoxyFunction)
+
+
+class DoxyParam(DoxyMember):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyParam, self)._parse()
+ self.set_descriptions(self._parse_data)
+ self._data['declname'] = self._parse_data.declname
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+ declname = property(lambda self: self.data()['declname'])
+
+class DoxyClass(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'class'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyClass, self)._parse()
+ self.retrieve_data()
+ if self._error:
+ return
+ self.set_descriptions(self._retrieved_data.compounddef)
+        # Sectiondef.kind indicates whether a section is private or public.
+ # We just ignore this for now.
+ self.process_memberdefs()
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+
+Base.mem_classes.append(DoxyClass)
+
+
+class DoxyFile(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'file'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyFile, self)._parse()
+ self.retrieve_data()
+ self.set_descriptions(self._retrieved_data.compounddef)
+ if self._error:
+ return
+ self.process_memberdefs()
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+
+Base.mem_classes.append(DoxyFile)
+
+
+class DoxyNamespace(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'namespace'
+
+Base.mem_classes.append(DoxyNamespace)
+
+
+class DoxyGroup(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'group'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyGroup, self)._parse()
+ self.retrieve_data()
+ if self._error:
+ return
+ cdef = self._retrieved_data.compounddef
+ self._data['title'] = description(cdef.title)
+ # Process inner groups
+ grps = cdef.innergroup
+ for grp in grps:
+ converted = DoxyGroup.from_refid(grp.refid, top=self.top)
+ self._members.append(converted)
+ # Process inner classes
+ klasses = cdef.innerclass
+ for kls in klasses:
+ converted = DoxyClass.from_refid(kls.refid, top=self.top)
+ self._members.append(converted)
+ # Process normal members
+ self.process_memberdefs()
+
+ title = property(lambda self: self.data()['title'])
+
+
+Base.mem_classes.append(DoxyGroup)
+
+
+class DoxyFriend(DoxyMember):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'friend'
+
+Base.mem_classes.append(DoxyFriend)
+
+
+class DoxyOther(Base):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
+
+ @classmethod
+ def can_parse(cls, obj):
+ return obj.kind in cls.kinds
+
+Base.mem_classes.append(DoxyOther)
+
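Note the special case in DoxyIndex._parse: for compounds of kind 'file' whose name ends in '.h', the file's members are promoted into the index's own member list, so symbols documented in headers can be looked up at the top level as well as through their DoxyFile. A short sketch of that behaviour, assuming doxygen XML already exists (the path and symbol name are illustrative):

from doxyxml import DoxyIndex, DoxyFile

di = DoxyIndex("xml/")                     # doxygen XML output directory (illustrative)

for f in di.in_category(DoxyFile):         # the documented header files themselves
    print(f.name())

# Members of *.h files were flattened into the top level by DoxyIndex._parse,
# so a function documented in a header is reachable directly by name:
if di.has_member(u'make_square_ff'):       # illustrative symbol name
    print(di.get_member(u'make_square_ff').brief_description)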
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/__init__.py b/gr-aistx/docs/doxygen/doxyxml/generated/__init__.py
new file mode 100644
index 0000000..3982397
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/__init__.py
@@ -0,0 +1,7 @@
+"""
+Contains generated files produced by generateDS.py.
+
+These do the real work of parsing the doxygen xml files, but the
+resultant classes are not very friendly to navigate, so the rest of the
+doxyxml module processes them further.
+"""
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/compound.py b/gr-aistx/docs/doxygen/doxyxml/generated/compound.py
new file mode 100644
index 0000000..1522ac2
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/compound.py
@@ -0,0 +1,503 @@
+#!/usr/bin/env python
+
+"""
+Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
+"""
+
+from string import lower as str_lower
+from xml.dom import minidom
+from xml.dom import Node
+
+import sys
+
+import compoundsuper as supermod
+from compoundsuper import MixedContainer
+
+
+class DoxygenTypeSub(supermod.DoxygenType):
+ def __init__(self, version=None, compounddef=None):
+ supermod.DoxygenType.__init__(self, version, compounddef)
+
+ def find(self, details):
+
+ return self.compounddef.find(details)
+
+supermod.DoxygenType.subclass = DoxygenTypeSub
+# end class DoxygenTypeSub
+
+
+class compounddefTypeSub(supermod.compounddefType):
+ def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
+ supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
+
+ def find(self, details):
+
+ if self.id == details.refid:
+ return self
+
+ for sectiondef in self.sectiondef:
+ result = sectiondef.find(details)
+ if result:
+ return result
+
+
+supermod.compounddefType.subclass = compounddefTypeSub
+# end class compounddefTypeSub
+
+
+class listofallmembersTypeSub(supermod.listofallmembersType):
+ def __init__(self, member=None):
+ supermod.listofallmembersType.__init__(self, member)
+supermod.listofallmembersType.subclass = listofallmembersTypeSub
+# end class listofallmembersTypeSub
+
+
+class memberRefTypeSub(supermod.memberRefType):
+ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
+ supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
+supermod.memberRefType.subclass = memberRefTypeSub
+# end class memberRefTypeSub
+
+
+class compoundRefTypeSub(supermod.compoundRefType):
+ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.compoundRefType.__init__(self, mixedclass_, content_)
+supermod.compoundRefType.subclass = compoundRefTypeSub
+# end class compoundRefTypeSub
+
+
+class reimplementTypeSub(supermod.reimplementType):
+ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.reimplementType.__init__(self, mixedclass_, content_)
+supermod.reimplementType.subclass = reimplementTypeSub
+# end class reimplementTypeSub
+
+
+class incTypeSub(supermod.incType):
+ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.incType.__init__(self, mixedclass_, content_)
+supermod.incType.subclass = incTypeSub
+# end class incTypeSub
+
+
+class refTypeSub(supermod.refType):
+ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.refType.__init__(self, mixedclass_, content_)
+supermod.refType.subclass = refTypeSub
+# end class refTypeSub
+
+
+
+class refTextTypeSub(supermod.refTextType):
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.refTextType.__init__(self, mixedclass_, content_)
+
+supermod.refTextType.subclass = refTextTypeSub
+# end class refTextTypeSub
+
+class sectiondefTypeSub(supermod.sectiondefType):
+
+
+ def __init__(self, kind=None, header='', description=None, memberdef=None):
+ supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
+
+ def find(self, details):
+
+ for memberdef in self.memberdef:
+ if memberdef.id == details.refid:
+ return memberdef
+
+ return None
+
+
+supermod.sectiondefType.subclass = sectiondefTypeSub
+# end class sectiondefTypeSub
+
+
+class memberdefTypeSub(supermod.memberdefType):
+ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
+ supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
+supermod.memberdefType.subclass = memberdefTypeSub
+# end class memberdefTypeSub
+
+
+class descriptionTypeSub(supermod.descriptionType):
+ def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
+ supermod.descriptionType.__init__(self, mixedclass_, content_)
+supermod.descriptionType.subclass = descriptionTypeSub
+# end class descriptionTypeSub
+
+
+class enumvalueTypeSub(supermod.enumvalueType):
+ def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
+ supermod.enumvalueType.__init__(self, mixedclass_, content_)
+supermod.enumvalueType.subclass = enumvalueTypeSub
+# end class enumvalueTypeSub
+
+
+class templateparamlistTypeSub(supermod.templateparamlistType):
+ def __init__(self, param=None):
+ supermod.templateparamlistType.__init__(self, param)
+supermod.templateparamlistType.subclass = templateparamlistTypeSub
+# end class templateparamlistTypeSub
+
+
+class paramTypeSub(supermod.paramType):
+ def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
+ supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
+supermod.paramType.subclass = paramTypeSub
+# end class paramTypeSub
+
+
+class linkedTextTypeSub(supermod.linkedTextType):
+ def __init__(self, ref=None, mixedclass_=None, content_=None):
+ supermod.linkedTextType.__init__(self, mixedclass_, content_)
+supermod.linkedTextType.subclass = linkedTextTypeSub
+# end class linkedTextTypeSub
+
+
+class graphTypeSub(supermod.graphType):
+ def __init__(self, node=None):
+ supermod.graphType.__init__(self, node)
+supermod.graphType.subclass = graphTypeSub
+# end class graphTypeSub
+
+
+class nodeTypeSub(supermod.nodeType):
+ def __init__(self, id=None, label='', link=None, childnode=None):
+ supermod.nodeType.__init__(self, id, label, link, childnode)
+supermod.nodeType.subclass = nodeTypeSub
+# end class nodeTypeSub
+
+
+class childnodeTypeSub(supermod.childnodeType):
+ def __init__(self, relation=None, refid=None, edgelabel=None):
+ supermod.childnodeType.__init__(self, relation, refid, edgelabel)
+supermod.childnodeType.subclass = childnodeTypeSub
+# end class childnodeTypeSub
+
+
+class linkTypeSub(supermod.linkType):
+ def __init__(self, refid=None, external=None, valueOf_=''):
+ supermod.linkType.__init__(self, refid, external)
+supermod.linkType.subclass = linkTypeSub
+# end class linkTypeSub
+
+
+class listingTypeSub(supermod.listingType):
+ def __init__(self, codeline=None):
+ supermod.listingType.__init__(self, codeline)
+supermod.listingType.subclass = listingTypeSub
+# end class listingTypeSub
+
+
+class codelineTypeSub(supermod.codelineType):
+ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
+ supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
+supermod.codelineType.subclass = codelineTypeSub
+# end class codelineTypeSub
+
+
+class highlightTypeSub(supermod.highlightType):
+ def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
+ supermod.highlightType.__init__(self, mixedclass_, content_)
+supermod.highlightType.subclass = highlightTypeSub
+# end class highlightTypeSub
+
+
+class referenceTypeSub(supermod.referenceType):
+ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.referenceType.__init__(self, mixedclass_, content_)
+supermod.referenceType.subclass = referenceTypeSub
+# end class referenceTypeSub
+
+
+class locationTypeSub(supermod.locationType):
+ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
+ supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
+supermod.locationType.subclass = locationTypeSub
+# end class locationTypeSub
+
+
+class docSect1TypeSub(supermod.docSect1Type):
+ def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect1Type.__init__(self, mixedclass_, content_)
+supermod.docSect1Type.subclass = docSect1TypeSub
+# end class docSect1TypeSub
+
+
+class docSect2TypeSub(supermod.docSect2Type):
+ def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect2Type.__init__(self, mixedclass_, content_)
+supermod.docSect2Type.subclass = docSect2TypeSub
+# end class docSect2TypeSub
+
+
+class docSect3TypeSub(supermod.docSect3Type):
+ def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect3Type.__init__(self, mixedclass_, content_)
+supermod.docSect3Type.subclass = docSect3TypeSub
+# end class docSect3TypeSub
+
+
+class docSect4TypeSub(supermod.docSect4Type):
+ def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect4Type.__init__(self, mixedclass_, content_)
+supermod.docSect4Type.subclass = docSect4TypeSub
+# end class docSect4TypeSub
+
+
+class docInternalTypeSub(supermod.docInternalType):
+ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
+ supermod.docInternalType.__init__(self, mixedclass_, content_)
+supermod.docInternalType.subclass = docInternalTypeSub
+# end class docInternalTypeSub
+
+
+class docInternalS1TypeSub(supermod.docInternalS1Type):
+ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
+ supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS1Type.subclass = docInternalS1TypeSub
+# end class docInternalS1TypeSub
+
+
+class docInternalS2TypeSub(supermod.docInternalS2Type):
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS2Type.subclass = docInternalS2TypeSub
+# end class docInternalS2TypeSub
+
+
+class docInternalS3TypeSub(supermod.docInternalS3Type):
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS3Type.subclass = docInternalS3TypeSub
+# end class docInternalS3TypeSub
+
+
+class docInternalS4TypeSub(supermod.docInternalS4Type):
+ def __init__(self, para=None, mixedclass_=None, content_=None):
+ supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS4Type.subclass = docInternalS4TypeSub
+# end class docInternalS4TypeSub
+
+
+class docURLLinkSub(supermod.docURLLink):
+ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docURLLink.__init__(self, mixedclass_, content_)
+supermod.docURLLink.subclass = docURLLinkSub
+# end class docURLLinkSub
+
+
+class docAnchorTypeSub(supermod.docAnchorType):
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docAnchorType.__init__(self, mixedclass_, content_)
+supermod.docAnchorType.subclass = docAnchorTypeSub
+# end class docAnchorTypeSub
+
+
+class docFormulaTypeSub(supermod.docFormulaType):
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docFormulaType.__init__(self, mixedclass_, content_)
+supermod.docFormulaType.subclass = docFormulaTypeSub
+# end class docFormulaTypeSub
+
+
+class docIndexEntryTypeSub(supermod.docIndexEntryType):
+ def __init__(self, primaryie='', secondaryie=''):
+ supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
+supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
+# end class docIndexEntryTypeSub
+
+
+class docListTypeSub(supermod.docListType):
+ def __init__(self, listitem=None):
+ supermod.docListType.__init__(self, listitem)
+supermod.docListType.subclass = docListTypeSub
+# end class docListTypeSub
+
+
+class docListItemTypeSub(supermod.docListItemType):
+ def __init__(self, para=None):
+ supermod.docListItemType.__init__(self, para)
+supermod.docListItemType.subclass = docListItemTypeSub
+# end class docListItemTypeSub
+
+
+class docSimpleSectTypeSub(supermod.docSimpleSectType):
+ def __init__(self, kind=None, title=None, para=None):
+ supermod.docSimpleSectType.__init__(self, kind, title, para)
+supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
+# end class docSimpleSectTypeSub
+
+
+class docVarListEntryTypeSub(supermod.docVarListEntryType):
+ def __init__(self, term=None):
+ supermod.docVarListEntryType.__init__(self, term)
+supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
+# end class docVarListEntryTypeSub
+
+
+class docRefTextTypeSub(supermod.docRefTextType):
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docRefTextType.__init__(self, mixedclass_, content_)
+supermod.docRefTextType.subclass = docRefTextTypeSub
+# end class docRefTextTypeSub
+
+
+class docTableTypeSub(supermod.docTableType):
+ def __init__(self, rows=None, cols=None, row=None, caption=None):
+ supermod.docTableType.__init__(self, rows, cols, row, caption)
+supermod.docTableType.subclass = docTableTypeSub
+# end class docTableTypeSub
+
+
+class docRowTypeSub(supermod.docRowType):
+ def __init__(self, entry=None):
+ supermod.docRowType.__init__(self, entry)
+supermod.docRowType.subclass = docRowTypeSub
+# end class docRowTypeSub
+
+
+class docEntryTypeSub(supermod.docEntryType):
+ def __init__(self, thead=None, para=None):
+ supermod.docEntryType.__init__(self, thead, para)
+supermod.docEntryType.subclass = docEntryTypeSub
+# end class docEntryTypeSub
+
+
+class docHeadingTypeSub(supermod.docHeadingType):
+ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docHeadingType.__init__(self, mixedclass_, content_)
+supermod.docHeadingType.subclass = docHeadingTypeSub
+# end class docHeadingTypeSub
+
+
+class docImageTypeSub(supermod.docImageType):
+ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docImageType.__init__(self, mixedclass_, content_)
+supermod.docImageType.subclass = docImageTypeSub
+# end class docImageTypeSub
+
+
+class docDotFileTypeSub(supermod.docDotFileType):
+ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docDotFileType.__init__(self, mixedclass_, content_)
+supermod.docDotFileType.subclass = docDotFileTypeSub
+# end class docDotFileTypeSub
+
+
+class docTocItemTypeSub(supermod.docTocItemType):
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docTocItemType.__init__(self, mixedclass_, content_)
+supermod.docTocItemType.subclass = docTocItemTypeSub
+# end class docTocItemTypeSub
+
+
+class docTocListTypeSub(supermod.docTocListType):
+ def __init__(self, tocitem=None):
+ supermod.docTocListType.__init__(self, tocitem)
+supermod.docTocListType.subclass = docTocListTypeSub
+# end class docTocListTypeSub
+
+
+class docLanguageTypeSub(supermod.docLanguageType):
+ def __init__(self, langid=None, para=None):
+ supermod.docLanguageType.__init__(self, langid, para)
+supermod.docLanguageType.subclass = docLanguageTypeSub
+# end class docLanguageTypeSub
+
+
+class docParamListTypeSub(supermod.docParamListType):
+ def __init__(self, kind=None, parameteritem=None):
+ supermod.docParamListType.__init__(self, kind, parameteritem)
+supermod.docParamListType.subclass = docParamListTypeSub
+# end class docParamListTypeSub
+
+
+class docParamListItemSub(supermod.docParamListItem):
+ def __init__(self, parameternamelist=None, parameterdescription=None):
+ supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
+supermod.docParamListItem.subclass = docParamListItemSub
+# end class docParamListItemSub
+
+
+class docParamNameListSub(supermod.docParamNameList):
+ def __init__(self, parametername=None):
+ supermod.docParamNameList.__init__(self, parametername)
+supermod.docParamNameList.subclass = docParamNameListSub
+# end class docParamNameListSub
+
+
+class docParamNameSub(supermod.docParamName):
+ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
+ supermod.docParamName.__init__(self, mixedclass_, content_)
+supermod.docParamName.subclass = docParamNameSub
+# end class docParamNameSub
+
+
+class docXRefSectTypeSub(supermod.docXRefSectType):
+ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
+ supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
+supermod.docXRefSectType.subclass = docXRefSectTypeSub
+# end class docXRefSectTypeSub
+
+
+class docCopyTypeSub(supermod.docCopyType):
+ def __init__(self, link=None, para=None, sect1=None, internal=None):
+ supermod.docCopyType.__init__(self, link, para, sect1, internal)
+supermod.docCopyType.subclass = docCopyTypeSub
+# end class docCopyTypeSub
+
+
+class docCharTypeSub(supermod.docCharType):
+ def __init__(self, char=None, valueOf_=''):
+ supermod.docCharType.__init__(self, char)
+supermod.docCharType.subclass = docCharTypeSub
+# end class docCharTypeSub
+
+class docParaTypeSub(supermod.docParaType):
+ def __init__(self, char=None, valueOf_=''):
+ supermod.docParaType.__init__(self, char)
+
+ self.parameterlist = []
+ self.simplesects = []
+ self.content = []
+
+ def buildChildren(self, child_, nodeName_):
+ supermod.docParaType.buildChildren(self, child_, nodeName_)
+
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == "ref":
+ obj_ = supermod.docRefTextType.factory()
+ obj_.build(child_)
+ self.content.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameterlist':
+ obj_ = supermod.docParamListType.factory()
+ obj_.build(child_)
+ self.parameterlist.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'simplesect':
+ obj_ = supermod.docSimpleSectType.factory()
+ obj_.build(child_)
+ self.simplesects.append(obj_)
+
+
+supermod.docParaType.subclass = docParaTypeSub
+# end class docParaTypeSub
+
+
+
+def parse(inFilename):
+ # Parse a Doxygen XML file and return the populated DoxygenType root object.
+ doc = minidom.parse(inFilename)
+ rootNode = doc.documentElement
+ rootObj = supermod.DoxygenType.factory()
+ rootObj.build(rootNode)
+ return rootObj
+
+
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py b/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py
new file mode 100644
index 0000000..6255dda
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py
@@ -0,0 +1,8342 @@
+#!/usr/bin/env python
+
+#
+# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
+#
+
+import sys
+import getopt
+from string import lower as str_lower
+from xml.dom import minidom
+from xml.dom import Node
+
+#
+# User methods
+#
+# Calls to the methods in these classes are generated by generateDS.py.
+# You can replace these methods by re-implementing the following class
+# in a module named generatedssuper.py.
+
+try:
+ from generatedssuper import GeneratedsSuper
+except ImportError, exp:
+
+ class GeneratedsSuper:
+ def format_string(self, input_data, input_name=''):
+ return input_data
+ def format_integer(self, input_data, input_name=''):
+ return '%d' % input_data
+ def format_float(self, input_data, input_name=''):
+ return '%f' % input_data
+ def format_double(self, input_data, input_name=''):
+ return '%e' % input_data
+ def format_boolean(self, input_data, input_name=''):
+ return '%s' % input_data
+
+
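+# Example (not part of the generated output): the import above means that a
+# user-supplied generatedssuper.py found on the Python path replaces the
+# fallback class, so its GeneratedsSuper becomes the base class of every data
+# class below and can customize the format_* hooks, e.g.:
+#
+## class GeneratedsSuper(object):
+##     def format_string(self, input_data, input_name=''):
+##         # Collapse runs of whitespace before the value is exported.
+##         return ' '.join(input_data.split())
+##     def format_integer(self, input_data, input_name=''):
+##         return '%d' % input_data
+##     def format_float(self, input_data, input_name=''):
+##         return '%f' % input_data
+##     def format_double(self, input_data, input_name=''):
+##         return '%e' % input_data
+##     def format_boolean(self, input_data, input_name=''):
+##         return '%s' % input_data
+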
+#
+# If you have installed IPython you can uncomment and use the following.
+# IPython is available from http://ipython.scipy.org/.
+#
+
+## from IPython.Shell import IPShellEmbed
+## args = ''
+## ipshell = IPShellEmbed(args,
+## banner = 'Dropping into IPython',
+## exit_msg = 'Leaving Interpreter, back to program.')
+
+# Then use the following line where and when you want to drop into the
+# IPython shell:
+# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
+
+#
+# Globals
+#
+
+ExternalEncoding = 'ascii'
+
+#
+# Support/utility functions.
+#
+
+def showIndent(outfile, level):
+ for idx in range(level):
+ outfile.write(' ')
+
+def quote_xml(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ return s1
+
+def quote_attrib(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ if '"' in s1:
+ if "'" in s1:
+ s1 = '"%s"' % s1.replace('"', "&quot;")
+ else:
+ s1 = "'%s'" % s1
+ else:
+ s1 = '"%s"' % s1
+ return s1
+
+def quote_python(inStr):
+ s1 = inStr
+ if s1.find("'") == -1:
+ if s1.find('\n') == -1:
+ return "'%s'" % s1
+ else:
+ return "'''%s'''" % s1
+ else:
+ if s1.find('"') != -1:
+ s1 = s1.replace('"', '\\"')
+ if s1.find('\n') == -1:
+ return '"%s"' % s1
+ else:
+ return '"""%s"""' % s1
+
+
+class MixedContainer:
+ # Constants for category:
+ CategoryNone = 0
+ CategoryText = 1
+ CategorySimple = 2
+ CategoryComplex = 3
+ # Constants for content_type:
+ TypeNone = 0
+ TypeText = 1
+ TypeString = 2
+ TypeInteger = 3
+ TypeFloat = 4
+ TypeDecimal = 5
+ TypeDouble = 6
+ TypeBoolean = 7
+ def __init__(self, category, content_type, name, value):
+ self.category = category
+ self.content_type = content_type
+ self.name = name
+ self.value = value
+ def getCategory(self):
+ return self.category
+ def getContenttype(self, content_type):
+ return self.content_type
+ def getValue(self):
+ return self.value
+ def getName(self):
+ return self.name
+ def export(self, outfile, level, name, namespace):
+ if self.category == MixedContainer.CategoryText:
+ outfile.write(self.value)
+ elif self.category == MixedContainer.CategorySimple:
+ self.exportSimple(outfile, level, name)
+ else: # category == MixedContainer.CategoryComplex
+ self.value.export(outfile, level, namespace,name)
+ def exportSimple(self, outfile, level, name):
+ if self.content_type == MixedContainer.TypeString:
+ outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeInteger or \
+ self.content_type == MixedContainer.TypeBoolean:
+ outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeFloat or \
+ self.content_type == MixedContainer.TypeDecimal:
+ outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeDouble:
+ outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+ def exportLiteral(self, outfile, level, name):
+ if self.category == MixedContainer.CategoryText:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ elif self.category == MixedContainer.CategorySimple:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ else: # category == MixedContainer.CategoryComplex
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s",\n' % \
+ (self.category, self.content_type, self.name,))
+ self.value.exportLiteral(outfile, level + 1)
+ showIndent(outfile, level)
+ outfile.write(')\n')
+
+
+class _MemberSpec(object):
+ def __init__(self, name='', data_type='', container=0):
+ self.name = name
+ self.data_type = data_type
+ self.container = container
+ def set_name(self, name): self.name = name
+ def get_name(self): return self.name
+ def set_data_type(self, data_type): self.data_type = data_type
+ def get_data_type(self): return self.data_type
+ def set_container(self, container): self.container = container
+ def get_container(self): return self.container
+
+
+#
+# Data representation classes.
+#
+
+class DoxygenType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, version=None, compounddef=None):
+ self.version = version
+ self.compounddef = compounddef
+ def factory(*args_, **kwargs_):
+ if DoxygenType.subclass:
+ return DoxygenType.subclass(*args_, **kwargs_)
+ else:
+ return DoxygenType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_compounddef(self): return self.compounddef
+ def set_compounddef(self, compounddef): self.compounddef = compounddef
+ def get_version(self): return self.version
+ def set_version(self, version): self.version = version
+ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
+ outfile.write(' version=%s' % (quote_attrib(self.version), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
+ if self.compounddef:
+ self.compounddef.export(outfile, level, namespace_, name_='compounddef')
+ def hasContent_(self):
+ if (
+ self.compounddef is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='DoxygenType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.version is not None:
+ showIndent(outfile, level)
+ outfile.write('version = "%s",\n' % (self.version,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.compounddef:
+ showIndent(outfile, level)
+ outfile.write('compounddef=model_.compounddefType(\n')
+ self.compounddef.exportLiteral(outfile, level, name_='compounddef')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('version'):
+ self.version = attrs.get('version').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'compounddef':
+ obj_ = compounddefType.factory()
+ obj_.build(child_)
+ self.set_compounddef(obj_)
+# end class DoxygenType
+
+
+class compounddefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
+ self.kind = kind
+ self.prot = prot
+ self.id = id
+ self.compoundname = compoundname
+ self.title = title
+ if basecompoundref is None:
+ self.basecompoundref = []
+ else:
+ self.basecompoundref = basecompoundref
+ if derivedcompoundref is None:
+ self.derivedcompoundref = []
+ else:
+ self.derivedcompoundref = derivedcompoundref
+ if includes is None:
+ self.includes = []
+ else:
+ self.includes = includes
+ if includedby is None:
+ self.includedby = []
+ else:
+ self.includedby = includedby
+ self.incdepgraph = incdepgraph
+ self.invincdepgraph = invincdepgraph
+ if innerdir is None:
+ self.innerdir = []
+ else:
+ self.innerdir = innerdir
+ if innerfile is None:
+ self.innerfile = []
+ else:
+ self.innerfile = innerfile
+ if innerclass is None:
+ self.innerclass = []
+ else:
+ self.innerclass = innerclass
+ if innernamespace is None:
+ self.innernamespace = []
+ else:
+ self.innernamespace = innernamespace
+ if innerpage is None:
+ self.innerpage = []
+ else:
+ self.innerpage = innerpage
+ if innergroup is None:
+ self.innergroup = []
+ else:
+ self.innergroup = innergroup
+ self.templateparamlist = templateparamlist
+ if sectiondef is None:
+ self.sectiondef = []
+ else:
+ self.sectiondef = sectiondef
+ self.briefdescription = briefdescription
+ self.detaileddescription = detaileddescription
+ self.inheritancegraph = inheritancegraph
+ self.collaborationgraph = collaborationgraph
+ self.programlisting = programlisting
+ self.location = location
+ self.listofallmembers = listofallmembers
+ def factory(*args_, **kwargs_):
+ if compounddefType.subclass:
+ return compounddefType.subclass(*args_, **kwargs_)
+ else:
+ return compounddefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_compoundname(self): return self.compoundname
+ def set_compoundname(self, compoundname): self.compoundname = compoundname
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_basecompoundref(self): return self.basecompoundref
+ def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref
+ def add_basecompoundref(self, value): self.basecompoundref.append(value)
+ def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value
+ def get_derivedcompoundref(self): return self.derivedcompoundref
+ def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
+ def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value)
+ def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value
+ def get_includes(self): return self.includes
+ def set_includes(self, includes): self.includes = includes
+ def add_includes(self, value): self.includes.append(value)
+ def insert_includes(self, index, value): self.includes[index] = value
+ def get_includedby(self): return self.includedby
+ def set_includedby(self, includedby): self.includedby = includedby
+ def add_includedby(self, value): self.includedby.append(value)
+ def insert_includedby(self, index, value): self.includedby[index] = value
+ def get_incdepgraph(self): return self.incdepgraph
+ def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph
+ def get_invincdepgraph(self): return self.invincdepgraph
+ def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph
+ def get_innerdir(self): return self.innerdir
+ def set_innerdir(self, innerdir): self.innerdir = innerdir
+ def add_innerdir(self, value): self.innerdir.append(value)
+ def insert_innerdir(self, index, value): self.innerdir[index] = value
+ def get_innerfile(self): return self.innerfile
+ def set_innerfile(self, innerfile): self.innerfile = innerfile
+ def add_innerfile(self, value): self.innerfile.append(value)
+ def insert_innerfile(self, index, value): self.innerfile[index] = value
+ def get_innerclass(self): return self.innerclass
+ def set_innerclass(self, innerclass): self.innerclass = innerclass
+ def add_innerclass(self, value): self.innerclass.append(value)
+ def insert_innerclass(self, index, value): self.innerclass[index] = value
+ def get_innernamespace(self): return self.innernamespace
+ def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace
+ def add_innernamespace(self, value): self.innernamespace.append(value)
+ def insert_innernamespace(self, index, value): self.innernamespace[index] = value
+ def get_innerpage(self): return self.innerpage
+ def set_innerpage(self, innerpage): self.innerpage = innerpage
+ def add_innerpage(self, value): self.innerpage.append(value)
+ def insert_innerpage(self, index, value): self.innerpage[index] = value
+ def get_innergroup(self): return self.innergroup
+ def set_innergroup(self, innergroup): self.innergroup = innergroup
+ def add_innergroup(self, value): self.innergroup.append(value)
+ def insert_innergroup(self, index, value): self.innergroup[index] = value
+ def get_templateparamlist(self): return self.templateparamlist
+ def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def get_sectiondef(self): return self.sectiondef
+ def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef
+ def add_sectiondef(self, value): self.sectiondef.append(value)
+ def insert_sectiondef(self, index, value): self.sectiondef[index] = value
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def get_detaileddescription(self): return self.detaileddescription
+ def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def get_inheritancegraph(self): return self.inheritancegraph
+ def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph
+ def get_collaborationgraph(self): return self.collaborationgraph
+ def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph
+ def get_programlisting(self): return self.programlisting
+ def set_programlisting(self, programlisting): self.programlisting = programlisting
+ def get_location(self): return self.location
+ def set_location(self, location): self.location = location
+ def get_listofallmembers(self): return self.listofallmembers
+ def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='compounddefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'):
+ if self.compoundname is not None:
+ showIndent(outfile, level)
+ outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
+ if self.title is not None:
+ showIndent(outfile, level)
+ outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
+ for basecompoundref_ in self.basecompoundref:
+ basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref')
+ for derivedcompoundref_ in self.derivedcompoundref:
+ derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref')
+ for includes_ in self.includes:
+ includes_.export(outfile, level, namespace_, name_='includes')
+ for includedby_ in self.includedby:
+ includedby_.export(outfile, level, namespace_, name_='includedby')
+ if self.incdepgraph:
+ self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph')
+ if self.invincdepgraph:
+ self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph')
+ for innerdir_ in self.innerdir:
+ innerdir_.export(outfile, level, namespace_, name_='innerdir')
+ for innerfile_ in self.innerfile:
+ innerfile_.export(outfile, level, namespace_, name_='innerfile')
+ for innerclass_ in self.innerclass:
+ innerclass_.export(outfile, level, namespace_, name_='innerclass')
+ for innernamespace_ in self.innernamespace:
+ innernamespace_.export(outfile, level, namespace_, name_='innernamespace')
+ for innerpage_ in self.innerpage:
+ innerpage_.export(outfile, level, namespace_, name_='innerpage')
+ for innergroup_ in self.innergroup:
+ innergroup_.export(outfile, level, namespace_, name_='innergroup')
+ if self.templateparamlist:
+ self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ for sectiondef_ in self.sectiondef:
+ sectiondef_.export(outfile, level, namespace_, name_='sectiondef')
+ if self.briefdescription:
+ self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ if self.detaileddescription:
+ self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ if self.inheritancegraph:
+ self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph')
+ if self.collaborationgraph:
+ self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph')
+ if self.programlisting:
+ self.programlisting.export(outfile, level, namespace_, name_='programlisting')
+ if self.location:
+ self.location.export(outfile, level, namespace_, name_='location')
+ if self.listofallmembers:
+ self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers')
+ def hasContent_(self):
+ if (
+ self.compoundname is not None or
+ self.title is not None or
+ self.basecompoundref is not None or
+ self.derivedcompoundref is not None or
+ self.includes is not None or
+ self.includedby is not None or
+ self.incdepgraph is not None or
+ self.invincdepgraph is not None or
+ self.innerdir is not None or
+ self.innerfile is not None or
+ self.innerclass is not None or
+ self.innernamespace is not None or
+ self.innerpage is not None or
+ self.innergroup is not None or
+ self.templateparamlist is not None or
+ self.sectiondef is not None or
+ self.briefdescription is not None or
+ self.detaileddescription is not None or
+ self.inheritancegraph is not None or
+ self.collaborationgraph is not None or
+ self.programlisting is not None or
+ self.location is not None or
+ self.listofallmembers is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='compounddefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding))
+ if self.title:
+ showIndent(outfile, level)
+ outfile.write('title=model_.xsd_string(\n')
+ self.title.exportLiteral(outfile, level, name_='title')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('basecompoundref=[\n')
+ level += 1
+ for basecompoundref in self.basecompoundref:
+ showIndent(outfile, level)
+ outfile.write('model_.basecompoundref(\n')
+ basecompoundref.exportLiteral(outfile, level, name_='basecompoundref')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('derivedcompoundref=[\n')
+ level += 1
+ for derivedcompoundref in self.derivedcompoundref:
+ showIndent(outfile, level)
+ outfile.write('model_.derivedcompoundref(\n')
+ derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('includes=[\n')
+ level += 1
+ for includes in self.includes:
+ showIndent(outfile, level)
+ outfile.write('model_.includes(\n')
+ includes.exportLiteral(outfile, level, name_='includes')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('includedby=[\n')
+ level += 1
+ for includedby in self.includedby:
+ showIndent(outfile, level)
+ outfile.write('model_.includedby(\n')
+ includedby.exportLiteral(outfile, level, name_='includedby')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.incdepgraph:
+ showIndent(outfile, level)
+ outfile.write('incdepgraph=model_.graphType(\n')
+ self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.invincdepgraph:
+ showIndent(outfile, level)
+ outfile.write('invincdepgraph=model_.graphType(\n')
+ self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('innerdir=[\n')
+ level += 1
+ for innerdir in self.innerdir:
+ showIndent(outfile, level)
+ outfile.write('model_.innerdir(\n')
+ innerdir.exportLiteral(outfile, level, name_='innerdir')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innerfile=[\n')
+ level += 1
+ for innerfile in self.innerfile:
+ showIndent(outfile, level)
+ outfile.write('model_.innerfile(\n')
+ innerfile.exportLiteral(outfile, level, name_='innerfile')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innerclass=[\n')
+ level += 1
+ for innerclass in self.innerclass:
+ showIndent(outfile, level)
+ outfile.write('model_.innerclass(\n')
+ innerclass.exportLiteral(outfile, level, name_='innerclass')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innernamespace=[\n')
+ level += 1
+ for innernamespace in self.innernamespace:
+ showIndent(outfile, level)
+ outfile.write('model_.innernamespace(\n')
+ innernamespace.exportLiteral(outfile, level, name_='innernamespace')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innerpage=[\n')
+ level += 1
+ for innerpage in self.innerpage:
+ showIndent(outfile, level)
+ outfile.write('model_.innerpage(\n')
+ innerpage.exportLiteral(outfile, level, name_='innerpage')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innergroup=[\n')
+ level += 1
+ for innergroup in self.innergroup:
+ showIndent(outfile, level)
+ outfile.write('model_.innergroup(\n')
+ innergroup.exportLiteral(outfile, level, name_='innergroup')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.templateparamlist:
+ showIndent(outfile, level)
+ outfile.write('templateparamlist=model_.templateparamlistType(\n')
+ self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('sectiondef=[\n')
+ level += 1
+ for sectiondef in self.sectiondef:
+ showIndent(outfile, level)
+ outfile.write('model_.sectiondef(\n')
+ sectiondef.exportLiteral(outfile, level, name_='sectiondef')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.briefdescription:
+ showIndent(outfile, level)
+ outfile.write('briefdescription=model_.descriptionType(\n')
+ self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.detaileddescription:
+ showIndent(outfile, level)
+ outfile.write('detaileddescription=model_.descriptionType(\n')
+ self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.inheritancegraph:
+ showIndent(outfile, level)
+ outfile.write('inheritancegraph=model_.graphType(\n')
+ self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.collaborationgraph:
+ showIndent(outfile, level)
+ outfile.write('collaborationgraph=model_.graphType(\n')
+ self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.programlisting:
+ showIndent(outfile, level)
+ outfile.write('programlisting=model_.listingType(\n')
+ self.programlisting.exportLiteral(outfile, level, name_='programlisting')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.location:
+ showIndent(outfile, level)
+ outfile.write('location=model_.locationType(\n')
+ self.location.exportLiteral(outfile, level, name_='location')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.listofallmembers:
+ showIndent(outfile, level)
+ outfile.write('listofallmembers=model_.listofallmembersType(\n')
+ self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'compoundname':
+ compoundname_ = ''
+ for text__content_ in child_.childNodes:
+ compoundname_ += text__content_.nodeValue
+ self.compoundname = compoundname_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ obj_ = docTitleType.factory()
+ obj_.build(child_)
+ self.set_title(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'basecompoundref':
+ obj_ = compoundRefType.factory()
+ obj_.build(child_)
+ self.basecompoundref.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'derivedcompoundref':
+ obj_ = compoundRefType.factory()
+ obj_.build(child_)
+ self.derivedcompoundref.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'includes':
+ obj_ = incType.factory()
+ obj_.build(child_)
+ self.includes.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'includedby':
+ obj_ = incType.factory()
+ obj_.build(child_)
+ self.includedby.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'incdepgraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_incdepgraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'invincdepgraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_invincdepgraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerdir':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerdir.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerfile':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerfile.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerclass':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerclass.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innernamespace':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innernamespace.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerpage':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerpage.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innergroup':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innergroup.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'templateparamlist':
+ obj_ = templateparamlistType.factory()
+ obj_.build(child_)
+ self.set_templateparamlist(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sectiondef':
+ obj_ = sectiondefType.factory()
+ obj_.build(child_)
+ self.sectiondef.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_briefdescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'detaileddescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_detaileddescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'inheritancegraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_inheritancegraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'collaborationgraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_collaborationgraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'programlisting':
+ obj_ = listingType.factory()
+ obj_.build(child_)
+ self.set_programlisting(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'location':
+ obj_ = locationType.factory()
+ obj_.build(child_)
+ self.set_location(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'listofallmembers':
+ obj_ = listofallmembersType.factory()
+ obj_.build(child_)
+ self.set_listofallmembers(obj_)
+# end class compounddefType
+
+
+class listofallmembersType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, member=None):
+ if member is None:
+ self.member = []
+ else:
+ self.member = member
+ def factory(*args_, **kwargs_):
+ if listofallmembersType.subclass:
+ return listofallmembersType.subclass(*args_, **kwargs_)
+ else:
+ return listofallmembersType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_member(self): return self.member
+ def set_member(self, member): self.member = member
+ def add_member(self, value): self.member.append(value)
+ def insert_member(self, index, value): self.member[index] = value
+ def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'):
+ for member_ in self.member:
+ member_.export(outfile, level, namespace_, name_='member')
+ def hasContent_(self):
+ if (
+ self.member is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='listofallmembersType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('member=[\n')
+ level += 1
+ for member in self.member:
+ showIndent(outfile, level)
+ outfile.write('model_.member(\n')
+ member.exportLiteral(outfile, level, name_='member')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'member':
+ obj_ = memberRefType.factory()
+ obj_.build(child_)
+ self.member.append(obj_)
+# end class listofallmembersType
+
+
+class memberRefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
+ self.virt = virt
+ self.prot = prot
+ self.refid = refid
+ self.ambiguityscope = ambiguityscope
+ self.scope = scope
+ self.name = name
+ def factory(*args_, **kwargs_):
+ if memberRefType.subclass:
+ return memberRefType.subclass(*args_, **kwargs_)
+ else:
+ return memberRefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_scope(self): return self.scope
+ def set_scope(self, scope): self.scope = scope
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_virt(self): return self.virt
+ def set_virt(self, virt): self.virt = virt
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_ambiguityscope(self): return self.ambiguityscope
+ def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
+ def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
+ if self.virt is not None:
+ outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.ambiguityscope is not None:
+ outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
+ if self.scope is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ def hasContent_(self):
+ if (
+ self.scope is not None or
+ self.name is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='memberRefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.virt is not None:
+ showIndent(outfile, level)
+ outfile.write('virt = "%s",\n' % (self.virt,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.ambiguityscope is not None:
+ showIndent(outfile, level)
+ outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('virt'):
+ self.virt = attrs.get('virt').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('ambiguityscope'):
+ self.ambiguityscope = attrs.get('ambiguityscope').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'scope':
+ scope_ = ''
+ for text__content_ in child_.childNodes:
+ scope_ += text__content_.nodeValue
+ self.scope = scope_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+# end class memberRefType
+
+
+class scope(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if scope.subclass:
+ return scope.subclass(*args_, **kwargs_)
+ else:
+ return scope(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='scope')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='scope'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='scope'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class scope
+
+
+class name(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if name.subclass:
+ return name.subclass(*args_, **kwargs_)
+ else:
+ return name(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='name')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='name'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='name'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='name'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class name
+
+
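+# Mixed-content reference to another compound; carries virt/prot/refid attributes
+# in addition to its text content (content_ collects MixedContainer entries).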
+class compoundRefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.virt = virt
+ self.prot = prot
+ self.refid = refid
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if compoundRefType.subclass:
+ return compoundRefType.subclass(*args_, **kwargs_)
+ else:
+ return compoundRefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_virt(self): return self.virt
+ def set_virt(self, virt): self.virt = virt
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='compoundRefType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'):
+ if self.virt is not None:
+ outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='compoundRefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.virt is not None:
+ showIndent(outfile, level)
+ outfile.write('virt = "%s",\n' % (self.virt,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('virt'):
+ self.virt = attrs.get('virt').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class compoundRefType
+
+
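+# refid-only reference, used further down by memberdefType for its 'reimplements'
+# and 'reimplementedby' children.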
+class reimplementType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.refid = refid
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if reimplementType.subclass:
+ return reimplementType.subclass(*args_, **kwargs_)
+ else:
+ return reimplementType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='reimplementType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class reimplementType
+
+
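+# Reference carrying 'local' and 'refid' attributes plus mixed text content.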
+class incType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.local = local
+ self.refid = refid
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if incType.subclass:
+ return incType.subclass(*args_, **kwargs_)
+ else:
+ return incType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_local(self): return self.local
+ def set_local(self, local): self.local = local
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='incType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
+ if self.local is not None:
+ outfile.write(' local=%s' % (quote_attrib(self.local), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='incType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='incType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.local is not None:
+ showIndent(outfile, level)
+ outfile.write('local = "%s",\n' % (self.local,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('local'):
+ self.local = attrs.get('local').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class incType
+
+
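+# Generic reference with 'prot' and 'refid' attributes; same mixed-content
+# handling as compoundRefType above.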
+class refType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.prot = prot
+ self.refid = refid
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if refType.subclass:
+ return refType.subclass(*args_, **kwargs_)
+ else:
+ return refType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='refType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='refType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='refType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class refType
+
+
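+# Text-bearing reference with 'refid', 'kindref' and 'external' attributes
+# (typically the inline <ref> element inside linked text).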
+class refTextType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ self.refid = refid
+ self.kindref = kindref
+ self.external = external
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if refTextType.subclass:
+ return refTextType.subclass(*args_, **kwargs_)
+ else:
+ return refTextType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_kindref(self): return self.kindref
+ def set_kindref(self, kindref): self.kindref = kindref
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='refTextType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.kindref is not None:
+ outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
+ if self.external is not None:
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='refTextType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.kindref is not None:
+ showIndent(outfile, level)
+ outfile.write('kindref = "%s",\n' % (self.kindref,))
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = %s,\n' % (self.external,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('kindref'):
+ self.kindref = attrs.get('kindref').value
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class refTextType
+
+
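+# A member section of a compound: a 'kind' attribute, optional header and
+# description, and a list of memberdef children.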
+class sectiondefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, header=None, description=None, memberdef=None):
+ self.kind = kind
+ self.header = header
+ self.description = description
+ if memberdef is None:
+ self.memberdef = []
+ else:
+ self.memberdef = memberdef
+ def factory(*args_, **kwargs_):
+ if sectiondefType.subclass:
+ return sectiondefType.subclass(*args_, **kwargs_)
+ else:
+ return sectiondefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_header(self): return self.header
+ def set_header(self, header): self.header = header
+ def get_description(self): return self.description
+ def set_description(self, description): self.description = description
+ def get_memberdef(self): return self.memberdef
+ def set_memberdef(self, memberdef): self.memberdef = memberdef
+ def add_memberdef(self, value): self.memberdef.append(value)
+ def insert_memberdef(self, index, value): self.memberdef[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
+ if self.header is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
+ if self.description:
+ self.description.export(outfile, level, namespace_, name_='description')
+ for memberdef_ in self.memberdef:
+ memberdef_.export(outfile, level, namespace_, name_='memberdef')
+ def hasContent_(self):
+ if (
+ self.header is not None or
+ self.description is not None or
+ self.memberdef is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='sectiondefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
+ if self.description:
+ showIndent(outfile, level)
+ outfile.write('description=model_.descriptionType(\n')
+ self.description.exportLiteral(outfile, level, name_='description')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('memberdef=[\n')
+ level += 1
+ for memberdef in self.memberdef:
+ showIndent(outfile, level)
+ outfile.write('model_.memberdef(\n')
+ memberdef.exportLiteral(outfile, level, name_='memberdef')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'header':
+ header_ = ''
+ for text__content_ in child_.childNodes:
+ header_ += text__content_.nodeValue
+ self.header = header_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'description':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_description(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'memberdef':
+ obj_ = memberdefType.factory()
+ obj_.build(child_)
+ self.memberdef.append(obj_)
+# end class sectiondefType
+
+
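+# A single documented member (function, variable, enum, ...): a large set of
+# flag attributes (const, static, virt, prot, ...) plus child elements such as
+# type, name, param, enumvalue, descriptions, location and cross-references.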
+class memberdefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
+ self.initonly = initonly
+ self.kind = kind
+ self.volatile = volatile
+ self.const = const
+ self.raisexx = raisexx
+ self.virt = virt
+ self.readable = readable
+ self.prot = prot
+ self.explicit = explicit
+ self.new = new
+ self.final = final
+ self.writable = writable
+ self.add = add
+ self.static = static
+ self.remove = remove
+ self.sealed = sealed
+ self.mutable = mutable
+ self.gettable = gettable
+ self.inline = inline
+ self.settable = settable
+ self.id = id
+ self.templateparamlist = templateparamlist
+ self.type_ = type_
+ self.definition = definition
+ self.argsstring = argsstring
+ self.name = name
+ self.read = read
+ self.write = write
+ self.bitfield = bitfield
+ if reimplements is None:
+ self.reimplements = []
+ else:
+ self.reimplements = reimplements
+ if reimplementedby is None:
+ self.reimplementedby = []
+ else:
+ self.reimplementedby = reimplementedby
+ if param is None:
+ self.param = []
+ else:
+ self.param = param
+ if enumvalue is None:
+ self.enumvalue = []
+ else:
+ self.enumvalue = enumvalue
+ self.initializer = initializer
+ self.exceptions = exceptions
+ self.briefdescription = briefdescription
+ self.detaileddescription = detaileddescription
+ self.inbodydescription = inbodydescription
+ self.location = location
+ if references is None:
+ self.references = []
+ else:
+ self.references = references
+ if referencedby is None:
+ self.referencedby = []
+ else:
+ self.referencedby = referencedby
+ def factory(*args_, **kwargs_):
+ if memberdefType.subclass:
+ return memberdefType.subclass(*args_, **kwargs_)
+ else:
+ return memberdefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_templateparamlist(self): return self.templateparamlist
+ def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def get_type(self): return self.type_
+ def set_type(self, type_): self.type_ = type_
+ def get_definition(self): return self.definition
+ def set_definition(self, definition): self.definition = definition
+ def get_argsstring(self): return self.argsstring
+ def set_argsstring(self, argsstring): self.argsstring = argsstring
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_read(self): return self.read
+ def set_read(self, read): self.read = read
+ def get_write(self): return self.write
+ def set_write(self, write): self.write = write
+ def get_bitfield(self): return self.bitfield
+ def set_bitfield(self, bitfield): self.bitfield = bitfield
+ def get_reimplements(self): return self.reimplements
+ def set_reimplements(self, reimplements): self.reimplements = reimplements
+ def add_reimplements(self, value): self.reimplements.append(value)
+ def insert_reimplements(self, index, value): self.reimplements[index] = value
+ def get_reimplementedby(self): return self.reimplementedby
+ def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby
+ def add_reimplementedby(self, value): self.reimplementedby.append(value)
+ def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value
+ def get_param(self): return self.param
+ def set_param(self, param): self.param = param
+ def add_param(self, value): self.param.append(value)
+ def insert_param(self, index, value): self.param[index] = value
+ def get_enumvalue(self): return self.enumvalue
+ def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue
+ def add_enumvalue(self, value): self.enumvalue.append(value)
+ def insert_enumvalue(self, index, value): self.enumvalue[index] = value
+ def get_initializer(self): return self.initializer
+ def set_initializer(self, initializer): self.initializer = initializer
+ def get_exceptions(self): return self.exceptions
+ def set_exceptions(self, exceptions): self.exceptions = exceptions
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def get_detaileddescription(self): return self.detaileddescription
+ def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def get_inbodydescription(self): return self.inbodydescription
+ def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription
+ def get_location(self): return self.location
+ def set_location(self, location): self.location = location
+ def get_references(self): return self.references
+ def set_references(self, references): self.references = references
+ def add_references(self, value): self.references.append(value)
+ def insert_references(self, index, value): self.references[index] = value
+ def get_referencedby(self): return self.referencedby
+ def set_referencedby(self, referencedby): self.referencedby = referencedby
+ def add_referencedby(self, value): self.referencedby.append(value)
+ def insert_referencedby(self, index, value): self.referencedby[index] = value
+ def get_initonly(self): return self.initonly
+ def set_initonly(self, initonly): self.initonly = initonly
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_volatile(self): return self.volatile
+ def set_volatile(self, volatile): self.volatile = volatile
+ def get_const(self): return self.const
+ def set_const(self, const): self.const = const
+ def get_raise(self): return self.raisexx
+ def set_raise(self, raisexx): self.raisexx = raisexx
+ def get_virt(self): return self.virt
+ def set_virt(self, virt): self.virt = virt
+ def get_readable(self): return self.readable
+ def set_readable(self, readable): self.readable = readable
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_explicit(self): return self.explicit
+ def set_explicit(self, explicit): self.explicit = explicit
+ def get_new(self): return self.new
+ def set_new(self, new): self.new = new
+ def get_final(self): return self.final
+ def set_final(self, final): self.final = final
+ def get_writable(self): return self.writable
+ def set_writable(self, writable): self.writable = writable
+ def get_add(self): return self.add
+ def set_add(self, add): self.add = add
+ def get_static(self): return self.static
+ def set_static(self, static): self.static = static
+ def get_remove(self): return self.remove
+ def set_remove(self, remove): self.remove = remove
+ def get_sealed(self): return self.sealed
+ def set_sealed(self, sealed): self.sealed = sealed
+ def get_mutable(self): return self.mutable
+ def set_mutable(self, mutable): self.mutable = mutable
+ def get_gettable(self): return self.gettable
+ def set_gettable(self, gettable): self.gettable = gettable
+ def get_inline(self): return self.inline
+ def set_inline(self, inline): self.inline = inline
+ def get_settable(self): return self.settable
+ def set_settable(self, settable): self.settable = settable
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='memberdefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'):
+ if self.initonly is not None:
+ outfile.write(' initonly=%s' % (quote_attrib(self.initonly), ))
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ if self.volatile is not None:
+ outfile.write(' volatile=%s' % (quote_attrib(self.volatile), ))
+ if self.const is not None:
+ outfile.write(' const=%s' % (quote_attrib(self.const), ))
+ if self.raisexx is not None:
+ outfile.write(' raise=%s' % (quote_attrib(self.raisexx), ))
+ if self.virt is not None:
+ outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
+ if self.readable is not None:
+ outfile.write(' readable=%s' % (quote_attrib(self.readable), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.explicit is not None:
+ outfile.write(' explicit=%s' % (quote_attrib(self.explicit), ))
+ if self.new is not None:
+ outfile.write(' new=%s' % (quote_attrib(self.new), ))
+ if self.final is not None:
+ outfile.write(' final=%s' % (quote_attrib(self.final), ))
+ if self.writable is not None:
+ outfile.write(' writable=%s' % (quote_attrib(self.writable), ))
+ if self.add is not None:
+ outfile.write(' add=%s' % (quote_attrib(self.add), ))
+ if self.static is not None:
+ outfile.write(' static=%s' % (quote_attrib(self.static), ))
+ if self.remove is not None:
+ outfile.write(' remove=%s' % (quote_attrib(self.remove), ))
+ if self.sealed is not None:
+ outfile.write(' sealed=%s' % (quote_attrib(self.sealed), ))
+ if self.mutable is not None:
+ outfile.write(' mutable=%s' % (quote_attrib(self.mutable), ))
+ if self.gettable is not None:
+ outfile.write(' gettable=%s' % (quote_attrib(self.gettable), ))
+ if self.inline is not None:
+ outfile.write(' inline=%s' % (quote_attrib(self.inline), ))
+ if self.settable is not None:
+ outfile.write(' settable=%s' % (quote_attrib(self.settable), ))
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'):
+ if self.templateparamlist:
+ self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ if self.type_:
+ self.type_.export(outfile, level, namespace_, name_='type')
+ if self.definition is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
+ if self.argsstring is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ if self.read is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
+ if self.write is not None:
+ showIndent(outfile, level)
+ outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
+ if self.bitfield is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
+ for reimplements_ in self.reimplements:
+ reimplements_.export(outfile, level, namespace_, name_='reimplements')
+ for reimplementedby_ in self.reimplementedby:
+ reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby')
+ for param_ in self.param:
+ param_.export(outfile, level, namespace_, name_='param')
+ for enumvalue_ in self.enumvalue:
+ enumvalue_.export(outfile, level, namespace_, name_='enumvalue')
+ if self.initializer:
+ self.initializer.export(outfile, level, namespace_, name_='initializer')
+ if self.exceptions:
+ self.exceptions.export(outfile, level, namespace_, name_='exceptions')
+ if self.briefdescription:
+ self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ if self.detaileddescription:
+ self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ if self.inbodydescription:
+ self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription')
+ if self.location:
+ self.location.export(outfile, level, namespace_, name_='location', )
+ for references_ in self.references:
+ references_.export(outfile, level, namespace_, name_='references')
+ for referencedby_ in self.referencedby:
+ referencedby_.export(outfile, level, namespace_, name_='referencedby')
+ def hasContent_(self):
+ if (
+ self.templateparamlist is not None or
+ self.type_ is not None or
+ self.definition is not None or
+ self.argsstring is not None or
+ self.name is not None or
+ self.read is not None or
+ self.write is not None or
+ self.bitfield is not None or
+ self.reimplements is not None or
+ self.reimplementedby is not None or
+ self.param is not None or
+ self.enumvalue is not None or
+ self.initializer is not None or
+ self.exceptions is not None or
+ self.briefdescription is not None or
+ self.detaileddescription is not None or
+ self.inbodydescription is not None or
+ self.location is not None or
+ self.references is not None or
+ self.referencedby is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='memberdefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.initonly is not None:
+ showIndent(outfile, level)
+ outfile.write('initonly = "%s",\n' % (self.initonly,))
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.volatile is not None:
+ showIndent(outfile, level)
+ outfile.write('volatile = "%s",\n' % (self.volatile,))
+ if self.const is not None:
+ showIndent(outfile, level)
+ outfile.write('const = "%s",\n' % (self.const,))
+ if self.raisexx is not None:
+ showIndent(outfile, level)
+ outfile.write('raisexx = "%s",\n' % (self.raisexx,))
+ if self.virt is not None:
+ showIndent(outfile, level)
+ outfile.write('virt = "%s",\n' % (self.virt,))
+ if self.readable is not None:
+ showIndent(outfile, level)
+ outfile.write('readable = "%s",\n' % (self.readable,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.explicit is not None:
+ showIndent(outfile, level)
+ outfile.write('explicit = "%s",\n' % (self.explicit,))
+ if self.new is not None:
+ showIndent(outfile, level)
+ outfile.write('new = "%s",\n' % (self.new,))
+ if self.final is not None:
+ showIndent(outfile, level)
+ outfile.write('final = "%s",\n' % (self.final,))
+ if self.writable is not None:
+ showIndent(outfile, level)
+ outfile.write('writable = "%s",\n' % (self.writable,))
+ if self.add is not None:
+ showIndent(outfile, level)
+ outfile.write('add = "%s",\n' % (self.add,))
+ if self.static is not None:
+ showIndent(outfile, level)
+ outfile.write('static = "%s",\n' % (self.static,))
+ if self.remove is not None:
+ showIndent(outfile, level)
+ outfile.write('remove = "%s",\n' % (self.remove,))
+ if self.sealed is not None:
+ showIndent(outfile, level)
+ outfile.write('sealed = "%s",\n' % (self.sealed,))
+ if self.mutable is not None:
+ showIndent(outfile, level)
+ outfile.write('mutable = "%s",\n' % (self.mutable,))
+ if self.gettable is not None:
+ showIndent(outfile, level)
+ outfile.write('gettable = "%s",\n' % (self.gettable,))
+ if self.inline is not None:
+ showIndent(outfile, level)
+ outfile.write('inline = "%s",\n' % (self.inline,))
+ if self.settable is not None:
+ showIndent(outfile, level)
+ outfile.write('settable = "%s",\n' % (self.settable,))
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.templateparamlist:
+ showIndent(outfile, level)
+ outfile.write('templateparamlist=model_.templateparamlistType(\n')
+ self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.type_:
+ showIndent(outfile, level)
+ outfile.write('type_=model_.linkedTextType(\n')
+ self.type_.exportLiteral(outfile, level, name_='type')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('reimplements=[\n')
+ level += 1
+ for reimplements in self.reimplements:
+ showIndent(outfile, level)
+ outfile.write('model_.reimplements(\n')
+ reimplements.exportLiteral(outfile, level, name_='reimplements')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('reimplementedby=[\n')
+ level += 1
+ for reimplementedby in self.reimplementedby:
+ showIndent(outfile, level)
+ outfile.write('model_.reimplementedby(\n')
+ reimplementedby.exportLiteral(outfile, level, name_='reimplementedby')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('param=[\n')
+ level += 1
+ for param in self.param:
+ showIndent(outfile, level)
+ outfile.write('model_.param(\n')
+ param.exportLiteral(outfile, level, name_='param')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('enumvalue=[\n')
+ level += 1
+ for enumvalue in self.enumvalue:
+ showIndent(outfile, level)
+ outfile.write('model_.enumvalue(\n')
+ enumvalue.exportLiteral(outfile, level, name_='enumvalue')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.initializer:
+ showIndent(outfile, level)
+ outfile.write('initializer=model_.linkedTextType(\n')
+ self.initializer.exportLiteral(outfile, level, name_='initializer')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.exceptions:
+ showIndent(outfile, level)
+ outfile.write('exceptions=model_.linkedTextType(\n')
+ self.exceptions.exportLiteral(outfile, level, name_='exceptions')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.briefdescription:
+ showIndent(outfile, level)
+ outfile.write('briefdescription=model_.descriptionType(\n')
+ self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.detaileddescription:
+ showIndent(outfile, level)
+ outfile.write('detaileddescription=model_.descriptionType(\n')
+ self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.inbodydescription:
+ showIndent(outfile, level)
+ outfile.write('inbodydescription=model_.descriptionType(\n')
+ self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.location:
+ showIndent(outfile, level)
+ outfile.write('location=model_.locationType(\n')
+ self.location.exportLiteral(outfile, level, name_='location')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('references=[\n')
+ level += 1
+ for references in self.references:
+ showIndent(outfile, level)
+ outfile.write('model_.references(\n')
+ references.exportLiteral(outfile, level, name_='references')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('referencedby=[\n')
+ level += 1
+ for referencedby in self.referencedby:
+ showIndent(outfile, level)
+ outfile.write('model_.referencedby(\n')
+ referencedby.exportLiteral(outfile, level, name_='referencedby')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('initonly'):
+ self.initonly = attrs.get('initonly').value
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('volatile'):
+ self.volatile = attrs.get('volatile').value
+ if attrs.get('const'):
+ self.const = attrs.get('const').value
+ if attrs.get('raise'):
+ self.raisexx = attrs.get('raise').value
+ if attrs.get('virt'):
+ self.virt = attrs.get('virt').value
+ if attrs.get('readable'):
+ self.readable = attrs.get('readable').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('explicit'):
+ self.explicit = attrs.get('explicit').value
+ if attrs.get('new'):
+ self.new = attrs.get('new').value
+ if attrs.get('final'):
+ self.final = attrs.get('final').value
+ if attrs.get('writable'):
+ self.writable = attrs.get('writable').value
+ if attrs.get('add'):
+ self.add = attrs.get('add').value
+ if attrs.get('static'):
+ self.static = attrs.get('static').value
+ if attrs.get('remove'):
+ self.remove = attrs.get('remove').value
+ if attrs.get('sealed'):
+ self.sealed = attrs.get('sealed').value
+ if attrs.get('mutable'):
+ self.mutable = attrs.get('mutable').value
+ if attrs.get('gettable'):
+ self.gettable = attrs.get('gettable').value
+ if attrs.get('inline'):
+ self.inline = attrs.get('inline').value
+ if attrs.get('settable'):
+ self.settable = attrs.get('settable').value
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'templateparamlist':
+ obj_ = templateparamlistType.factory()
+ obj_.build(child_)
+ self.set_templateparamlist(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'type':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_type(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'definition':
+ definition_ = ''
+ for text__content_ in child_.childNodes:
+ definition_ += text__content_.nodeValue
+ self.definition = definition_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'argsstring':
+ argsstring_ = ''
+ for text__content_ in child_.childNodes:
+ argsstring_ += text__content_.nodeValue
+ self.argsstring = argsstring_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'read':
+ read_ = ''
+ for text__content_ in child_.childNodes:
+ read_ += text__content_.nodeValue
+ self.read = read_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'write':
+ write_ = ''
+ for text__content_ in child_.childNodes:
+ write_ += text__content_.nodeValue
+ self.write = write_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'bitfield':
+ bitfield_ = ''
+ for text__content_ in child_.childNodes:
+ bitfield_ += text__content_.nodeValue
+ self.bitfield = bitfield_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'reimplements':
+ obj_ = reimplementType.factory()
+ obj_.build(child_)
+ self.reimplements.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'reimplementedby':
+ obj_ = reimplementType.factory()
+ obj_.build(child_)
+ self.reimplementedby.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'param':
+ obj_ = paramType.factory()
+ obj_.build(child_)
+ self.param.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'enumvalue':
+ obj_ = enumvalueType.factory()
+ obj_.build(child_)
+ self.enumvalue.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'initializer':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_initializer(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'exceptions':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_exceptions(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_briefdescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'detaileddescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_detaileddescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'inbodydescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_inbodydescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'location':
+ obj_ = locationType.factory()
+ obj_.build(child_)
+ self.set_location(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'references':
+ obj_ = referenceType.factory()
+ obj_.build(child_)
+ self.references.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'referencedby':
+ obj_ = referenceType.factory()
+ obj_.build(child_)
+ self.referencedby.append(obj_)
+# end class memberdefType
+
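+# How these generated wrappers are typically driven (a minimal sketch assuming the
+# usual generateDS/minidom pattern; the file name and element choice are illustrative):
+#
+#     import sys
+#     from xml.dom import minidom
+#
+#     dom = minidom.parse('compound.xml')                  # hypothetical Doxygen XML file
+#     node = dom.getElementsByTagName('memberdef')[0]      # pick one <memberdef> element
+#     member = memberdefType.factory()                     # instantiate via the factory hook
+#     member.build(node)                                   # populate attributes and children
+#     member.export(sys.stdout, 0)                         # re-serialise as XML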
+
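+# definition, argsstring, read, write and bitfield below are identical plain
+# text wrappers, generated for elements whose only content is character data.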
+class definition(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if definition.subclass:
+ return definition.subclass(*args_, **kwargs_)
+ else:
+ return definition(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='definition')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='definition'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='definition'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class definition
+
+
+class argsstring(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if argsstring.subclass:
+ return argsstring.subclass(*args_, **kwargs_)
+ else:
+ return argsstring(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='argsstring')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='argsstring'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class argsstring
+
+
+class read(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if read.subclass:
+ return read.subclass(*args_, **kwargs_)
+ else:
+ return read(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='read')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='read'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='read'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='read'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class read
+
+
+class write(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if write.subclass:
+ return write.subclass(*args_, **kwargs_)
+ else:
+ return write(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='write')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='write'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='write'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='write'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class write
+
+
+class bitfield(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if bitfield.subclass:
+ return bitfield.subclass(*args_, **kwargs_)
+ else:
+ return bitfield(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='bitfield')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='bitfield'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class bitfield
+
+
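+# descriptionType models Doxygen description blocks (briefdescription,
+# detaileddescription): title/para/sect1/internal children and any interleaved
+# text are collected in document order into self.content_ as MixedContainer
+# entries, which exportChildren() then replays verbatim.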
+class descriptionType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+    def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
+        self.title = title
+        self.para = para
+        self.sect1 = sect1
+        self.internal = internal
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if descriptionType.subclass:
+ return descriptionType.subclass(*args_, **kwargs_)
+ else:
+ return descriptionType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect1(self): return self.sect1
+ def set_sect1(self, sect1): self.sect1 = sect1
+ def add_sect1(self, value): self.sect1.append(value)
+ def insert_sect1(self, index, value): self.sect1[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect1 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='descriptionType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('content_ = [\n')
+        for item_ in self.content_:
+            item_.exportLiteral(outfile, level, name_)
+        showIndent(outfile, level)
+        outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect1':
+ childobj_ = docSect1Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect1', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class descriptionType
+
+
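+# enumvalueType represents a single <enumvalue> element: prot/id attributes
+# plus name, initializer and the brief/detailed descriptions, all appended to
+# self.content_ in document order.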
+class enumvalueType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+    def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
+        self.prot = prot
+        self.id = id
+        self.name = name
+        self.initializer = initializer
+        self.briefdescription = briefdescription
+        self.detaileddescription = detaileddescription
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if enumvalueType.subclass:
+ return enumvalueType.subclass(*args_, **kwargs_)
+ else:
+ return enumvalueType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_initializer(self): return self.initializer
+ def set_initializer(self, initializer): self.initializer = initializer
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def get_detaileddescription(self): return self.detaileddescription
+ def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.name is not None or
+ self.initializer is not None or
+ self.briefdescription is not None or
+ self.detaileddescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='enumvalueType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('content_ = [\n')
+        for item_ in self.content_:
+            item_.exportLiteral(outfile, level, name_)
+        showIndent(outfile, level)
+        outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ value_ = []
+ for text_ in child_.childNodes:
+ value_.append(text_.nodeValue)
+ valuestr_ = ''.join(value_)
+ obj_ = self.mixedclass_(MixedContainer.CategorySimple,
+ MixedContainer.TypeString, 'name', valuestr_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'initializer':
+ childobj_ = linkedTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'initializer', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ childobj_ = descriptionType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'briefdescription', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'detaileddescription':
+ childobj_ = descriptionType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'detaileddescription', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class enumvalueType
+
+
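+# templateparamlistType is a plain container: a <templateparamlist> element
+# holding zero or more <param> children (see paramType below).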
+class templateparamlistType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, param=None):
+ if param is None:
+ self.param = []
+ else:
+ self.param = param
+ def factory(*args_, **kwargs_):
+ if templateparamlistType.subclass:
+ return templateparamlistType.subclass(*args_, **kwargs_)
+ else:
+ return templateparamlistType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_param(self): return self.param
+ def set_param(self, param): self.param = param
+ def add_param(self, value): self.param.append(value)
+ def insert_param(self, index, value): self.param[index] = value
+ def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
+ for param_ in self.param:
+ param_.export(outfile, level, namespace_, name_='param')
+ def hasContent_(self):
+ if (
+ self.param is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='templateparamlistType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('param=[\n')
+ level += 1
+ for param in self.param:
+ showIndent(outfile, level)
+ outfile.write('model_.param(\n')
+ param.exportLiteral(outfile, level, name_='param')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'param':
+ obj_ = paramType.factory()
+ obj_.build(child_)
+ self.param.append(obj_)
+# end class templateparamlistType
+
+
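+# paramType describes one parameter: its type (a linkedTextType, so it can
+# embed cross-references), declname/defname/array strings, an optional default
+# value (defval) and an optional briefdescription.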
+class paramType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
+ self.type_ = type_
+ self.declname = declname
+ self.defname = defname
+ self.array = array
+ self.defval = defval
+ self.briefdescription = briefdescription
+ def factory(*args_, **kwargs_):
+ if paramType.subclass:
+ return paramType.subclass(*args_, **kwargs_)
+ else:
+ return paramType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_type(self): return self.type_
+ def set_type(self, type_): self.type_ = type_
+ def get_declname(self): return self.declname
+ def set_declname(self, declname): self.declname = declname
+ def get_defname(self): return self.defname
+ def set_defname(self, defname): self.defname = defname
+ def get_array(self): return self.array
+ def set_array(self, array): self.array = array
+ def get_defval(self): return self.defval
+ def set_defval(self, defval): self.defval = defval
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='paramType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='paramType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='paramType'):
+ if self.type_:
+ self.type_.export(outfile, level, namespace_, name_='type')
+ if self.declname is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
+ if self.defname is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
+ if self.array is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
+ if self.defval:
+ self.defval.export(outfile, level, namespace_, name_='defval')
+ if self.briefdescription:
+ self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ def hasContent_(self):
+ if (
+ self.type_ is not None or
+ self.declname is not None or
+ self.defname is not None or
+ self.array is not None or
+ self.defval is not None or
+ self.briefdescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='paramType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.type_:
+ showIndent(outfile, level)
+ outfile.write('type_=model_.linkedTextType(\n')
+ self.type_.exportLiteral(outfile, level, name_='type')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding))
+ if self.defval:
+ showIndent(outfile, level)
+ outfile.write('defval=model_.linkedTextType(\n')
+ self.defval.exportLiteral(outfile, level, name_='defval')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.briefdescription:
+ showIndent(outfile, level)
+ outfile.write('briefdescription=model_.descriptionType(\n')
+ self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'type':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_type(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'declname':
+ declname_ = ''
+ for text__content_ in child_.childNodes:
+ declname_ += text__content_.nodeValue
+ self.declname = declname_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'defname':
+ defname_ = ''
+ for text__content_ in child_.childNodes:
+ defname_ += text__content_.nodeValue
+ self.defname = defname_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'array':
+ array_ = ''
+ for text__content_ in child_.childNodes:
+ array_ += text__content_.nodeValue
+ self.array = array_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'defval':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_defval(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_briefdescription(obj_)
+# end class paramType
+
+
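+# Standalone simple-text bindings for the declname, defname and array
+# elements, mirroring the corresponding fields of paramType above.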
+class declname(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if declname.subclass:
+ return declname.subclass(*args_, **kwargs_)
+ else:
+ return declname(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='declname')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='declname'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='declname'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class declname
+
+
+class defname(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if defname.subclass:
+ return defname.subclass(*args_, **kwargs_)
+ else:
+ return defname(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='defname')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='defname'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='defname'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class defname
+
+
+class array(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if array.subclass:
+ return array.subclass(*args_, **kwargs_)
+ else:
+ return array(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='array')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='array'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='array'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='array'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class array
+
+
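+# linkedTextType holds text interleaved with <ref> cross-references (used for
+# parameter types, initializers and default values); the pieces are stored in
+# self.content_ so the original ordering survives re-export.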
+class linkedTextType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+    def __init__(self, ref=None, mixedclass_=None, content_=None):
+        self.ref = ref
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if linkedTextType.subclass:
+ return linkedTextType.subclass(*args_, **kwargs_)
+ else:
+ return linkedTextType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_ref(self): return self.ref
+ def set_ref(self, ref): self.ref = ref
+ def add_ref(self, value): self.ref.append(value)
+ def insert_ref(self, index, value): self.ref[index] = value
+ def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.ref is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='linkedTextType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'ref':
+ childobj_ = docRefTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'ref', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class linkedTextType
+
+
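+# graphType, nodeType and childnodeType model Doxygen's graph elements
+# (e.g. inheritance/collaboration graphs): a graph is a list of nodes, each
+# node carries a label, an optional link and its outgoing child edges
+# (relation/refid attributes plus edge labels).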
+class graphType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, node=None):
+ if node is None:
+ self.node = []
+ else:
+ self.node = node
+ def factory(*args_, **kwargs_):
+ if graphType.subclass:
+ return graphType.subclass(*args_, **kwargs_)
+ else:
+ return graphType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_node(self): return self.node
+ def set_node(self, node): self.node = node
+ def add_node(self, value): self.node.append(value)
+ def insert_node(self, index, value): self.node[index] = value
+ def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='graphType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
+ for node_ in self.node:
+ node_.export(outfile, level, namespace_, name_='node')
+ def hasContent_(self):
+ if (
+ self.node is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='graphType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('node=[\n')
+ level += 1
+ for node in self.node:
+ showIndent(outfile, level)
+ outfile.write('model_.node(\n')
+ node.exportLiteral(outfile, level, name_='node')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'node':
+ obj_ = nodeType.factory()
+ obj_.build(child_)
+ self.node.append(obj_)
+# end class graphType
+
+
+class nodeType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, label=None, link=None, childnode=None):
+ self.id = id
+ self.label = label
+ self.link = link
+ if childnode is None:
+ self.childnode = []
+ else:
+ self.childnode = childnode
+ def factory(*args_, **kwargs_):
+ if nodeType.subclass:
+ return nodeType.subclass(*args_, **kwargs_)
+ else:
+ return nodeType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_label(self): return self.label
+ def set_label(self, label): self.label = label
+ def get_link(self): return self.link
+ def set_link(self, link): self.link = link
+ def get_childnode(self): return self.childnode
+ def set_childnode(self, childnode): self.childnode = childnode
+ def add_childnode(self, value): self.childnode.append(value)
+ def insert_childnode(self, index, value): self.childnode[index] = value
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='nodeType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
+ if self.label is not None:
+ showIndent(outfile, level)
+ outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
+ if self.link:
+ self.link.export(outfile, level, namespace_, name_='link')
+ for childnode_ in self.childnode:
+ childnode_.export(outfile, level, namespace_, name_='childnode')
+ def hasContent_(self):
+ if (
+ self.label is not None or
+ self.link is not None or
+ self.childnode is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='nodeType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
+ if self.link:
+ showIndent(outfile, level)
+ outfile.write('link=model_.linkType(\n')
+ self.link.exportLiteral(outfile, level, name_='link')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('childnode=[\n')
+ level += 1
+ for childnode in self.childnode:
+ showIndent(outfile, level)
+ outfile.write('model_.childnode(\n')
+ childnode.exportLiteral(outfile, level, name_='childnode')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'label':
+ label_ = ''
+ for text__content_ in child_.childNodes:
+ label_ += text__content_.nodeValue
+ self.label = label_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'link':
+ obj_ = linkType.factory()
+ obj_.build(child_)
+ self.set_link(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'childnode':
+ obj_ = childnodeType.factory()
+ obj_.build(child_)
+ self.childnode.append(obj_)
+# end class nodeType
+
+
+class label(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if label.subclass:
+ return label.subclass(*args_, **kwargs_)
+ else:
+ return label(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='label')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='label'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='label'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='label'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class label
+
+
+class childnodeType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, relation=None, refid=None, edgelabel=None):
+ self.relation = relation
+ self.refid = refid
+ if edgelabel is None:
+ self.edgelabel = []
+ else:
+ self.edgelabel = edgelabel
+ def factory(*args_, **kwargs_):
+ if childnodeType.subclass:
+ return childnodeType.subclass(*args_, **kwargs_)
+ else:
+ return childnodeType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_edgelabel(self): return self.edgelabel
+ def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel
+ def add_edgelabel(self, value): self.edgelabel.append(value)
+ def insert_edgelabel(self, index, value): self.edgelabel[index] = value
+ def get_relation(self): return self.relation
+ def set_relation(self, relation): self.relation = relation
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
+ if self.relation is not None:
+ outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
+ for edgelabel_ in self.edgelabel:
+ showIndent(outfile, level)
+ outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
+ def hasContent_(self):
+ if (
+ self.edgelabel is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='childnodeType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.relation is not None:
+ showIndent(outfile, level)
+ outfile.write('relation = "%s",\n' % (self.relation,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('edgelabel=[\n')
+ level += 1
+ for edgelabel in self.edgelabel:
+ showIndent(outfile, level)
+ outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('relation'):
+ self.relation = attrs.get('relation').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'edgelabel':
+ edgelabel_ = ''
+ for text__content_ in child_.childNodes:
+ edgelabel_ += text__content_.nodeValue
+ self.edgelabel.append(edgelabel_)
+# end class childnodeType
+
+
+class edgelabel(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if edgelabel.subclass:
+ return edgelabel.subclass(*args_, **kwargs_)
+ else:
+ return edgelabel(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='edgelabel')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='edgelabel'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class edgelabel
+
+
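+# linkType is a hyperlink target attached to a graph node: either an internal
+# refid or an external reference, plus any literal link text.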
+class linkType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, external=None, valueOf_=''):
+ self.refid = refid
+ self.external = external
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if linkType.subclass:
+ return linkType.subclass(*args_, **kwargs_)
+ else:
+ return linkType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='linkType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.external is not None:
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='linkType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = %s,\n' % (self.external,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class linkType
+
+
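+# listingType, codelineType and highlightType model code-listing blocks
+# (Doxygen <programlisting>): a listing is a sequence of code lines, each line
+# carrying lineno/refkind/refid attributes and a list of syntax-highlight
+# spans whose class attribute names the highlight style.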
+class listingType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, codeline=None):
+ if codeline is None:
+ self.codeline = []
+ else:
+ self.codeline = codeline
+ def factory(*args_, **kwargs_):
+ if listingType.subclass:
+ return listingType.subclass(*args_, **kwargs_)
+ else:
+ return listingType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_codeline(self): return self.codeline
+ def set_codeline(self, codeline): self.codeline = codeline
+ def add_codeline(self, value): self.codeline.append(value)
+ def insert_codeline(self, index, value): self.codeline[index] = value
+ def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='listingType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
+ for codeline_ in self.codeline:
+ codeline_.export(outfile, level, namespace_, name_='codeline')
+ def hasContent_(self):
+ if (
+ self.codeline is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='listingType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('codeline=[\n')
+ level += 1
+ for codeline in self.codeline:
+ showIndent(outfile, level)
+ outfile.write('model_.codeline(\n')
+ codeline.exportLiteral(outfile, level, name_='codeline')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'codeline':
+ obj_ = codelineType.factory()
+ obj_.build(child_)
+ self.codeline.append(obj_)
+# end class listingType
+
+
+class codelineType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
+ self.external = external
+ self.lineno = lineno
+ self.refkind = refkind
+ self.refid = refid
+ if highlight is None:
+ self.highlight = []
+ else:
+ self.highlight = highlight
+ def factory(*args_, **kwargs_):
+ if codelineType.subclass:
+ return codelineType.subclass(*args_, **kwargs_)
+ else:
+ return codelineType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_highlight(self): return self.highlight
+ def set_highlight(self, highlight): self.highlight = highlight
+ def add_highlight(self, value): self.highlight.append(value)
+ def insert_highlight(self, index, value): self.highlight[index] = value
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def get_lineno(self): return self.lineno
+ def set_lineno(self, lineno): self.lineno = lineno
+ def get_refkind(self): return self.refkind
+ def set_refkind(self, refkind): self.refkind = refkind
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='codelineType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
+ if self.external is not None:
+ outfile.write(' external=%s' % (quote_attrib(self.external), ))
+ if self.lineno is not None:
+ outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
+ if self.refkind is not None:
+ outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
+ for highlight_ in self.highlight:
+ highlight_.export(outfile, level, namespace_, name_='highlight')
+ def hasContent_(self):
+ if (
+ self.highlight is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='codelineType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = "%s",\n' % (self.external,))
+ if self.lineno is not None:
+ showIndent(outfile, level)
+ outfile.write('lineno = %s,\n' % (self.lineno,))
+ if self.refkind is not None:
+ showIndent(outfile, level)
+ outfile.write('refkind = "%s",\n' % (self.refkind,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('highlight=[\n')
+ level += 1
+ for highlight in self.highlight:
+ showIndent(outfile, level)
+ outfile.write('model_.highlight(\n')
+ highlight.exportLiteral(outfile, level, name_='highlight')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ if attrs.get('lineno'):
+ try:
+ self.lineno = int(attrs.get('lineno').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (lineno): %s' % exp)
+ if attrs.get('refkind'):
+ self.refkind = attrs.get('refkind').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'highlight':
+ obj_ = highlightType.factory()
+ obj_.build(child_)
+ self.highlight.append(obj_)
+# end class codelineType
+
+
+class highlightType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
+ self.classxx = classxx
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if highlightType.subclass:
+ return highlightType.subclass(*args_, **kwargs_)
+ else:
+ return highlightType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_sp(self): return self.sp
+ def set_sp(self, sp): self.sp = sp
+ def add_sp(self, value): self.sp.append(value)
+ def insert_sp(self, index, value): self.sp[index] = value
+ def get_ref(self): return self.ref
+ def set_ref(self, ref): self.ref = ref
+ def add_ref(self, value): self.ref.append(value)
+ def insert_ref(self, index, value): self.ref[index] = value
+ def get_class(self): return self.classxx
+ def set_class(self, classxx): self.classxx = classxx
+ def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='highlightType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
+ if self.classxx is not None:
+ outfile.write(' class=%s' % (quote_attrib(self.classxx), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.sp is not None or
+ self.ref is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='highlightType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.classxx is not None:
+ showIndent(outfile, level)
+ outfile.write('classxx = "%s",\n' % (self.classxx,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('class'):
+ self.classxx = attrs.get('class').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sp':
+ value_ = []
+ for text_ in child_.childNodes:
+ value_.append(text_.nodeValue)
+ valuestr_ = ''.join(value_)
+ obj_ = self.mixedclass_(MixedContainer.CategorySimple,
+ MixedContainer.TypeString, 'sp', valuestr_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'ref':
+ childobj_ = docRefTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'ref', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class highlightType
+
+
+class sp(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if sp.subclass:
+ return sp.subclass(*args_, **kwargs_)
+ else:
+ return sp(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='sp')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='sp'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='sp'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class sp
+
+
+class referenceType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
+ self.endline = endline
+ self.startline = startline
+ self.refid = refid
+ self.compoundref = compoundref
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if referenceType.subclass:
+ return referenceType.subclass(*args_, **kwargs_)
+ else:
+ return referenceType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_endline(self): return self.endline
+ def set_endline(self, endline): self.endline = endline
+ def get_startline(self): return self.startline
+ def set_startline(self, startline): self.startline = startline
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_compoundref(self): return self.compoundref
+ def set_compoundref(self, compoundref): self.compoundref = compoundref
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='referenceType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
+ if self.endline is not None:
+ outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
+ if self.startline is not None:
+ outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.compoundref is not None:
+ outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='referenceType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.endline is not None:
+ showIndent(outfile, level)
+ outfile.write('endline = %s,\n' % (self.endline,))
+ if self.startline is not None:
+ showIndent(outfile, level)
+ outfile.write('startline = %s,\n' % (self.startline,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.compoundref is not None:
+ showIndent(outfile, level)
+ outfile.write('compoundref = %s,\n' % (self.compoundref,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('endline'):
+ try:
+ self.endline = int(attrs.get('endline').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (endline): %s' % exp)
+ if attrs.get('startline'):
+ try:
+ self.startline = int(attrs.get('startline').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (startline): %s' % exp)
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('compoundref'):
+ self.compoundref = attrs.get('compoundref').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class referenceType
+
+
+class locationType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
+ self.bodystart = bodystart
+ self.line = line
+ self.bodyend = bodyend
+ self.bodyfile = bodyfile
+ self.file = file
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if locationType.subclass:
+ return locationType.subclass(*args_, **kwargs_)
+ else:
+ return locationType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_bodystart(self): return self.bodystart
+ def set_bodystart(self, bodystart): self.bodystart = bodystart
+ def get_line(self): return self.line
+ def set_line(self, line): self.line = line
+ def get_bodyend(self): return self.bodyend
+ def set_bodyend(self, bodyend): self.bodyend = bodyend
+ def get_bodyfile(self): return self.bodyfile
+ def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile
+ def get_file(self): return self.file
+ def set_file(self, file): self.file = file
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='locationType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
+ if self.bodystart is not None:
+ outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
+ if self.line is not None:
+ outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
+ if self.bodyend is not None:
+ outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
+ if self.bodyfile is not None:
+ outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
+ if self.file is not None:
+ outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='locationType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.bodystart is not None:
+ showIndent(outfile, level)
+ outfile.write('bodystart = %s,\n' % (self.bodystart,))
+ if self.line is not None:
+ showIndent(outfile, level)
+ outfile.write('line = %s,\n' % (self.line,))
+ if self.bodyend is not None:
+ showIndent(outfile, level)
+ outfile.write('bodyend = %s,\n' % (self.bodyend,))
+ if self.bodyfile is not None:
+ showIndent(outfile, level)
+ outfile.write('bodyfile = %s,\n' % (self.bodyfile,))
+ if self.file is not None:
+ showIndent(outfile, level)
+ outfile.write('file = %s,\n' % (self.file,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('bodystart'):
+ try:
+ self.bodystart = int(attrs.get('bodystart').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (bodystart): %s' % exp)
+ if attrs.get('line'):
+ try:
+ self.line = int(attrs.get('line').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (line): %s' % exp)
+ if attrs.get('bodyend'):
+ try:
+ self.bodyend = int(attrs.get('bodyend').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (bodyend): %s' % exp)
+ if attrs.get('bodyfile'):
+ self.bodyfile = attrs.get('bodyfile').value
+ if attrs.get('file'):
+ self.file = attrs.get('file').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class locationType
+
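+# Illustrative sketch (not part of the generated API): the classes above are
+# populated from minidom nodes via build(); attribute getters then return the
+# parsed values. For example, for locationType:
+#
+#   from xml.dom import minidom
+#   dom = minidom.parseString('<location file="lib/foo.cc" line="42"/>')
+#   loc = locationType.factory()
+#   loc.build(dom.documentElement)
+#   print loc.get_file(), loc.get_line()   # -> lib/foo.cc 42
+#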
+
+class docSect1Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect1Type.subclass:
+ return docSect1Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect1Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect2(self): return self.sect2
+ def set_sect2(self, sect2): self.sect2 = sect2
+ def add_sect2(self, value): self.sect2.append(value)
+ def insert_sect2(self, index, value): self.sect2[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect1Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect2 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect1Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect2':
+ childobj_ = docSect2Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect2', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS1Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect1Type
+
+
+class docSect2Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect2Type.subclass:
+ return docSect2Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect2Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect3(self): return self.sect3
+ def set_sect3(self, sect3): self.sect3 = sect3
+ def add_sect3(self, value): self.sect3.append(value)
+ def insert_sect3(self, index, value): self.sect3[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect2Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect3 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect2Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect3':
+ childobj_ = docSect3Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect3', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS2Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect2Type
+
+
+class docSect3Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect3Type.subclass:
+ return docSect3Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect3Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect4(self): return self.sect4
+ def set_sect4(self, sect4): self.sect4 = sect4
+ def add_sect4(self, value): self.sect4.append(value)
+ def insert_sect4(self, index, value): self.sect4[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect3Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect4 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect3Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect4':
+ childobj_ = docSect4Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect4', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS3Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect3Type
+
+
+class docSect4Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect4Type.subclass:
+ return docSect4Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect4Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect4Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect4Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS4Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect4Type
+
+
+class docInternalType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalType.subclass:
+ return docInternalType.subclass(*args_, **kwargs_)
+ else:
+ return docInternalType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect1(self): return self.sect1
+ def set_sect1(self, sect1): self.sect1 = sect1
+ def add_sect1(self, value): self.sect1.append(value)
+ def insert_sect1(self, index, value): self.sect1[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect1 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect1':
+ childobj_ = docSect1Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect1', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalType
+
+
+class docInternalS1Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS1Type.subclass:
+ return docInternalS1Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS1Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect2(self): return self.sect2
+ def set_sect2(self, sect2): self.sect2 = sect2
+ def add_sect2(self, value): self.sect2.append(value)
+ def insert_sect2(self, index, value): self.sect2[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect2 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS1Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect2':
+ childobj_ = docSect2Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect2', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS1Type
+
+
+class docInternalS2Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS2Type.subclass:
+ return docInternalS2Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS2Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect3(self): return self.sect3
+ def set_sect3(self, sect3): self.sect3 = sect3
+ def add_sect3(self, value): self.sect3.append(value)
+ def insert_sect3(self, index, value): self.sect3[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect3 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS2Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect3':
+ childobj_ = docSect3Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect3', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS2Type
+
+
+class docInternalS3Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS3Type.subclass:
+ return docInternalS3Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS3Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect3(self): return self.sect3
+ def set_sect3(self, sect3): self.sect3 = sect3
+ def add_sect3(self, value): self.sect3.append(value)
+ def insert_sect3(self, index, value): self.sect3[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect3 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS3Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect3':
+ childobj_ = docSect4Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect3', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS3Type
+
+
+class docInternalS4Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS4Type.subclass:
+ return docInternalS4Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS4Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS4Type
+
+
+class docTitleType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docTitleType.subclass:
+ return docTitleType.subclass(*args_, **kwargs_)
+ else:
+ return docTitleType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTitleType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTitleType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docTitleType
+
+
+class docParaType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docParaType.subclass:
+ return docParaType.subclass(*args_, **kwargs_)
+ else:
+ return docParaType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParaType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParaType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docParaType
+
+
+class docMarkupType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docMarkupType.subclass:
+ return docMarkupType.subclass(*args_, **kwargs_)
+ else:
+ return docMarkupType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docMarkupType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docMarkupType
+
+
+class docURLLink(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
+ self.url = url
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docURLLink.subclass:
+ return docURLLink.subclass(*args_, **kwargs_)
+ else:
+ return docURLLink(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_url(self): return self.url
+ def set_url(self, url): self.url = url
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docURLLink')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
+ if self.url is not None:
+ outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docURLLink'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.url is not None:
+ showIndent(outfile, level)
+ outfile.write('url = %s,\n' % (self.url,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('url'):
+ self.url = attrs.get('url').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docURLLink
+
+
+class docAnchorType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docAnchorType.subclass:
+ return docAnchorType.subclass(*args_, **kwargs_)
+ else:
+ return docAnchorType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docAnchorType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docAnchorType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docAnchorType
+
+
+class docFormulaType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docFormulaType.subclass:
+ return docFormulaType.subclass(*args_, **kwargs_)
+ else:
+ return docFormulaType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docFormulaType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docFormulaType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docFormulaType
+
+
+class docIndexEntryType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, primaryie=None, secondaryie=None):
+ self.primaryie = primaryie
+ self.secondaryie = secondaryie
+ def factory(*args_, **kwargs_):
+ if docIndexEntryType.subclass:
+ return docIndexEntryType.subclass(*args_, **kwargs_)
+ else:
+ return docIndexEntryType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_primaryie(self): return self.primaryie
+ def set_primaryie(self, primaryie): self.primaryie = primaryie
+ def get_secondaryie(self): return self.secondaryie
+ def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie
+ def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'):
+ if self.primaryie is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
+ if self.secondaryie is not None:
+ showIndent(outfile, level)
+ outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
+ def hasContent_(self):
+ if (
+ self.primaryie is not None or
+ self.secondaryie is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docIndexEntryType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'primaryie':
+ primaryie_ = ''
+ for text__content_ in child_.childNodes:
+ primaryie_ += text__content_.nodeValue
+ self.primaryie = primaryie_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'secondaryie':
+ secondaryie_ = ''
+ for text__content_ in child_.childNodes:
+ secondaryie_ += text__content_.nodeValue
+ self.secondaryie = secondaryie_
+# end class docIndexEntryType
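+#
+# Hypothetical sketch (not part of the generated bindings): unlike the
+# mixed-content classes above, docIndexEntryType keeps its children as plain
+# strings, so it can be filled in directly and exported without a DOM node:
+#
+#     entry = docIndexEntryType.factory()
+#     entry.set_primaryie('AIVDM')
+#     entry.set_secondaryie('payload encoding')
+#     entry.export(sys.stdout, 0, name_='indexentry')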
+
+
+class docListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, listitem=None):
+ if listitem is None:
+ self.listitem = []
+ else:
+ self.listitem = listitem
+ def factory(*args_, **kwargs_):
+ if docListType.subclass:
+ return docListType.subclass(*args_, **kwargs_)
+ else:
+ return docListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_listitem(self): return self.listitem
+ def set_listitem(self, listitem): self.listitem = listitem
+ def add_listitem(self, value): self.listitem.append(value)
+ def insert_listitem(self, index, value): self.listitem[index] = value
+ def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docListType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docListType'):
+ for listitem_ in self.listitem:
+ listitem_.export(outfile, level, namespace_, name_='listitem')
+ def hasContent_(self):
+ if (
+ self.listitem is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('listitem=[\n')
+ level += 1
+ for listitem in self.listitem:
+ showIndent(outfile, level)
+ outfile.write('model_.listitem(\n')
+ listitem.exportLiteral(outfile, level, name_='listitem')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'listitem':
+ obj_ = docListItemType.factory()
+ obj_.build(child_)
+ self.listitem.append(obj_)
+# end class docListType
+
+
+class docListItemType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None):
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docListItemType.subclass:
+ return docListItemType.subclass(*args_, **kwargs_)
+ else:
+ return docListItemType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docListItemType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docListItemType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docListItemType
+
+
+class docSimpleSectType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, title=None, para=None):
+ self.kind = kind
+ self.title = title
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docSimpleSectType.subclass:
+ return docSimpleSectType.subclass(*args_, **kwargs_)
+ else:
+ return docSimpleSectType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'):
+ if self.title:
+ self.title.export(outfile, level, namespace_, name_='title')
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSimpleSectType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.title:
+ showIndent(outfile, level)
+ outfile.write('title=model_.docTitleType(\n')
+ self.title.exportLiteral(outfile, level, name_='title')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ obj_ = docTitleType.factory()
+ obj_.build(child_)
+ self.set_title(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docSimpleSectType
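+#
+# Hypothetical round trip (not part of the generated bindings) showing how
+# docSimpleSectType combines a 'kind' attribute, an optional docTitleType
+# child and a list of docParaType children; build() dispatches on the child
+# element name and export() writes them back out:
+#
+#     xml = '<simplesect kind="note"><title>T</title><para>P</para></simplesect>'
+#     sect = docSimpleSectType.factory()
+#     sect.build(minidom.parseString(xml).documentElement)
+#     sect.export(sys.stdout, 0, name_='simplesect')   # kind='note', one para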
+
+
+class docVarListEntryType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, term=None):
+ self.term = term
+ def factory(*args_, **kwargs_):
+ if docVarListEntryType.subclass:
+ return docVarListEntryType.subclass(*args_, **kwargs_)
+ else:
+ return docVarListEntryType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_term(self): return self.term
+ def set_term(self, term): self.term = term
+ def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
+ if self.term:
+ self.term.export(outfile, level, namespace_, name_='term', )
+ def hasContent_(self):
+ if (
+ self.term is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.term:
+ showIndent(outfile, level)
+ outfile.write('term=model_.docTitleType(\n')
+ self.term.exportLiteral(outfile, level, name_='term')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'term':
+ obj_ = docTitleType.factory()
+ obj_.build(child_)
+ self.set_term(obj_)
+# end class docVarListEntryType
+
+
+class docVariableListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if docVariableListType.subclass:
+ return docVariableListType.subclass(*args_, **kwargs_)
+ else:
+ return docVariableListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docVariableListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docVariableListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docVariableListType
+
+
+class docRefTextType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ self.refid = refid
+ self.kindref = kindref
+ self.external = external
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docRefTextType.subclass:
+ return docRefTextType.subclass(*args_, **kwargs_)
+ else:
+ return docRefTextType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_kindref(self): return self.kindref
+ def set_kindref(self, kindref): self.kindref = kindref
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docRefTextType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.kindref is not None:
+ outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
+ if self.external is not None:
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docRefTextType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.kindref is not None:
+ showIndent(outfile, level)
+ outfile.write('kindref = "%s",\n' % (self.kindref,))
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = %s,\n' % (self.external,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('kindref'):
+ self.kindref = attrs.get('kindref').value
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docRefTextType
+
+
+class docTableType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, rows=None, cols=None, row=None, caption=None):
+ self.rows = rows
+ self.cols = cols
+ if row is None:
+ self.row = []
+ else:
+ self.row = row
+ self.caption = caption
+ def factory(*args_, **kwargs_):
+ if docTableType.subclass:
+ return docTableType.subclass(*args_, **kwargs_)
+ else:
+ return docTableType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_row(self): return self.row
+ def set_row(self, row): self.row = row
+ def add_row(self, value): self.row.append(value)
+ def insert_row(self, index, value): self.row[index] = value
+ def get_caption(self): return self.caption
+ def set_caption(self, caption): self.caption = caption
+ def get_rows(self): return self.rows
+ def set_rows(self, rows): self.rows = rows
+ def get_cols(self): return self.cols
+ def set_cols(self, cols): self.cols = cols
+ def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTableType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'):
+ if self.rows is not None:
+ outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows'))
+ if self.cols is not None:
+ outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols'))
+ def exportChildren(self, outfile, level, namespace_='', name_='docTableType'):
+ for row_ in self.row:
+ row_.export(outfile, level, namespace_, name_='row')
+ if self.caption:
+ self.caption.export(outfile, level, namespace_, name_='caption')
+ def hasContent_(self):
+ if (
+ self.row is not None or
+ self.caption is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTableType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.rows is not None:
+ showIndent(outfile, level)
+ outfile.write('rows = %s,\n' % (self.rows,))
+ if self.cols is not None:
+ showIndent(outfile, level)
+ outfile.write('cols = %s,\n' % (self.cols,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('row=[\n')
+ level += 1
+ for row in self.row:
+ showIndent(outfile, level)
+ outfile.write('model_.row(\n')
+ row.exportLiteral(outfile, level, name_='row')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.caption:
+ showIndent(outfile, level)
+ outfile.write('caption=model_.docCaptionType(\n')
+ self.caption.exportLiteral(outfile, level, name_='caption')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('rows'):
+ try:
+ self.rows = int(attrs.get('rows').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (rows): %s' % exp)
+ if attrs.get('cols'):
+ try:
+ self.cols = int(attrs.get('cols').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (cols): %s' % exp)
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'row':
+ obj_ = docRowType.factory()
+ obj_.build(child_)
+ self.row.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'caption':
+ obj_ = docCaptionType.factory()
+ obj_.build(child_)
+ self.set_caption(obj_)
+# end class docTableType
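+#
+# Hypothetical sketch (not part of the generated bindings): docTableType
+# converts its 'rows'/'cols' attributes to int in buildAttributes() and
+# raises ValueError on malformed input, while its <row>/<entry>/<para>
+# children reuse docRowType, docEntryType and docParaType:
+#
+#     xml = '<table rows="1" cols="1"><row><entry><para>cell</para></entry></row></table>'
+#     table = docTableType.factory()
+#     table.build(minidom.parseString(xml).documentElement)
+#     assert table.get_rows() == 1 and table.get_cols() == 1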
+
+
+class docRowType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, entry=None):
+ if entry is None:
+ self.entry = []
+ else:
+ self.entry = entry
+ def factory(*args_, **kwargs_):
+ if docRowType.subclass:
+ return docRowType.subclass(*args_, **kwargs_)
+ else:
+ return docRowType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_entry(self): return self.entry
+ def set_entry(self, entry): self.entry = entry
+ def add_entry(self, value): self.entry.append(value)
+ def insert_entry(self, index, value): self.entry[index] = value
+ def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docRowType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docRowType'):
+ for entry_ in self.entry:
+ entry_.export(outfile, level, namespace_, name_='entry')
+ def hasContent_(self):
+ if (
+ self.entry is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docRowType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('entry=[\n')
+ level += 1
+ for entry in self.entry:
+ showIndent(outfile, level)
+ outfile.write('model_.entry(\n')
+ entry.exportLiteral(outfile, level, name_='entry')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'entry':
+ obj_ = docEntryType.factory()
+ obj_.build(child_)
+ self.entry.append(obj_)
+# end class docRowType
+
+
+class docEntryType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, thead=None, para=None):
+ self.thead = thead
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docEntryType.subclass:
+ return docEntryType.subclass(*args_, **kwargs_)
+ else:
+ return docEntryType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_thead(self): return self.thead
+ def set_thead(self, thead): self.thead = thead
+ def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docEntryType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'):
+ if self.thead is not None:
+ outfile.write(' thead=%s' % (quote_attrib(self.thead), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docEntryType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.thead is not None:
+ showIndent(outfile, level)
+ outfile.write('thead = "%s",\n' % (self.thead,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('thead'):
+ self.thead = attrs.get('thead').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docEntryType
+
+
+class docCaptionType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docCaptionType.subclass:
+ return docCaptionType.subclass(*args_, **kwargs_)
+ else:
+ return docCaptionType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docCaptionType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docCaptionType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docCaptionType
+
+
+class docHeadingType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
+ self.level = level
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docHeadingType.subclass:
+ return docHeadingType.subclass(*args_, **kwargs_)
+ else:
+ return docHeadingType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_level(self): return self.level
+ def set_level(self, level): self.level = level
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docHeadingType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'):
+ if self.level is not None:
+ outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level'))
+ def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docHeadingType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.level is not None:
+ showIndent(outfile, level)
+ outfile.write('level = %s,\n' % (self.level,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('level'):
+ try:
+ self.level = int(attrs.get('level').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (level): %s' % exp)
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docHeadingType
+
+
+class docImageType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
+ self.width = width
+ self.type_ = type_
+ self.name = name
+ self.height = height
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docImageType.subclass:
+ return docImageType.subclass(*args_, **kwargs_)
+ else:
+ return docImageType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_width(self): return self.width
+ def set_width(self, width): self.width = width
+ def get_type(self): return self.type_
+ def set_type(self, type_): self.type_ = type_
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_height(self): return self.height
+ def set_height(self, height): self.height = height
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docImageType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'):
+ if self.width is not None:
+ outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), ))
+ if self.type_ is not None:
+ outfile.write(' type=%s' % (quote_attrib(self.type_), ))
+ if self.name is not None:
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ if self.height is not None:
+ outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docImageType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docImageType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.width is not None:
+ showIndent(outfile, level)
+ outfile.write('width = %s,\n' % (self.width,))
+ if self.type_ is not None:
+ showIndent(outfile, level)
+ outfile.write('type_ = "%s",\n' % (self.type_,))
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('name = %s,\n' % (self.name,))
+ if self.height is not None:
+ showIndent(outfile, level)
+ outfile.write('height = %s,\n' % (self.height,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('width'):
+ self.width = attrs.get('width').value
+ if attrs.get('type'):
+ self.type_ = attrs.get('type').value
+ if attrs.get('name'):
+ self.name = attrs.get('name').value
+ if attrs.get('height'):
+ self.height = attrs.get('height').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docImageType
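+#
+# Hypothetical sketch (not part of the generated bindings): attribute-bearing
+# classes such as docImageType run each attribute through quote_attrib() and
+# format_string(), encoding with the module-level ExternalEncoding before it
+# is written:
+#
+#     img = docImageType.factory()
+#     img.set_type('html')
+#     img.set_name('figure.png')
+#     img.export(sys.stdout, 0, name_='image')   # writes type="html" name="figure.png"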
+
+
+class docDotFileType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
+ self.name = name
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docDotFileType.subclass:
+ return docDotFileType.subclass(*args_, **kwargs_)
+ else:
+ return docDotFileType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
+ if self.name is not None:
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docDotFileType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('name = %s,\n' % (self.name,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('name'):
+ self.name = attrs.get('name').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docDotFileType
+
+
+class docTocItemType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docTocItemType.subclass:
+ return docTocItemType.subclass(*args_, **kwargs_)
+ else:
+ return docTocItemType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTocItemType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docTocItemType
+
+
+class docTocListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, tocitem=None):
+ if tocitem is None:
+ self.tocitem = []
+ else:
+ self.tocitem = tocitem
+ def factory(*args_, **kwargs_):
+ if docTocListType.subclass:
+ return docTocListType.subclass(*args_, **kwargs_)
+ else:
+ return docTocListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_tocitem(self): return self.tocitem
+ def set_tocitem(self, tocitem): self.tocitem = tocitem
+ def add_tocitem(self, value): self.tocitem.append(value)
+ def insert_tocitem(self, index, value): self.tocitem[index] = value
+ def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
+ for tocitem_ in self.tocitem:
+ tocitem_.export(outfile, level, namespace_, name_='tocitem')
+ def hasContent_(self):
+ if (
+ self.tocitem is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTocListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('tocitem=[\n')
+ level += 1
+ for tocitem in self.tocitem:
+ showIndent(outfile, level)
+ outfile.write('model_.tocitem(\n')
+ tocitem.exportLiteral(outfile, level, name_='tocitem')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'tocitem':
+ obj_ = docTocItemType.factory()
+ obj_.build(child_)
+ self.tocitem.append(obj_)
+# end class docTocListType
+
+
+class docLanguageType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, langid=None, para=None):
+ self.langid = langid
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docLanguageType.subclass:
+ return docLanguageType.subclass(*args_, **kwargs_)
+ else:
+ return docLanguageType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_langid(self): return self.langid
+ def set_langid(self, langid): self.langid = langid
+ def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
+ if self.langid is not None:
+ outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docLanguageType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.langid is not None:
+ showIndent(outfile, level)
+ outfile.write('langid = %s,\n' % (self.langid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('langid'):
+ self.langid = attrs.get('langid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docLanguageType
+
+
+class docParamListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, parameteritem=None):
+ self.kind = kind
+ if parameteritem is None:
+ self.parameteritem = []
+ else:
+ self.parameteritem = parameteritem
+ def factory(*args_, **kwargs_):
+ if docParamListType.subclass:
+ return docParamListType.subclass(*args_, **kwargs_)
+ else:
+ return docParamListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_parameteritem(self): return self.parameteritem
+ def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
+ def add_parameteritem(self, value): self.parameteritem.append(value)
+ def insert_parameteritem(self, index, value): self.parameteritem[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
+ for parameteritem_ in self.parameteritem:
+ parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
+ def hasContent_(self):
+ if (
+ self.parameteritem is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('parameteritem=[\n')
+ level += 1
+ for parameteritem in self.parameteritem:
+ showIndent(outfile, level)
+ outfile.write('model_.parameteritem(\n')
+ parameteritem.exportLiteral(outfile, level, name_='parameteritem')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameteritem':
+ obj_ = docParamListItem.factory()
+ obj_.build(child_)
+ self.parameteritem.append(obj_)
+# end class docParamListType
+
+
+class docParamListItem(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, parameternamelist=None, parameterdescription=None):
+ if parameternamelist is None:
+ self.parameternamelist = []
+ else:
+ self.parameternamelist = parameternamelist
+ self.parameterdescription = parameterdescription
+ def factory(*args_, **kwargs_):
+ if docParamListItem.subclass:
+ return docParamListItem.subclass(*args_, **kwargs_)
+ else:
+ return docParamListItem(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_parameternamelist(self): return self.parameternamelist
+ def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
+ def add_parameternamelist(self, value): self.parameternamelist.append(value)
+ def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
+ def get_parameterdescription(self): return self.parameterdescription
+ def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
+ def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
+ for parameternamelist_ in self.parameternamelist:
+ parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
+ if self.parameterdescription:
+ self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
+ def hasContent_(self):
+ if (
+ self.parameternamelist is not None or
+ self.parameterdescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamListItem'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('parameternamelist=[\n')
+ level += 1
+ for parameternamelist in self.parameternamelist:
+ showIndent(outfile, level)
+ outfile.write('model_.parameternamelist(\n')
+ parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.parameterdescription:
+ showIndent(outfile, level)
+ outfile.write('parameterdescription=model_.descriptionType(\n')
+ self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameternamelist':
+ obj_ = docParamNameList.factory()
+ obj_.build(child_)
+ self.parameternamelist.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameterdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_parameterdescription(obj_)
+# end class docParamListItem
+
+
+class docParamNameList(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, parametername=None):
+ if parametername is None:
+ self.parametername = []
+ else:
+ self.parametername = parametername
+ def factory(*args_, **kwargs_):
+ if docParamNameList.subclass:
+ return docParamNameList.subclass(*args_, **kwargs_)
+ else:
+ return docParamNameList(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_parametername(self): return self.parametername
+ def set_parametername(self, parametername): self.parametername = parametername
+ def add_parametername(self, value): self.parametername.append(value)
+ def insert_parametername(self, index, value): self.parametername[index] = value
+ def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
+ for parametername_ in self.parametername:
+ parametername_.export(outfile, level, namespace_, name_='parametername')
+ def hasContent_(self):
+ if (
+ self.parametername is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamNameList'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('parametername=[\n')
+ level += 1
+ for parametername in self.parametername:
+ showIndent(outfile, level)
+ outfile.write('model_.parametername(\n')
+ parametername.exportLiteral(outfile, level, name_='parametername')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parametername':
+ obj_ = docParamName.factory()
+ obj_.build(child_)
+ self.parametername.append(obj_)
+# end class docParamNameList
+
+
+class docParamName(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
+ self.direction = direction
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docParamName.subclass:
+ return docParamName.subclass(*args_, **kwargs_)
+ else:
+ return docParamName(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_ref(self): return self.ref
+ def set_ref(self, ref): self.ref = ref
+ def get_direction(self): return self.direction
+ def set_direction(self, direction): self.direction = direction
+ def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamName')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
+ if self.direction is not None:
+ outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.ref is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamName'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.direction is not None:
+ showIndent(outfile, level)
+ outfile.write('direction = "%s",\n' % (self.direction,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('direction'):
+ self.direction = attrs.get('direction').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'ref':
+ childobj_ = docRefTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'ref', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docParamName
+
+
+class docXRefSectType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
+ self.id = id
+ if xreftitle is None:
+ self.xreftitle = []
+ else:
+ self.xreftitle = xreftitle
+ self.xrefdescription = xrefdescription
+ def factory(*args_, **kwargs_):
+ if docXRefSectType.subclass:
+ return docXRefSectType.subclass(*args_, **kwargs_)
+ else:
+ return docXRefSectType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_xreftitle(self): return self.xreftitle
+ def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
+ def add_xreftitle(self, value): self.xreftitle.append(value)
+ def insert_xreftitle(self, index, value): self.xreftitle[index] = value
+ def get_xrefdescription(self): return self.xrefdescription
+ def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
+ for xreftitle_ in self.xreftitle:
+ showIndent(outfile, level)
+ outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
+ if self.xrefdescription:
+ self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
+ def hasContent_(self):
+ if (
+ self.xreftitle is not None or
+ self.xrefdescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docXRefSectType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('xreftitle=[\n')
+ level += 1
+ for xreftitle in self.xreftitle:
+ showIndent(outfile, level)
+ outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.xrefdescription:
+ showIndent(outfile, level)
+ outfile.write('xrefdescription=model_.descriptionType(\n')
+ self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'xreftitle':
+ xreftitle_ = ''
+ for text__content_ in child_.childNodes:
+ xreftitle_ += text__content_.nodeValue
+ self.xreftitle.append(xreftitle_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'xrefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_xrefdescription(obj_)
+# end class docXRefSectType
+
+
+class docCopyType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, link=None, para=None, sect1=None, internal=None):
+ self.link = link
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ if sect1 is None:
+ self.sect1 = []
+ else:
+ self.sect1 = sect1
+ self.internal = internal
+ def factory(*args_, **kwargs_):
+ if docCopyType.subclass:
+ return docCopyType.subclass(*args_, **kwargs_)
+ else:
+ return docCopyType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect1(self): return self.sect1
+ def set_sect1(self, sect1): self.sect1 = sect1
+ def add_sect1(self, value): self.sect1.append(value)
+ def insert_sect1(self, index, value): self.sect1[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_link(self): return self.link
+ def set_link(self, link): self.link = link
+ def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docCopyType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
+ if self.link is not None:
+ outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ for sect1_ in self.sect1:
+ sect1_.export(outfile, level, namespace_, name_='sect1')
+ if self.internal:
+ self.internal.export(outfile, level, namespace_, name_='internal')
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect1 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docCopyType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.link is not None:
+ showIndent(outfile, level)
+ outfile.write('link = %s,\n' % (self.link,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('sect1=[\n')
+ level += 1
+ for sect1 in self.sect1:
+ showIndent(outfile, level)
+ outfile.write('model_.sect1(\n')
+ sect1.exportLiteral(outfile, level, name_='sect1')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.internal:
+ showIndent(outfile, level)
+ outfile.write('internal=model_.docInternalType(\n')
+ self.internal.exportLiteral(outfile, level, name_='internal')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('link'):
+ self.link = attrs.get('link').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect1':
+ obj_ = docSect1Type.factory()
+ obj_.build(child_)
+ self.sect1.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ obj_ = docInternalType.factory()
+ obj_.build(child_)
+ self.set_internal(obj_)
+# end class docCopyType
+
+
+class docCharType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, char=None, valueOf_=''):
+ self.char = char
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if docCharType.subclass:
+ return docCharType.subclass(*args_, **kwargs_)
+ else:
+ return docCharType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_char(self): return self.char
+ def set_char(self, char): self.char = char
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docCharType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
+ if self.char is not None:
+ outfile.write(' char=%s' % (quote_attrib(self.char), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docCharType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.char is not None:
+ showIndent(outfile, level)
+ outfile.write('char = "%s",\n' % (self.char,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('char'):
+ self.char = attrs.get('char').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docCharType
+
+
+class docEmptyType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if docEmptyType.subclass:
+ return docEmptyType.subclass(*args_, **kwargs_)
+ else:
+ return docEmptyType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docEmptyType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docEmptyType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docEmptyType
+
+
+USAGE_TEXT = """
+Usage: python <Parser>.py [ -s ] <in_xml_file>
+Options:
+ -s Use the SAX parser, not the minidom parser.
+"""
+
+def usage():
+ print USAGE_TEXT
+ sys.exit(1)
+
+
+def parse(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygen",
+ namespacedef_='')
+ return rootObj
+
+
+def parseString(inString):
+ doc = minidom.parseString(inString)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygen",
+ namespacedef_='')
+ return rootObj
+
+
+def parseLiteral(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('from compound import *\n\n')
+ sys.stdout.write('rootObj = doxygen(\n')
+ rootObj.exportLiteral(sys.stdout, 0, name_="doxygen")
+ sys.stdout.write(')\n')
+ return rootObj
+
+
+def main():
+ args = sys.argv[1:]
+ if len(args) == 1:
+ parse(args[0])
+ else:
+ usage()
+
+
+if __name__ == '__main__':
+ main()
+ #import pdb
+ #pdb.run('main()')
+
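+# A minimal, hedged usage sketch for the parse() helper above. The XML file
+# name below is hypothetical; parse() builds the object tree, echoes the
+# re-exported XML to stdout and returns the root object:
+#
+#     from compoundsuper import parse
+#     rootObj = parse('classfoo.xml')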
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/index.py b/gr-aistx/docs/doxygen/doxyxml/generated/index.py
new file mode 100644
index 0000000..7a70e14
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/index.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+"""
+Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
+"""
+
+from xml.dom import minidom
+
+import os
+import sys
+import compound
+
+import indexsuper as supermod
+
+class DoxygenTypeSub(supermod.DoxygenType):
+ def __init__(self, version=None, compound=None):
+ supermod.DoxygenType.__init__(self, version, compound)
+
+ def find_compounds_and_members(self, details):
+ """
+ Returns a list of all compounds and their members which match details
+ """
+
+ results = []
+ for compound in self.compound:
+ members = compound.find_members(details)
+ if members:
+ results.append([compound, members])
+ else:
+ if details.match(compound):
+ results.append([compound, []])
+
+ return results
+
+supermod.DoxygenType.subclass = DoxygenTypeSub
+# end class DoxygenTypeSub
+
+
+class CompoundTypeSub(supermod.CompoundType):
+ def __init__(self, kind=None, refid=None, name='', member=None):
+ supermod.CompoundType.__init__(self, kind, refid, name, member)
+
+ def find_members(self, details):
+ """
+ Returns a list of all members which match details
+ """
+
+ results = []
+
+ for member in self.member:
+ if details.match(member):
+ results.append(member)
+
+ return results
+
+supermod.CompoundType.subclass = CompoundTypeSub
+# end class CompoundTypeSub
+
+
+class MemberTypeSub(supermod.MemberType):
+
+ def __init__(self, kind=None, refid=None, name=''):
+ supermod.MemberType.__init__(self, kind, refid, name)
+
+supermod.MemberType.subclass = MemberTypeSub
+# end class MemberTypeSub
+
+
+def parse(inFilename):
+
+ doc = minidom.parse(inFilename)
+ rootNode = doc.documentElement
+ rootObj = supermod.DoxygenType.factory()
+ rootObj.build(rootNode)
+
+ return rootObj
+
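+# Hedged usage sketch: parse() returns a DoxygenTypeSub (the subclass hook is
+# set above), so the matching helpers defined in this file are available on
+# the result. The index path and the 'details' matcher are assumptions made
+# only for illustration:
+#
+#     rootObj = parse('xml/index.xml')
+#     hits = rootObj.find_compounds_and_members(details)
+#     # hits is a list of [compound, [matching members]] pairs
+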
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py b/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py
new file mode 100644
index 0000000..a991530
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py
@@ -0,0 +1,523 @@
+#!/usr/bin/env python
+
+#
+# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
+#
+
+import sys
+import getopt
+from string import lower as str_lower
+from xml.dom import minidom
+from xml.dom import Node
+
+#
+# User methods
+#
+# Calls to the methods in these classes are generated by generateDS.py.
+# You can replace these methods by re-implementing the following class
+# in a module named generatedssuper.py.
+
+try:
+ from generatedssuper import GeneratedsSuper
+except ImportError, exp:
+
+ class GeneratedsSuper:
+ def format_string(self, input_data, input_name=''):
+ return input_data
+ def format_integer(self, input_data, input_name=''):
+ return '%d' % input_data
+ def format_float(self, input_data, input_name=''):
+ return '%f' % input_data
+ def format_double(self, input_data, input_name=''):
+ return '%e' % input_data
+ def format_boolean(self, input_data, input_name=''):
+ return '%s' % input_data
+
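+# Hedged sketch of the override hook described above: if a module named
+# generatedssuper.py is importable, its GeneratedsSuper class is used instead
+# of the fallback defined here (it should provide the same format_* methods).
+# The body below is illustrative only:
+#
+#     # generatedssuper.py
+#     class GeneratedsSuper:
+#         def format_string(self, input_data, input_name=''):
+#             return input_data.strip()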
+
+#
+# If you have installed IPython you can uncomment and use the following.
+# IPython is available from http://ipython.scipy.org/.
+#
+
+## from IPython.Shell import IPShellEmbed
+## args = ''
+## ipshell = IPShellEmbed(args,
+## banner = 'Dropping into IPython',
+## exit_msg = 'Leaving Interpreter, back to program.')
+
+# Then use the following line where and when you want to drop into the
+# IPython shell:
+# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
+
+#
+# Globals
+#
+
+ExternalEncoding = 'ascii'
+
+#
+# Support/utility functions.
+#
+
+def showIndent(outfile, level):
+ for idx in range(level):
+ outfile.write(' ')
+
+def quote_xml(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ return s1
+
+def quote_attrib(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ if '"' in s1:
+ if "'" in s1:
+ s1 = '"%s"' % s1.replace('"', "&quot;")
+ else:
+ s1 = "'%s'" % s1
+ else:
+ s1 = '"%s"' % s1
+ return s1
+
+def quote_python(inStr):
+ s1 = inStr
+ if s1.find("'") == -1:
+ if s1.find('\n') == -1:
+ return "'%s'" % s1
+ else:
+ return "'''%s'''" % s1
+ else:
+ if s1.find('"') != -1:
+ s1 = s1.replace('"', '\\"')
+ if s1.find('\n') == -1:
+ return '"%s"' % s1
+ else:
+ return '"""%s"""' % s1
+
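+# Illustrative results of the quoting helpers above (inputs are arbitrary):
+#
+#     quote_xml('a < b & c')     # returns 'a &lt; b &amp; c'
+#     quote_attrib('say "hi"')   # wraps the value in single quotes
+#     quote_python("it's")       # wraps the value in double quotes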
+
+class MixedContainer:
+ # Constants for category:
+ CategoryNone = 0
+ CategoryText = 1
+ CategorySimple = 2
+ CategoryComplex = 3
+ # Constants for content_type:
+ TypeNone = 0
+ TypeText = 1
+ TypeString = 2
+ TypeInteger = 3
+ TypeFloat = 4
+ TypeDecimal = 5
+ TypeDouble = 6
+ TypeBoolean = 7
+ def __init__(self, category, content_type, name, value):
+ self.category = category
+ self.content_type = content_type
+ self.name = name
+ self.value = value
+ def getCategory(self):
+ return self.category
+ def getContenttype(self, content_type):
+ return self.content_type
+ def getValue(self):
+ return self.value
+ def getName(self):
+ return self.name
+ def export(self, outfile, level, name, namespace):
+ if self.category == MixedContainer.CategoryText:
+ outfile.write(self.value)
+ elif self.category == MixedContainer.CategorySimple:
+ self.exportSimple(outfile, level, name)
+ else: # category == MixedContainer.CategoryComplex
+ self.value.export(outfile, level, namespace,name)
+ def exportSimple(self, outfile, level, name):
+ if self.content_type == MixedContainer.TypeString:
+ outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeInteger or \
+ self.content_type == MixedContainer.TypeBoolean:
+ outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeFloat or \
+ self.content_type == MixedContainer.TypeDecimal:
+ outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeDouble:
+ outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+ def exportLiteral(self, outfile, level, name):
+ if self.category == MixedContainer.CategoryText:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ elif self.category == MixedContainer.CategorySimple:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ else: # category == MixedContainer.CategoryComplex
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s",\n' % \
+ (self.category, self.content_type, self.name,))
+ self.value.exportLiteral(outfile, level + 1)
+ showIndent(outfile, level)
+ outfile.write(')\n')
+
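+# Hedged sketch of how mixed (text plus element) content is carried by
+# MixedContainer; the text value is illustrative:
+#
+#     item = MixedContainer(MixedContainer.CategoryText,
+#                           MixedContainer.TypeNone, '', 'some text')
+#     item.export(sys.stdout, 0, item.name, '')   # writes 'some text'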
+
+class _MemberSpec(object):
+ def __init__(self, name='', data_type='', container=0):
+ self.name = name
+ self.data_type = data_type
+ self.container = container
+ def set_name(self, name): self.name = name
+ def get_name(self): return self.name
+ def set_data_type(self, data_type): self.data_type = data_type
+ def get_data_type(self): return self.data_type
+ def set_container(self, container): self.container = container
+ def get_container(self): return self.container
+
+
+#
+# Data representation classes.
+#
+
+class DoxygenType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, version=None, compound=None):
+ self.version = version
+ if compound is None:
+ self.compound = []
+ else:
+ self.compound = compound
+ def factory(*args_, **kwargs_):
+ if DoxygenType.subclass:
+ return DoxygenType.subclass(*args_, **kwargs_)
+ else:
+ return DoxygenType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_compound(self): return self.compound
+ def set_compound(self, compound): self.compound = compound
+ def add_compound(self, value): self.compound.append(value)
+ def insert_compound(self, index, value): self.compound[index] = value
+ def get_version(self): return self.version
+ def set_version(self, version): self.version = version
+ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
+ outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
+ for compound_ in self.compound:
+ compound_.export(outfile, level, namespace_, name_='compound')
+ def hasContent_(self):
+ if (
+ self.compound is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='DoxygenType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.version is not None:
+ showIndent(outfile, level)
+ outfile.write('version = %s,\n' % (self.version,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('compound=[\n')
+ level += 1
+ for compound in self.compound:
+ showIndent(outfile, level)
+ outfile.write('model_.compound(\n')
+ compound.exportLiteral(outfile, level, name_='compound')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('version'):
+ self.version = attrs.get('version').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'compound':
+ obj_ = CompoundType.factory()
+ obj_.build(child_)
+ self.compound.append(obj_)
+# end class DoxygenType
+
+
+class CompoundType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, refid=None, name=None, member=None):
+ self.kind = kind
+ self.refid = refid
+ self.name = name
+ if member is None:
+ self.member = []
+ else:
+ self.member = member
+ def factory(*args_, **kwargs_):
+ if CompoundType.subclass:
+ return CompoundType.subclass(*args_, **kwargs_)
+ else:
+ return CompoundType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_member(self): return self.member
+ def set_member(self, member): self.member = member
+ def add_member(self, value): self.member.append(value)
+ def insert_member(self, index, value): self.member[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='CompoundType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ for member_ in self.member:
+ member_.export(outfile, level, namespace_, name_='member')
+ def hasContent_(self):
+ if (
+ self.name is not None or
+ self.member is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='CompoundType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('member=[\n')
+ level += 1
+ for member in self.member:
+ showIndent(outfile, level)
+ outfile.write('model_.member(\n')
+ member.exportLiteral(outfile, level, name_='member')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'member':
+ obj_ = MemberType.factory()
+ obj_.build(child_)
+ self.member.append(obj_)
+# end class CompoundType
+
+
+class MemberType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, refid=None, name=None):
+ self.kind = kind
+ self.refid = refid
+ self.name = name
+ def factory(*args_, **kwargs_):
+ if MemberType.subclass:
+ return MemberType.subclass(*args_, **kwargs_)
+ else:
+ return MemberType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='MemberType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ def hasContent_(self):
+ if (
+ self.name is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='MemberType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+# end class MemberType
+
+
+USAGE_TEXT = """
+Usage: python <Parser>.py [ -s ] <in_xml_file>
+Options:
+ -s Use the SAX parser, not the minidom parser.
+"""
+
+def usage():
+ print USAGE_TEXT
+ sys.exit(1)
+
+
+def parse(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygenindex",
+ namespacedef_='')
+ return rootObj
+
+
+def parseString(inString):
+ doc = minidom.parseString(inString)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygenindex",
+ namespacedef_='')
+ return rootObj
+
+
+def parseLiteral(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('from index import *\n\n')
+ sys.stdout.write('rootObj = doxygenindex(\n')
+ rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex")
+ sys.stdout.write(')\n')
+ return rootObj
+
+
+def main():
+ args = sys.argv[1:]
+ if len(args) == 1:
+ parse(args[0])
+ else:
+ usage()
+
+
+
+
+if __name__ == '__main__':
+ main()
+ #import pdb
+ #pdb.run('main()')
+
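+# Hedged command-line sketch, mirroring USAGE_TEXT above (the XML path is an
+# example only):
+#
+#     $ python indexsuper.py xml/index.xml
+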
diff --git a/gr-aistx/docs/doxygen/doxyxml/text.py b/gr-aistx/docs/doxygen/doxyxml/text.py
new file mode 100644
index 0000000..629edd1
--- /dev/null
+++ b/gr-aistx/docs/doxygen/doxyxml/text.py
@@ -0,0 +1,56 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Utilities for extracting text from generated classes.
+"""
+
+def is_string(txt):
+ if isinstance(txt, str):
+ return True
+ try:
+ if isinstance(txt, unicode):
+ return True
+ except NameError:
+ pass
+ return False
+
+def description(obj):
+ if obj is None:
+ return None
+ return description_bit(obj).strip()
+
+def description_bit(obj):
+ if hasattr(obj, 'content'):
+ contents = [description_bit(item) for item in obj.content]
+ result = ''.join(contents)
+ elif hasattr(obj, 'content_'):
+ contents = [description_bit(item) for item in obj.content_]
+ result = ''.join(contents)
+ elif hasattr(obj, 'value'):
+ result = description_bit(obj.value)
+ elif is_string(obj):
+ return obj
+ else:
+ raise StandardError('Expecting a string or something with content, content_ or value attribute')
+    # If this bit is a paragraph then add some line breaks.
+ if hasattr(obj, 'name') and obj.name == 'para':
+ result += "\n\n"
+ return result
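A quick illustration of how description() and description_bit() above traverse a node tree: the Text and Para classes below are invented stand-ins for doxyxml objects (anything exposing .value or .content works), and the call assumes the two functions defined in this file are in scope.

    class Text(object):                   # leaf node: plain string in .value
        def __init__(self, value):
            self.value = value

    class Para(object):                   # container node: children in .content
        name = 'para'
        def __init__(self, *items):
            self.content = list(items)

    print(description(Para(Text('Builds an '), Text('AIS frame.'))))
    # -> 'Builds an AIS frame.'  (the trailing paragraph break is stripped)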
diff --git a/gr-aistx/docs/doxygen/other/group_defs.dox b/gr-aistx/docs/doxygen/other/group_defs.dox
new file mode 100644
index 0000000..45e2026
--- /dev/null
+++ b/gr-aistx/docs/doxygen/other/group_defs.dox
@@ -0,0 +1,7 @@
+/*!
+ * \defgroup block GNU Radio AISTX C++ Signal Processing Blocks
+ * \brief All C++ blocks that can be used from the AISTX GNU Radio
+ * module are listed here or in the subcategories below.
+ *
+ */
+
diff --git a/gr-aistx/docs/doxygen/other/main_page.dox b/gr-aistx/docs/doxygen/other/main_page.dox
new file mode 100644
index 0000000..cd27f2c
--- /dev/null
+++ b/gr-aistx/docs/doxygen/other/main_page.dox
@@ -0,0 +1,10 @@
+/*! \mainpage
+
+Welcome to the GNU Radio AISTX Block
+
+This is the intro page for the Doxygen manual generated for the AISTX
+block (docs/doxygen/other/main_page.dox). Edit it to add more detailed
+documentation about the new GNU Radio modules contained in this
+project.
+
+*/
diff --git a/gr-aistx/docs/doxygen/swig_doc.py b/gr-aistx/docs/doxygen/swig_doc.py
new file mode 100644
index 0000000..4e1ce2e
--- /dev/null
+++ b/gr-aistx/docs/doxygen/swig_doc.py
@@ -0,0 +1,255 @@
+#
+# Copyright 2010,2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Creates the swig_doc.i SWIG interface file.
+Execute using: python swig_doc.py xml_path outputfilename
+
+The file instructs SWIG to transfer the doxygen comments into the
+python docstrings.
+
+"""
+
+import sys
+
+try:
+ from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
+except ImportError:
+ from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
+
+
+def py_name(name):
+ bits = name.split('_')
+ return '_'.join(bits[1:])
+
+def make_name(name):
+ bits = name.split('_')
+ return bits[0] + '_make_' + '_'.join(bits[1:])
+
+
+class Block(object):
+ """
+ Checks if doxyxml produced objects correspond to a gnuradio block.
+ """
+
+ @classmethod
+ def includes(cls, item):
+ if not isinstance(item, DoxyClass):
+ return False
+ # Check for a parsing error.
+ if item.error():
+ return False
+ return item.has_member(make_name(item.name()), DoxyFriend)
+
+
+def utoascii(text):
+ """
+ Convert unicode text into ascii and escape quotes.
+ """
+ if text is None:
+ return ''
+ out = text.encode('ascii', 'replace')
+ out = out.replace('"', '\\"')
+ return out
+
+
+def combine_descriptions(obj):
+ """
+ Combines the brief and detailed descriptions of an object together.
+ """
+ description = []
+ bd = obj.brief_description.strip()
+ dd = obj.detailed_description.strip()
+ if bd:
+ description.append(bd)
+ if dd:
+ description.append(dd)
+ return utoascii('\n\n'.join(description)).strip()
+
+
+entry_templ = '%feature("docstring") {name} "{docstring}"'
+def make_entry(obj, name=None, templ="{description}", description=None):
+ """
+ Create a docstring entry for a swig interface file.
+
+ obj - a doxyxml object from which documentation will be extracted.
+ name - the name of the C object (defaults to obj.name())
+ templ - an optional template for the docstring containing only one
+ variable named 'description'.
+    description - if this optional variable is set then its value is
+ used as the description instead of extracting it from obj.
+ """
+ if name is None:
+ name=obj.name()
+ if "operator " in name:
+ return ''
+ if description is None:
+ description = combine_descriptions(obj)
+ docstring = templ.format(description=description)
+ if not docstring:
+ return ''
+ return entry_templ.format(
+ name=name,
+ docstring=docstring,
+ )
+
+
+def make_func_entry(func, name=None, description=None, params=None):
+ """
+ Create a function docstring entry for a swig interface file.
+
+ func - a doxyxml object from which documentation will be extracted.
+ name - the name of the C object (defaults to func.name())
+    description - if this optional variable is set then its value is
+ used as the description instead of extracting it from func.
+ params - a parameter list that overrides using func.params.
+ """
+ if params is None:
+ params = func.params
+ params = [prm.declname for prm in params]
+ if params:
+ sig = "Params: (%s)" % ", ".join(params)
+ else:
+ sig = "Params: (NONE)"
+ templ = "{description}\n\n" + sig
+ return make_entry(func, name=name, templ=utoascii(templ),
+ description=description)
+
+
+def make_class_entry(klass, description=None):
+ """
+ Create a class docstring for a swig interface file.
+ """
+ output = []
+ output.append(make_entry(klass, description=description))
+ for func in klass.in_category(DoxyFunction):
+ name = klass.name() + '::' + func.name()
+ output.append(make_func_entry(func, name=name))
+ return "\n\n".join(output)
+
+
+def make_block_entry(di, block):
+ """
+ Create class and function docstrings of a gnuradio block for a
+ swig interface file.
+ """
+ descriptions = []
+ # Get the documentation associated with the class.
+ class_desc = combine_descriptions(block)
+ if class_desc:
+ descriptions.append(class_desc)
+ # Get the documentation associated with the make function
+ make_func = di.get_member(make_name(block.name()), DoxyFunction)
+ make_func_desc = combine_descriptions(make_func)
+ if make_func_desc:
+ descriptions.append(make_func_desc)
+ # Get the documentation associated with the file
+ try:
+ block_file = di.get_member(block.name() + ".h", DoxyFile)
+ file_desc = combine_descriptions(block_file)
+ if file_desc:
+ descriptions.append(file_desc)
+ except base.Base.NoSuchMember:
+ # Don't worry if we can't find a matching file.
+ pass
+ # And join them all together to make a super duper description.
+ super_description = "\n\n".join(descriptions)
+ # Associate the combined description with the class and
+ # the make function.
+ output = []
+ output.append(make_class_entry(block, description=super_description))
+ creator = block.get_member(block.name(), DoxyFunction)
+ output.append(make_func_entry(make_func, description=super_description,
+ params=creator.params))
+ return "\n\n".join(output)
+
+
+def make_swig_interface_file(di, swigdocfilename, custom_output=None):
+
+ output = ["""
+/*
+ * This file was automatically generated using swig_doc.py.
+ *
+ * Any changes to it will be lost next time it is regenerated.
+ */
+"""]
+
+ if custom_output is not None:
+ output.append(custom_output)
+
+ # Create docstrings for the blocks.
+ blocks = di.in_category(Block)
+ make_funcs = set([])
+ for block in blocks:
+ try:
+ make_func = di.get_member(make_name(block.name()), DoxyFunction)
+ make_funcs.add(make_func.name())
+ output.append(make_block_entry(di, block))
+ except block.ParsingError:
+ print('Parsing error for block %s' % block.name())
+
+ # Create docstrings for functions
+ # Don't include the make functions since they have already been dealt with.
+ funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs]
+ for f in funcs:
+ try:
+ output.append(make_func_entry(f))
+ except f.ParsingError:
+ print('Parsing error for function %s' % f.name())
+
+ # Create docstrings for classes
+ block_names = [block.name() for block in blocks]
+ klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names]
+ for k in klasses:
+ try:
+ output.append(make_class_entry(k))
+ except k.ParsingError:
+ print('Parsing error for class %s' % k.name())
+
+ # Docstrings are not created for anything that is not a function or a class.
+ # If this excludes anything important please add it here.
+
+ output = "\n\n".join(output)
+
+ swig_doc = file(swigdocfilename, 'w')
+ swig_doc.write(output)
+ swig_doc.close()
+
+if __name__ == "__main__":
+ # Parse command line options and set up doxyxml.
+ err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
+ if len(sys.argv) != 3:
+ raise StandardError(err_msg)
+ xml_path = sys.argv[1]
+ swigdocfilename = sys.argv[2]
+ di = DoxyIndex(xml_path)
+
+ # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined!
+ # This is presumably a bug in SWIG.
+ #msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
+ #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
+ #delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
+ output = []
+ #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
+ #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
+ custom_output = "\n\n".join(output)
+
+ # Generate the docstrings interface file.
+ make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
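For reference, each documented symbol ends up as one %feature("docstring") directive in the generated swig_doc.i, built from entry_templ above. A tiny sketch of the output format, with an invented block name and description:

    entry_templ = '%feature("docstring") {name} "{docstring}"'
    print(entry_templ.format(
        name='gr::AISTX::Build_Frame::make',          # illustrative name only
        docstring='Builds an AIS Frame of 256 bytes.\n\n'
                  'Params: (sentence, repeat, enable_NRZI)'))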
diff --git a/gr-aistx/grc/AISTX_Build_Frame.xml b/gr-aistx/grc/AISTX_Build_Frame.xml
new file mode 100644
index 0000000..61f7198
--- /dev/null
+++ b/gr-aistx/grc/AISTX_Build_Frame.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<block>
+ <name>AIS Frame Builder</name>
+ <key>AISTX_Build_Frame</key>
+ <category>AISTX</category>
+ <import>import AISTX</import>
+ <make>AISTX.Build_Frame($sentence, $repeat, $enable_NRZI)</make>
+ <param>
+ <name>Sentence</name>
+ <key>sentence</key>
+ <value>010010000011101011110111001110011000100000000000000000100000001011001000001011000101000110100010010100001101011001111011000011111111111011100101110011100000000000000110</value>
+ <type>string</type>
+ </param>
+ <param>
+ <name>Repeat</name>
+ <key>repeat</key>
+ <value>True</value>
+ <type>enum</type>
+ <option>
+ <name>Yes</name>
+ <key>True</key>
+ </option>
+ <option>
+ <name>No</name>
+ <key>False</key>
+ </option>
+ </param>
+ <param>
+ <name>Enable_NRZI_Conversion</name>
+ <key>enable_NRZI</key>
+ <value>True</value>
+ <type>enum</type>
+ <option>
+ <name>Yes</name>
+ <key>True</key>
+ </option>
+ <option>
+ <name>No</name>
+ <key>False</key>
+ </option>
+ </param>
+
+ <source>
+ <name>out</name>
+ <type>byte</type>
+ </source>
+</block>
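When the block is placed in a flow graph, GRC expands the <make> template above into a plain Python call. Roughly, and assuming the AISTX bindings are installed (the payload below is a 168-bit placeholder, not the XML default):

    import AISTX

    sentence = '01' * 84                                     # placeholder '0'/'1' payload string
    frame_source = AISTX.Build_Frame(sentence, True, True)   # sentence, repeat, enable_NRZI

The block then streams packed frame bytes, which the accompanying AiS_TX flow graph feeds to a GMSK modulator (see the byte_packing comment in Build_Frame_impl.cc).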
diff --git a/gr-aistx/grc/AISTX_DebugME.xml b/gr-aistx/grc/AISTX_DebugME.xml
new file mode 100644
index 0000000..9dcdc31
--- /dev/null
+++ b/gr-aistx/grc/AISTX_DebugME.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<block>
+ <name>DebugME</name>
+ <key>AISTX_DebugME</key>
+ <category>AISTX</category>
+ <import>import AISTX</import>
+ <make>AISTX.DebugME($type.size)</make>
+ <param>
+ <name>Input Type</name>
+ <key>type</key>
+ <type>enum</type>
+ <option>
+ <name>Complex</name>
+ <key>complex</key>
+ <opt>size:gr.sizeof_gr_complex</opt>
+ </option>
+ <option>
+ <name>Float</name>
+ <key>float</key>
+ <opt>size:gr.sizeof_float</opt>
+ </option>
+ <option>
+ <name>Byte</name>
+ <key>byte</key>
+ <opt>size:gr.sizeof_char</opt>
+ </option>
+ </param>
+ <sink>
+ <name>in</name>
+ <type>$type</type>
+ </sink>
+</block>
diff --git a/gr-aistx/grc/AISTX_nrz_to_nrzi.xml b/gr-aistx/grc/AISTX_nrz_to_nrzi.xml
new file mode 100644
index 0000000..d2191bb
--- /dev/null
+++ b/gr-aistx/grc/AISTX_nrz_to_nrzi.xml
@@ -0,0 +1,15 @@
+<block>
+  <name>NRZ to NRZI</name>
+ <key>AISTX_nrz_to_nrzi</key>
+ <category>AISTX</category>
+ <import>import AISTX</import>
+ <make>AISTX.nrz_to_nrzi()</make>
+ <sink>
+ <name>in</name>
+ <type>byte</type>
+ </sink>
+ <source>
+ <name>out</name>
+ <type>byte</type>
+ </source>
+</block>
diff --git a/gr-aistx/grc/CMakeLists.txt b/gr-aistx/grc/CMakeLists.txt
new file mode 100644
index 0000000..2ae27c1
--- /dev/null
+++ b/gr-aistx/grc/CMakeLists.txt
@@ -0,0 +1,23 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+install(FILES
+ AISTX_nrz_to_nrzi.xml
+ AISTX_Build_Frame.xml
+ AISTX_DebugME.xml DESTINATION share/gnuradio/grc/blocks
+)
diff --git a/gr-aistx/include/AISTX/Build_Frame.h b/gr-aistx/include/AISTX/Build_Frame.h
new file mode 100644
index 0000000..3ce6beb
--- /dev/null
+++ b/gr-aistx/include/AISTX/Build_Frame.h
@@ -0,0 +1,60 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef INCLUDED_AISTX_BUILD_FRAME_H
+#define INCLUDED_AISTX_BUILD_FRAME_H
+
+#include <AISTX/api.h>
+#include <gr_sync_block.h>
+
+namespace gr {
+ namespace AISTX {
+
+ /*!
+ * \brief Builds AIS Frame
+ * \ingroup AISTX
+ *
+ */
+ class AISTX_API Build_Frame : virtual public gr_sync_block
+ {
+ public:
+ typedef boost::shared_ptr<Build_Frame> sptr;
+
+ /*!
+ * \brief Builds an AIS Frame of 256 bytes.
+ *
+ * This module does, in order:
+ * 1. Payload (NMEA sentence) encoding (6 bits per ASCII)
+ * 2. CRC generation [16]
+ * 3. Reverse bit order (payload + crc)
+ * 4. Stuffing (payload + crc)
+ * 5. Headers (Preamble [24], Start [8], Trailer [8], 0x00 Padding)
+ * 6. NRZI conversion (enabled by default)
+ *
+ */
+ static sptr make(const char *sentence, bool repeat, bool enable_NRZI);
+ };
+
+ } // namespace AISTX
+} // namespace gr
+
+#endif /* INCLUDED_AISTX_BUILD_FRAME_H */
+
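A compact Python sketch of steps 3 to 5 from the comment above (per-byte bit reversal, bit stuffing, preamble/flag framing and zero padding). It mirrors the documented order rather than the exact C++ code, and assumes the payload-plus-CRC is a list of 0/1 integers whose length is a multiple of 8 and whose stuffed form still fits a 256-bit frame:

    def reverse_per_byte(bits):
        # step 3: swap the bit order inside every 8-bit group
        out = []
        for i in range(0, len(bits), 8):
            out.extend(reversed(bits[i:i + 8]))
        return out

    def stuff(bits):
        # step 4: insert a 0 after any run of five consecutive 1s
        out, run = [], 0
        for b in bits:
            out.append(b)
            run = run + 1 if b else 0
            if run == 5:
                out.append(0)
                run = 0
        return out

    PREAMBLE = [1, 0] * 12           # 24 alternating training bits
    FLAG = [0, 1, 1, 1, 1, 1, 1, 0]  # HDLC start/stop flag (0x7E)

    def frame(payload_and_crc):
        # step 5: preamble + start flag + body + stop flag, zero-padded to 256 bits
        body = stuff(reverse_per_byte(payload_and_crc))
        bits = PREAMBLE + FLAG + body + FLAG
        return bits + [0] * (256 - len(bits))

The NRZI conversion of step 6 is sketched separately after nrz_to_nrzi.h below.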
diff --git a/gr-aistx/include/AISTX/CMakeLists.txt b/gr-aistx/include/AISTX/CMakeLists.txt
new file mode 100644
index 0000000..257680e
--- /dev/null
+++ b/gr-aistx/include/AISTX/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright 2011,2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Install public header files
+########################################################################
+install(FILES
+ api.h
+ nrz_to_nrzi.h
+ Build_Frame.h
+ DebugME.h DESTINATION include/AISTX
+)
diff --git a/gr-aistx/include/AISTX/DebugME.h b/gr-aistx/include/AISTX/DebugME.h
new file mode 100644
index 0000000..ee0fa41
--- /dev/null
+++ b/gr-aistx/include/AISTX/DebugME.h
@@ -0,0 +1,53 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef INCLUDED_AISTX_DEBUGME_H
+#define INCLUDED_AISTX_DEBUGME_H
+
+#include <AISTX/api.h>
+#include <gr_block.h>
+
+namespace gr {
+ namespace AISTX {
+
+ /*!
+ * \brief Print the incoming BYTE sequence
+ * \ingroup AISTX
+ *
+ */
+ class AISTX_API DebugME : virtual public gr_block
+ {
+ public:
+ typedef boost::shared_ptr<DebugME> sptr;
+
+ /*!
+       * \brief Print the incoming BYTE sequence as a sequence of hex values
+       *
+       * Walks the incoming sequence (const char *in) and prints each item in hex
+ */
+ static sptr make(size_t itemsize);
+ };
+
+ } // namespace AISTX
+} // namespace gr
+
+#endif /* INCLUDED_AISTX_DEBUGME_H */
+
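The dump format is simply one escaped hex byte per input item; a Python one-liner that produces the same rendering for a byte stream (illustrative only):

    def hexdump(samples):
        # the "\xNN" rendering DebugME prints for byte input
        return ''.join('\\x%02X' % b for b in bytearray(samples))

    print(hexdump(b'\x7e\x55\xaa'))   # \x7E\x55\xAA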
diff --git a/gr-aistx/include/AISTX/api.h b/gr-aistx/include/AISTX/api.h
new file mode 100644
index 0000000..bcbb7b9
--- /dev/null
+++ b/gr-aistx/include/AISTX/api.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef INCLUDED_AISTX_API_H
+#define INCLUDED_AISTX_API_H
+
+#include <gruel/attributes.h>
+
+#ifdef gnuradio_AISTX_EXPORTS
+# define AISTX_API __GR_ATTR_EXPORT
+#else
+# define AISTX_API __GR_ATTR_IMPORT
+#endif
+
+#endif /* INCLUDED_AISTX_API_H */
diff --git a/gr-aistx/include/AISTX/nrz_to_nrzi.h b/gr-aistx/include/AISTX/nrz_to_nrzi.h
new file mode 100644
index 0000000..3fb2821
--- /dev/null
+++ b/gr-aistx/include/AISTX/nrz_to_nrzi.h
@@ -0,0 +1,54 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef INCLUDED_AISTX_NRZ_TO_NRZI_H
+#define INCLUDED_AISTX_NRZ_TO_NRZI_H
+
+#include <AISTX/api.h>
+#include <gr_block.h>
+
+namespace gr {
+ namespace AISTX {
+
+ /*!
+ * \brief Convert from NRZ to NRZI
+ * \ingroup AISTX
+ *
+ */
+ class AISTX_API nrz_to_nrzi : virtual public gr_block
+ {
+ public:
+ typedef boost::shared_ptr<nrz_to_nrzi> sptr;
+
+ /*!
+ * \brief Convert from NRZ to NRZI
+ *
+ * Convert a BYTE sequence from NRZ to NRZI. Note: This is also implemented in the Build_Frame component.
+ *
+ */
+ static sptr make();
+ };
+
+ } // namespace AISTX
+} // namespace gr
+
+#endif /* INCLUDED_AISTX_NRZ_TO_NRZI_H */
+
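In this block's convention (as in HDLC/AIS) a 0 bit produces a level transition and a 1 bit holds the previous level. A minimal Python sketch of the same mapping, assuming 0/1 integer inputs and an initial line level of 0, matching d_prev_nrzi_bit in the implementation:

    def nrz_to_nrzi(bits, prev=0):
        out = []
        for b in bits:
            prev = prev ^ 1 if b == 0 else prev   # toggle on 0, hold on 1
            out.append(prev)
        return out

    print(nrz_to_nrzi([0, 1, 1, 1, 1, 1, 1, 0]))  # 0x7E flag -> [1, 1, 1, 1, 1, 1, 1, 0]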
diff --git a/gr-aistx/lib/Build_Frame_impl.cc b/gr-aistx/lib/Build_Frame_impl.cc
new file mode 100644
index 0000000..a0daf46
--- /dev/null
+++ b/gr-aistx/lib/Build_Frame_impl.cc
@@ -0,0 +1,469 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <gr_io_signature.h>
+#include "Build_Frame_impl.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define LEN_PREAMBLE 24
+#define LEN_START 8
+#define LEN_CRC 16
+#define LEN_FRAME_MAX 256
+
+#define PREAMBLE_MARK 101010101010101010101010 // 24 bits
+#define START_MARK 01111110                    // 8 bits
+
+#define DEBUG 0
+
+namespace gr {
+ namespace AISTX {
+
+ Build_Frame::sptr
+ Build_Frame::make(const char *sentence, bool repeat, bool enable_NRZI)
+ {
+ return gnuradio::get_initial_sptr
+ (new Build_Frame_impl(sentence, repeat, enable_NRZI));
+ }
+
+ /*
+ * The private constructor
+ */
+ Build_Frame_impl::Build_Frame_impl(const char *sentence, bool repeat, bool enable_NRZI)
+ : gr_sync_block("Build_Frame",
+ gr_make_io_signature(0, 0, 0),
+ gr_make_io_signature(1, 1, sizeof(unsigned char))),
+ d_repeat(repeat),
+ d_enable_NRZI(enable_NRZI)
+ {
+ unsigned short REMAINDER_TO_EIGHT, PADDING_TO_EIGHT; // to pad the payload to a multiple of 8
+
+ LEN_PAYLOAD = strlen(sentence);
+ if (LEN_PAYLOAD>168)
+ printf ("Frame padding disabled. Multiple packets.\n");
+
+ // IMPORTANT
+ REMAINDER_TO_EIGHT = LEN_PAYLOAD%8;
+ if (REMAINDER_TO_EIGHT==0) {
+ payload = (char *) malloc(LEN_PAYLOAD + LEN_CRC);
+        // n.b. the sentence arrives as a string of ASCII '0'/'1' characters
+ for (int i=0; i<LEN_PAYLOAD; i++)
+ payload[i]=sentence[i]-48;
+ }
+ else if (REMAINDER_TO_EIGHT>0){
+
+ PADDING_TO_EIGHT = 8-REMAINDER_TO_EIGHT;
+ payload = (char *) malloc(LEN_PAYLOAD + PADDING_TO_EIGHT + LEN_CRC);
+
+ for (int i=0; i<LEN_PAYLOAD; i++)
+ payload[i]=sentence[i]-48;
+
+ printf ("Detected a payload which is *not* multiple of 8 (%d bits). Padding with %d bits to %d\n", LEN_PAYLOAD, PADDING_TO_EIGHT, LEN_PAYLOAD + PADDING_TO_EIGHT);
+ memset (payload + LEN_PAYLOAD, 0x0, PADDING_TO_EIGHT);
+
+        LEN_PAYLOAD += PADDING_TO_EIGHT; // update PAYLOAD LENGTH
+ }
+
+ dump_buffer(payload, LEN_PAYLOAD);
+
+ // crc
+      char crc[16]; // 16 CRC bits, stored one bit per char (unpacked)
+ char input_crc[LEN_PAYLOAD];
+ memcpy (input_crc, payload, LEN_PAYLOAD);
+ compute_crc (input_crc, crc, LEN_PAYLOAD);
+ memcpy (payload+LEN_PAYLOAD, crc, LEN_CRC);
+
+ // reverse
+ reverse_bit_order (payload, LEN_PAYLOAD+LEN_CRC);
+
+ }
+
+ /*
+ * Our virtual destructor.
+ */
+ Build_Frame_impl::~Build_Frame_impl()
+ {
+ }
+
+
+ void Build_Frame_impl::dump_buffer(const char *b, int buffer_size)
+ {
+ int k = 0;
+ for(; k < buffer_size; k++)
+ printf("%d", b[k]);
+ printf("\n");
+ }
+
+ // buffer must have length >= sizeof(int) + 1
+ // Write to the buffer backwards so that the binary representation
+ // is in the correct order i.e. the LSB is on the far right
+ // instead of the far left of the printed string
+ char * Build_Frame_impl::int2bin(int a, char *buffer, int buf_size) {
+ buffer += (buf_size - 1);
+
+ for (int i = 31; i >= 0; i--) {
+ *buffer-- = (a & 1) + '0';
+
+ a >>= 1;
+ }
+
+ return buffer;
+ }
+
+    // bit-stuffing function: insert a 0 after five consecutive 1 bits
+ int Build_Frame_impl::stuff (const char *in, char *out, int l_in)
+ {
+ int i=0, j=0, consecutives=0, l_out=0;
+
+ while(i<l_in) {
+
+ if (in[i] & 0x01)
+ consecutives++;
+ else
+ consecutives=0;
+
+ out[j++]=in[i++];
+ l_out++;
+
+ if (consecutives==5) {
+ out[j++]=0x0;
+ l_out++;
+ consecutives=0;
+ }
+ }
+ return l_out;
+ }
+
+ void Build_Frame_impl::pack (int orig_ascii, char *ret, int bits_per_byte)
+ {
+ // go down to fit in 6 bits
+ int ascii = orig_ascii-48;
+ if(ascii > 39)
+ ascii -= 8;
+
+ /* if (DEBUG)*/
+ /* printf ("\nAscii: orig=%d scaled=%d\n", orig_ascii, ascii);*/
+
+ char binary[6];
+ int y = 0;
+
+ if (ascii==0)
+ memset (binary, 0x0, 6);
+ else
+ while (ascii!=1) {
+ if (ascii % 2 == 0)
+ binary[y] = 0x0;
+ else if (ascii % 2 == 1)
+ binary[y] = 0x1;
+ ascii /= 2;
+ y++;
+ }
+
+ if (ascii==1) {
+ binary[y] = 0x1;
+ y++;
+ }
+
+ if(y < 6) { // fill in space
+ for(; y < 6; y++)
+ binary[y] = 0x0;
+ }
+
+      for(y = 0; y < 6; y++) // reverse
+ ret[y] = binary[5 - y];
+ ret[y]='\0';
+
+ /* if (DEBUG)*/
+ /* printf("Binary = %s, Ret = %s\n", binary, ret );*/
+
+ }
+
+ void Build_Frame_impl::nrz_to_nrzi(char *data, int length)
+ {
+ unsigned short d_prev_nrzi_bit = 0;
+ unsigned short nrz_bit, nrzi_bit;
+
+ for (int i = 0; i < length; i++)
+ {
+ nrz_bit = data[i];
+
+ if(nrz_bit == 0)
+ {
+ nrzi_bit = d_prev_nrzi_bit ^ 1;
+ }
+ else
+ {
+ nrzi_bit = d_prev_nrzi_bit;
+ }
+
+ data[i] = nrzi_bit;
+ d_prev_nrzi_bit = nrzi_bit;
+ }
+ }
+
+ void Build_Frame_impl::reverse_bit_order(char *data, int length)
+ {
+ int tmp = 0;
+ for(int i = 0; i < length/8; i++) {
+ for(int j = 0; j < 4; j++) {
+ tmp = data[i*8 + j];
+ data[i*8 + j] = data[i*8 + 7-j];
+ data[i*8 + 7-j] = tmp;
+ }
+ }
+ }
+
+ unsigned long Build_Frame_impl::unpack(char *buffer, int start, int length)
+ {
+ unsigned long ret = 0;
+ for(int i = start; i < (start+length); i++) {
+ ret <<= 1;
+ ret |= (buffer[i] & 0x01);
+ }
+ return ret;
+ }
+
+ void Build_Frame_impl::compute_crc(char *buffer, char *ret, unsigned int len) // Calculates CRC-checksum from unpacked data
+ {
+ static const unsigned short crc_itu16_table[] =
+ {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+ };
+
+ int crc=0xffff;
+ int i = 0;
+ char temp[8];
+ int datalen = len/8;
+
+// // go to char (this can be optimized)
+// printf ("INPUT:");
+// for(int j=0;j<len;j++) {
+// buffer[j]=buffer[j]+0x30;
+// printf ("%x", buffer[j]); }
+// printf ("\n");
+
+ char data[256];
+ for(int j=0;j<datalen;j++) //this unpacks the data in preparation for calculating CRC
+ {
+ data[j] = unpack(buffer, j*8, 8);
+ }
+
+ for (i = 0; i < datalen; i++)
+ crc = (crc >> 8) ^ crc_itu16_table[(crc ^ data[i]) & 0xFF];
+
+ crc=(crc & 0xFFFF)^0xFFFF;
+// printf("%X\n", crc);
+
+ int2bin(crc, ret, 16);
+// printf ("CRC ASCII1 = %s\n", ret);
+ //dump_buffer(ret, 16);
+
+ reverse_bit_order (ret, 16); //revert crc bit in byte
+
+ int2bin(crc, ret, 16);
+ strncpy(temp,ret+8,8); //swap the two crc byte
+ strncpy(ret+8,ret,8);
+ strncpy(ret,temp,8);
+
+ // back to binary
+ for(int j=0;j<16;j++)
+ ret[j]=ret[j]-0x30;
+
+// if (DEBUG) {
+// printf("CRC 2=\n");
+// dump_buffer(ret,16);
+// }
+}
+
+ void Build_Frame_impl::byte_packing(char *input_frame, unsigned char *out_byte, unsigned int len) {
+ for (int i = 0; i < len/8; i++) {
+ char tmp[8];
+ memcpy(tmp, &input_frame[i*8], 8);
+ out_byte[i] = tmp[0]*128+tmp[1]*64+tmp[2]*32+tmp[3]*16+tmp[4]*8+tmp[5]*4+tmp[6]*2+tmp[7];
+
+ //out_byte[i] = input_frame[i*8]*128+input_frame[i*8+1]*64+input_frame[i*8+2]*32+input_frame[i*8+3]*16+input_frame[i*8+4]*8+input_frame[i*8+5]*4+input_frame[i*8+6]*2+input_frame[i*8+7];
+// printf ("%X", out_byte[i]);
+ }
+// printf("\n");
+ }
+
+ int
+ Build_Frame_impl::work(int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items)
+ {
+ unsigned char *out = (unsigned char *) output_items[0];
+
+// B3co>HP00 P ;8 ;56 RD =Is3 w sU kP06 CRC
+// 010010000011101011110111001110011000100000000000000000 100000 001011001000 001011000101000110 100010010100 001101011001111011000011 111111 111011100101 110011100000000000000110 0011000010001111
+// printf("\nENCODING:\t%s\n", sentence);
+// printf("LEN_SENTENCE:\t%d\n", LEN_SENTENCE);
+// printf("LEN_PAYLOAD:\t%d\n", LEN_PAYLOAD);
+// printf("LEN_CRC:\t%d\n", LEN_CRC);
+
+// // payload encoding
+// char buffer[6];
+
+// for (int i=0; i<LEN_SENTENCE; i++) {
+// pack((int)d_sentence[i], buffer, 6);
+// memcpy (payload+(i*6), buffer , 6);
+// //p = payload+(i*6); //PASTA moved and corrected in the next IF
+//// if (DEBUG) printf("(len(payload)=%d) (len(sentence)=%d) %c -> %s \n", strlen(payload), i+1, sentence[i], payload);
+// }
+
+
+// // crc
+// char crc[16]; // 2 gnuradio bytes of CRC
+// char input_crc[LEN_PAYLOAD];
+// memcpy (input_crc, payload, LEN_PAYLOAD);
+// compute_crc (input_crc, crc, LEN_PAYLOAD);
+// memcpy (payload+LEN_PAYLOAD, crc, LEN_CRC);
+
+ // reverse bits (payload + crc)
+// reverse_bit_order (payload, LEN_PAYLOAD+LEN_CRC);
+//// printf("Dump buffer after reverse + crc= ");
+//// dump_buffer(payload, LEN_PAYLOAD+LEN_CRC);
+
+ // stuffing (payload + crc)
+ if (LEN_PAYLOAD<=168) {
+
+ char stuffed_payload[LEN_FRAME_MAX];
+ int LEN_STUFFED_PAYLOAD = stuff (payload, stuffed_payload, LEN_PAYLOAD+LEN_CRC);
+
+ //// frame generation /////
+ char frame[LEN_FRAME_MAX];
+ unsigned char byte_frame[LEN_FRAME_MAX/8]; //PASTA
+ memset (frame, 0x0, LEN_FRAME_MAX);
+
+ // headers
+ memcpy (frame, "\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0", LEN_PREAMBLE);
+ memcpy (frame+LEN_PREAMBLE, "\0\1\1\1\1\1\1\0", LEN_START);
+ // payload + crc
+ memcpy (frame+LEN_PREAMBLE+LEN_START, stuffed_payload, LEN_STUFFED_PAYLOAD);
+ // trailer
+ memcpy (frame+LEN_PREAMBLE+LEN_START+LEN_STUFFED_PAYLOAD, "\0\1\1\1\1\1\1\0", 8);
+
+ // padding
+ int LEN_PADDING = LEN_FRAME_MAX-(LEN_PREAMBLE+LEN_START+LEN_STUFFED_PAYLOAD+LEN_START);
+ memset (frame+LEN_PREAMBLE+LEN_START+LEN_STUFFED_PAYLOAD+LEN_START, 0x0, LEN_PADDING);
+ int len_frame_real = LEN_FRAME_MAX; // 256
+
+ // NRZI Conversion
+ nrz_to_nrzi (frame, len_frame_real);
+ printf ("Sent Frame (NRZI enabled) = ");
+
+ dump_buffer(frame, len_frame_real);
+
+      // Binary conversion (to use with GMSK mod's byte_to_symb)
+ byte_packing(frame, byte_frame, len_frame_real);
+
+ // output
+ memcpy (out, byte_frame, len_frame_real/8);
+ noutput_items = len_frame_real/8;
+
+
+ }
+ else {
+
+ char stuffed_payload[1024];
+ int LEN_STUFFED_PAYLOAD = stuff (payload, stuffed_payload, LEN_PAYLOAD+LEN_CRC);
+
+ //// frame generation /////
+ int LEN_FRAME = LEN_PREAMBLE + LEN_START*2 + LEN_STUFFED_PAYLOAD;
+ char frame[LEN_FRAME];
+ unsigned char byte_frame[LEN_FRAME/8]; //PASTA
+ memset (frame, 0x0, LEN_FRAME);
+
+ // headers
+ memcpy (frame, "\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0\1\0", LEN_PREAMBLE);
+ memcpy (frame+LEN_PREAMBLE, "\0\1\1\1\1\1\1\0", LEN_START);
+ // payload + crc
+ memcpy (frame+LEN_PREAMBLE+LEN_START, stuffed_payload, LEN_STUFFED_PAYLOAD);
+ // trailer
+ memcpy (frame+LEN_PREAMBLE+LEN_START+LEN_STUFFED_PAYLOAD, "\0\1\1\1\1\1\1\0", 8);
+
+ int len_frame_real = LEN_FRAME;
+
+ // NRZI Conversion
+ nrz_to_nrzi (frame, len_frame_real);
+ printf ("Sent Frame (NRZI enabled) = ");
+
+ dump_buffer(frame, len_frame_real);
+
+      // Binary conversion (to use with GMSK mod's byte_to_symb)
+ byte_packing(frame, byte_frame, len_frame_real);
+
+ // output
+ memcpy (out, byte_frame, len_frame_real/8);
+ noutput_items = len_frame_real/8;
+
+ }
+
+
+ // some sleep here
+// int r = (int) rand() % 1000;
+// usleep(1000*r); // -6
+ //sleep(1);
+ usleep(100000);
+
+ // Tell runtime system how many output items we produced.
+ return noutput_items;
+ }
+
+ } /* namespace AISTX */
+} /* namespace gr */
+
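The crc_itu16_table above is the byte-wise lookup table for CRC-16/X.25 (reflected polynomial 0x8408, initial value 0xFFFF, final XOR 0xFFFF), the checksum AIS uses. A bit-at-a-time Python sketch that should produce the same value over packed payload bytes:

    def crc16_x25(data):
        crc = 0xFFFF
        for byte in bytearray(data):
            crc ^= byte
            for _ in range(8):
                crc = (crc >> 1) ^ 0x8408 if crc & 1 else crc >> 1
        return crc ^ 0xFFFF

    print('%04X' % crc16_x25(b'123456789'))   # 906E, the standard CRC-16/X.25 check value

As the comments in compute_crc() note, the block additionally reorders the 16 CRC bits (per-byte reversal and a byte swap) before appending them to the payload.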
diff --git a/gr-aistx/lib/Build_Frame_impl.h b/gr-aistx/lib/Build_Frame_impl.h
new file mode 100644
index 0000000..6a6648f
--- /dev/null
+++ b/gr-aistx/lib/Build_Frame_impl.h
@@ -0,0 +1,65 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef INCLUDED_AISTX_BUILD_FRAME_IMPL_H
+#define INCLUDED_AISTX_BUILD_FRAME_IMPL_H
+
+#include <AISTX/Build_Frame.h>
+
+#define __VERSION 0.3
+
+namespace gr {
+ namespace AISTX {
+
+ class Build_Frame_impl : public Build_Frame
+ {
+ private:
+ bool d_repeat;
+ bool d_enable_NRZI;
+ //char * d_sentence;
+      char *payload; // [the 0/1 representation of the sentence as taken from input]
+ unsigned short LEN_SENTENCE;
+ unsigned short LEN_PAYLOAD;
+
+ public:
+ Build_Frame_impl(const char *sentence, bool repeat, bool enable_NRZI);
+ ~Build_Frame_impl();
+
+ void dump_buffer(const char *b, int buffer_size);
+ char * int2bin(int a, char *buffer, int buf_size);
+ int stuff (const char *in, char *out, int l_in);
+ void pack (int orig_ascii, char *ret, int bits_per_byte);
+ void nrz_to_nrzi(char *data, int length);
+ void reverse_bit_order(char *data, int length);
+ unsigned long unpack(char *buffer, int start, int length);
+ void compute_crc(char *buffer, char *ret, unsigned int len);
+ void byte_packing(char *input_frame, unsigned char *out_byte, unsigned int len);
+
+ // Where all the action really happens
+ int work(int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items);
+ };
+
+ } // namespace AISTX
+} // namespace gr
+
+#endif /* INCLUDED_AISTX_BUILD_FRAME_IMPL_H */
+
diff --git a/gr-aistx/lib/CMakeLists.txt b/gr-aistx/lib/CMakeLists.txt
new file mode 100644
index 0000000..c520e81
--- /dev/null
+++ b/gr-aistx/lib/CMakeLists.txt
@@ -0,0 +1,67 @@
+# Copyright 2011,2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Setup library
+########################################################################
+include(GrPlatform) #define LIB_SUFFIX
+
+include_directories(${Boost_INCLUDE_DIR})
+link_directories(${Boost_LIBRARY_DIRS})
+list(APPEND AISTX_sources
+ nrz_to_nrzi_impl.cc
+ Build_Frame_impl.cc
+ DebugME_impl.cc )
+
+add_library(gnuradio-AISTX SHARED ${AISTX_sources})
+target_link_libraries(gnuradio-AISTX ${Boost_LIBRARIES} ${GRUEL_LIBRARIES} ${GNURADIO_CORE_LIBRARIES})
+set_target_properties(gnuradio-AISTX PROPERTIES DEFINE_SYMBOL "gnuradio_AISTX_EXPORTS")
+
+########################################################################
+# Install built library files
+########################################################################
+install(TARGETS gnuradio-AISTX
+ LIBRARY DESTINATION lib${LIB_SUFFIX} # .so/.dylib file
+ ARCHIVE DESTINATION lib${LIB_SUFFIX} # .lib file
+ RUNTIME DESTINATION bin # .dll file
+)
+
+########################################################################
+# Build and register unit test
+########################################################################
+include(GrTest)
+
+include_directories(${CPPUNIT_INCLUDE_DIRS})
+
+list(APPEND test_AISTX_sources
+ ${CMAKE_CURRENT_SOURCE_DIR}/test_AISTX.cc
+ ${CMAKE_CURRENT_SOURCE_DIR}/qa_AISTX.cc
+)
+
+add_executable(test-AISTX ${test_AISTX_sources})
+
+target_link_libraries(
+ test-AISTX
+ ${GNURADIO_CORE_LIBRARIES}
+ ${Boost_LIBRARIES}
+ ${CPPUNIT_LIBRARIES}
+ gnuradio-AISTX
+)
+
+GR_ADD_TEST(test_AISTX test-AISTX)
diff --git a/gr-aistx/lib/DebugME_impl.cc b/gr-aistx/lib/DebugME_impl.cc
new file mode 100644
index 0000000..9b587be
--- /dev/null
+++ b/gr-aistx/lib/DebugME_impl.cc
@@ -0,0 +1,98 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <gr_io_signature.h>
+#include "DebugME_impl.h"
+#include <stdio.h>
+
+namespace gr {
+ namespace AISTX {
+
+ DebugME::sptr
+ DebugME::make(size_t itemsize)
+ {
+ return gnuradio::get_initial_sptr
+ (new DebugME_impl(itemsize));
+ }
+
+ /*
+ * The private constructor
+ */
+ DebugME_impl::DebugME_impl(size_t itemsize)
+ : gr_block("DebugME",
+ gr_make_io_signature(1, 1, itemsize),
+ gr_make_io_signature(0, 0, 0)),
+ d_itemsize(itemsize)
+ {}
+
+ /*
+ * Our virtual destructor.
+ */
+ DebugME_impl::~DebugME_impl()
+ {
+ }
+
+ void
+ DebugME_impl::forecast (int noutput_items, gr_vector_int &ninput_items_required)
+ {
+ /* <+forecast+> e.g. ninput_items_required[0] = noutput_items */
+ }
+
+ int
+ DebugME_impl::general_work (int noutput_items,
+ gr_vector_int &ninput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items)
+ {
+
+ // char / byte
+ if (d_itemsize == 1) {
+ const unsigned char *in = (const unsigned char *) input_items[0];
+ for(int i = 0; i<ninput_items[0]; ++i)
+ printf ("\\x%.2X", in[i]);
+ }
+ // float
+ else if (d_itemsize == 4) {
+ const float *in = (const float *) input_items[0];
+ for(int i = 0; i<ninput_items[0]; ++i)
+ printf ("\\%.0f", in[i]);
+ }
+ // complex
+ else
+ printf ("Complexes not supported yet!");
+
+ std::cout << std::endl;
+
+ // Do <+signal processing+>
+ // Tell runtime system how many input items we consumed on
+ // each input stream.
+ consume_each (noutput_items);
+
+ // Tell runtime system how many output items we produced.
+ return 0;
+ }
+
+ } /* namespace AISTX */
+} /* namespace gr */
+
diff --git a/gr-aistx/lib/DebugME_impl.h b/gr-aistx/lib/DebugME_impl.h
new file mode 100644
index 0000000..ae973cb
--- /dev/null
+++ b/gr-aistx/lib/DebugME_impl.h
@@ -0,0 +1,51 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef INCLUDED_AISTX_DEBUGME_IMPL_H
+#define INCLUDED_AISTX_DEBUGME_IMPL_H
+
+#include <AISTX/DebugME.h>
+
+namespace gr {
+ namespace AISTX {
+
+ class DebugME_impl : public DebugME
+ {
+ private:
+ size_t d_itemsize;
+
+ public:
+ DebugME_impl(size_t itemsize);
+ ~DebugME_impl();
+
+ // Where all the action really happens
+ void forecast (int noutput_items, gr_vector_int &ninput_items_required);
+
+ int general_work(int noutput_items,
+ gr_vector_int &ninput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items);
+ };
+
+ } // namespace AISTX
+} // namespace gr
+
+#endif /* INCLUDED_AISTX_DEBUGME_IMPL_H */
+
diff --git a/gr-aistx/lib/nrz_to_nrzi_impl.cc b/gr-aistx/lib/nrz_to_nrzi_impl.cc
new file mode 100644
index 0000000..98e7e4d
--- /dev/null
+++ b/gr-aistx/lib/nrz_to_nrzi_impl.cc
@@ -0,0 +1,108 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <gr_io_signature.h>
+#include "nrz_to_nrzi_impl.h"
+#include <stdio.h>
+
+namespace gr {
+ namespace AISTX {
+
+ nrz_to_nrzi::sptr
+ nrz_to_nrzi::make()
+ {
+ return gnuradio::get_initial_sptr
+ (new nrz_to_nrzi_impl());
+ }
+
+ /*
+ * The private constructor
+ */
+ nrz_to_nrzi_impl::nrz_to_nrzi_impl()
+ : gr_block("nrz_to_nrzi",
+ gr_make_io_signature(1, 1, sizeof(unsigned char)),
+ gr_make_io_signature(1, 1, sizeof(unsigned char)))
+ {}
+
+ /*
+ * Our virtual destructor.
+ */
+ nrz_to_nrzi_impl::~nrz_to_nrzi_impl()
+ {
+ }
+
+ void
+ nrz_to_nrzi_impl::forecast (int noutput_items, gr_vector_int &ninput_items_required)
+ {
+ /* <+forecast+> e.g. ninput_items_required[0] = noutput_items */
+ }
+
+ int
+ nrz_to_nrzi_impl::general_work (int noutput_items,
+ gr_vector_int &ninput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items)
+ {
+ const unsigned char *in = (const unsigned char *) input_items[0];
+ unsigned char *out = (unsigned char *) output_items[0];
+ unsigned char nrzi_bit;
+ unsigned char nrz_bit;
+ unsigned char d_prev_nrzi_bit = 0;
+
+ for(int i = 0;i<noutput_items;++i)
+ printf("%d", in[i]);
+ printf("\n");
+
+ for (int i = 0; i < noutput_items; i++)
+ {
+ nrz_bit = in[i];
+
+ if(nrz_bit == 0)
+ {
+ nrzi_bit = d_prev_nrzi_bit ^ 1;
+ }
+ else
+ {
+ nrzi_bit = d_prev_nrzi_bit;
+ }
+ out[i] = nrzi_bit;
+ d_prev_nrzi_bit = nrzi_bit;
+
+ }
+
+ for(int i = 0;i<noutput_items;++i)
+ printf("%d", out[i]);
+ printf("\n");
+
+ // Tell runtime system how many input items we consumed on
+ // each input stream.
+ consume_each (noutput_items);
+
+ // Tell runtime system how many output items we produced.
+ return noutput_items;
+ }
+
+ } /* namespace AISTX */
+} /* namespace gr */
+
diff --git a/gr-aistx/lib/nrz_to_nrzi_impl.h b/gr-aistx/lib/nrz_to_nrzi_impl.h
new file mode 100644
index 0000000..6484f53
--- /dev/null
+++ b/gr-aistx/lib/nrz_to_nrzi_impl.h
@@ -0,0 +1,51 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2013 <+YOU OR YOUR COMPANY+>.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef INCLUDED_AISTX_NRZ_TO_NRZI_IMPL_H
+#define INCLUDED_AISTX_NRZ_TO_NRZI_IMPL_H
+
+#include <AISTX/nrz_to_nrzi.h>
+
+namespace gr {
+ namespace AISTX {
+
+ class nrz_to_nrzi_impl : public nrz_to_nrzi
+ {
+ private:
+ // Nothing to declare in this block.
+
+ public:
+ nrz_to_nrzi_impl();
+ ~nrz_to_nrzi_impl();
+
+ // Where all the action really happens
+ void forecast (int noutput_items, gr_vector_int &ninput_items_required);
+
+ int general_work(int noutput_items,
+ gr_vector_int &ninput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items);
+ };
+
+ } // namespace AISTX
+} // namespace gr
+
+#endif /* INCLUDED_AISTX_NRZ_TO_NRZI_IMPL_H */
+
diff --git a/gr-aistx/lib/qa_AISTX.cc b/gr-aistx/lib/qa_AISTX.cc
new file mode 100644
index 0000000..1a101f1
--- /dev/null
+++ b/gr-aistx/lib/qa_AISTX.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * This class gathers together all the test cases for the gr-aistx
+ * directory into a single test suite. As you create new test cases,
+ * add them here.
+ */
+
+#include "qa_AISTX.h"
+
+CppUnit::TestSuite *
+qa_AISTX::suite()
+{
+ CppUnit::TestSuite *s = new CppUnit::TestSuite("AISTX");
+
+ return s;
+}
diff --git a/gr-aistx/lib/qa_AISTX.h b/gr-aistx/lib/qa_AISTX.h
new file mode 100644
index 0000000..7cdf000
--- /dev/null
+++ b/gr-aistx/lib/qa_AISTX.h
@@ -0,0 +1,38 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2012 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _QA_AISTX_H_
+#define _QA_AISTX_H_
+
+#include <gruel/attributes.h>
+#include <cppunit/TestSuite.h>
+
+//! collect all the tests for the gr-aistx directory
+
+class __GR_ATTR_EXPORT qa_AISTX
+{
+ public:
+  //! return suite of tests for all of the gr-aistx directory
+ static CppUnit::TestSuite *suite();
+};
+
+#endif /* _QA_AISTX_H_ */
diff --git a/gr-aistx/lib/test_AISTX.cc b/gr-aistx/lib/test_AISTX.cc
new file mode 100644
index 0000000..53ce43a
--- /dev/null
+++ b/gr-aistx/lib/test_AISTX.cc
@@ -0,0 +1,43 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2012 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include <cppunit/TextTestRunner.h>
+#include <cppunit/XmlOutputter.h>
+
+#include <gr_unittests.h>
+#include "qa_AISTX.h"
+#include <iostream>
+
+int
+main (int argc, char **argv)
+{
+ CppUnit::TextTestRunner runner;
+ std::ofstream xmlfile(get_unittest_path("AISTX.xml").c_str());
+ CppUnit::XmlOutputter *xmlout = new CppUnit::XmlOutputter(&runner.result(), xmlfile);
+
+ runner.addTest(qa_AISTX::suite());
+ runner.setOutputter(xmlout);
+
+ bool was_successful = runner.run("", false);
+
+ return was_successful ? 0 : 1;
+}
diff --git a/gr-aistx/python/CMakeLists.txt b/gr-aistx/python/CMakeLists.txt
new file mode 100644
index 0000000..b79d4bf
--- /dev/null
+++ b/gr-aistx/python/CMakeLists.txt
@@ -0,0 +1,46 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Include python install macros
+########################################################################
+include(GrPython)
+if(NOT PYTHONINTERP_FOUND)
+ return()
+endif()
+
+########################################################################
+# Install python sources
+########################################################################
+GR_PYTHON_INSTALL(
+ FILES
+ __init__.py
+ DESTINATION ${GR_PYTHON_DIR}/AISTX
+)
+
+########################################################################
+# Handle the unit tests
+########################################################################
+include(GrTest)
+
+set(GR_TEST_TARGET_DEPS gnuradio-AISTX)
+set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)
+GR_ADD_TEST(qa_nrz_to_nrzi ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_nrz_to_nrzi.py)
+GR_ADD_TEST(qa_Build_Frame ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_Build_Frame.py)
+GR_ADD_TEST(qa_DebugME ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_DebugME.py)
diff --git a/gr-aistx/python/__init__.py b/gr-aistx/python/__init__.py
new file mode 100644
index 0000000..c1d8e3f
--- /dev/null
+++ b/gr-aistx/python/__init__.py
@@ -0,0 +1,54 @@
+#
+# Copyright 2008,2009 Free Software Foundation, Inc.
+#
+# This application is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This application is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+# The presence of this file turns this directory into a Python package
+
+'''
+This is the GNU Radio AISTX module. It provides the AIS Frame Builder,
+NRZ-to-NRZI, and DebugME blocks used by the AIS BlackToolkit flow graphs.
+'''
+
+# ----------------------------------------------------------------
+# Temporary workaround for ticket:181 (swig+python problem)
+import sys
+_RTLD_GLOBAL = 0
+try:
+ from dl import RTLD_GLOBAL as _RTLD_GLOBAL
+except ImportError:
+ try:
+ from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL
+ except ImportError:
+ pass
+
+if _RTLD_GLOBAL != 0:
+ _dlopenflags = sys.getdlopenflags()
+ sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL)
+# ----------------------------------------------------------------
+
+
+# import swig generated symbols into the AISTX namespace
+from AISTX_swig import *
+
+# import any pure python here
+#
+
+# ----------------------------------------------------------------
+# Tail of workaround
+if _RTLD_GLOBAL != 0:
+ sys.setdlopenflags(_dlopenflags) # Restore original flags
+# ----------------------------------------------------------------
diff --git a/gr-aistx/python/qa_Build_Frame.py b/gr-aistx/python/qa_Build_Frame.py
new file mode 100755
index 0000000..8542990
--- /dev/null
+++ b/gr-aistx/python/qa_Build_Frame.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 <+YOU OR YOUR COMPANY+>.
+#
+# This is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+from gnuradio import gr, gr_unittest
+import AISTX_swig as AISTX
+
+class qa_Build_Frame (gr_unittest.TestCase):
+
+ def setUp (self):
+ self.tb = gr.top_block ()
+
+ def tearDown (self):
+ self.tb = None
+
+ def test_001_t (self):
+ # set up fg
+ self.tb.run ()
+ # check data
+
+
+if __name__ == '__main__':
+ gr_unittest.run(qa_Build_Frame, "qa_Build_Frame.xml")
diff --git a/gr-aistx/python/qa_DebugME.py b/gr-aistx/python/qa_DebugME.py
new file mode 100755
index 0000000..1781be1
--- /dev/null
+++ b/gr-aistx/python/qa_DebugME.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 <+YOU OR YOUR COMPANY+>.
+#
+# This is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+from gnuradio import gr, gr_unittest
+import AISTX_swig as AISTX
+
+class qa_DebugME (gr_unittest.TestCase):
+
+ def setUp (self):
+ self.tb = gr.top_block ()
+
+ def tearDown (self):
+ self.tb = None
+
+ def test_001_t (self):
+ # set up fg
+ self.tb.run ()
+ # check data
+
+
+if __name__ == '__main__':
+ gr_unittest.run(qa_DebugME, "qa_DebugME.xml")
diff --git a/gr-aistx/python/qa_nrz_to_nrzi.py b/gr-aistx/python/qa_nrz_to_nrzi.py
new file mode 100755
index 0000000..3951919
--- /dev/null
+++ b/gr-aistx/python/qa_nrz_to_nrzi.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 <+YOU OR YOUR COMPANY+>.
+#
+# This is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+from gnuradio import gr, gr_unittest
+import AISTX_swig as AISTX
+
+class qa_nrz_to_nrzi (gr_unittest.TestCase):
+
+ def setUp (self):
+ self.tb = gr.top_block ()
+
+ def tearDown (self):
+ self.tb = None
+
+ def test_001_t (self):
+ # set up fg
+ self.tb.run ()
+ # check data
+
+
+if __name__ == '__main__':
+ gr_unittest.run(qa_nrz_to_nrzi, "qa_nrz_to_nrzi.xml")
diff --git a/gr-aistx/swig/AISTX_swig.i b/gr-aistx/swig/AISTX_swig.i
new file mode 100644
index 0000000..e3326b3
--- /dev/null
+++ b/gr-aistx/swig/AISTX_swig.i
@@ -0,0 +1,24 @@
+/* -*- c++ -*- */
+
+#define AISTX_API
+
+%include "gnuradio.i" // the common stuff
+
+//load generated python docstrings
+%include "AISTX_swig_doc.i"
+
+%{
+#include "AISTX/nrz_to_nrzi.h"
+#include "AISTX/Build_Frame.h"
+#include "AISTX/DebugME.h"
+%}
+
+
+%include "AISTX/nrz_to_nrzi.h"
+GR_SWIG_BLOCK_MAGIC2(AISTX, nrz_to_nrzi);
+
+%include "AISTX/Build_Frame.h"
+GR_SWIG_BLOCK_MAGIC2(AISTX, Build_Frame);
+
+%include "AISTX/DebugME.h"
+GR_SWIG_BLOCK_MAGIC2(AISTX, DebugME);
diff --git a/gr-aistx/swig/CMakeLists.txt b/gr-aistx/swig/CMakeLists.txt
new file mode 100644
index 0000000..bb491df
--- /dev/null
+++ b/gr-aistx/swig/CMakeLists.txt
@@ -0,0 +1,61 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Include swig generation macros
+########################################################################
+find_package(SWIG)
+find_package(PythonLibs)
+if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND)
+ return()
+endif()
+include(GrSwig)
+include(GrPython)
+
+########################################################################
+# Setup swig generation
+########################################################################
+foreach(incdir ${GNURADIO_CORE_INCLUDE_DIRS})
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/swig)
+endforeach(incdir)
+
+foreach(incdir ${GRUEL_INCLUDE_DIRS})
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/gruel/swig)
+endforeach(incdir)
+
+set(GR_SWIG_LIBRARIES gnuradio-AISTX)
+set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/AISTX_swig_doc.i)
+set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
+
+GR_SWIG_MAKE(AISTX_swig AISTX_swig.i)
+
+########################################################################
+# Install the built swig module
+########################################################################
+GR_SWIG_INSTALL(TARGETS AISTX_swig DESTINATION ${GR_PYTHON_DIR}/AISTX)
+
+########################################################################
+# Install swig .i files for development
+########################################################################
+install(
+ FILES
+ AISTX_swig.i
+ ${CMAKE_CURRENT_BINARY_DIR}/AISTX_swig_doc.i
+ DESTINATION ${GR_INCLUDE_DIR}/AISTX/swig
+)
diff --git a/unpacker.c b/unpacker.c
new file mode 100644
index 0000000..9d0e14b
--- /dev/null
+++ b/unpacker.c
@@ -0,0 +1,86 @@
+/*
+
+This source code is part of the AIS BlackToolkit.
+Unpacker.c builds a complete NMEA sentence out of an encoded AIS payload. It is normally used in combination with AIVDM_Encoder.py.
+
+Copyright 2013-2014 -- Embyte & Pastus
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+Usage example:
+$ ./AIVDM_Encoder.py --type=1 --vsize=30x10 | xargs -IA ./unpacker A 1 A
+
+*/
+
+
+#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <string.h>
+
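+// Read "length" bits from the ASCII '0'/'1' string starting at offset "start"
+// and return them as an unsigned value, most significant bit first.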
+unsigned long unpack(char *buffer, int start, int length)
+{
+ unsigned long ret = 0;
+ for(int i = start; i < (start+length); i++) {
+ ret <<= 1;
+ ret |= (buffer[i] & 0x01);
+ }
+ return ret;
+}
+
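+// Standard NMEA checksum: XOR of every character of the sentence after the leading '!'.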
+char nmea_checksum(std::string buffer)
+{
+ unsigned int i = 0;
+ char sum = 0x00;
+ if(buffer[0] == '!') i++;
+ for(; i < buffer.length(); i++) sum ^= buffer[i];
+ return sum;
+}
+
+
+int main(int argc, char *argv[]) {
+
+
+ if (argc<4) {
+ std::cout << "\nUsage: ./unpacker binary_string enable_nmea channel\n";
+ std::cout << "\nenable_nmea = 1 for full NMEA sentence, otherwise only payload is printed\n";
+ std::cout << "channel = A/B\n\n";
+		return 1; // missing arguments: exit with an error status
+ }
+
+ int i;
+ char asciidata[255];
+
+ int len = strlen(argv[1]);
+
+ int enable_nmea = (int) *argv[2] - 48;
+ char channel = *argv[3];
+ std::ostringstream d_payload;
+ d_payload.str("");
+
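+	// Pack each 6-bit group and map it to the AIS 6-bit ASCII alphabet
+	// (values above 39 skip the gap between 'W' and '`' in the ASCII table).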
+ for(i = 0; i < len/6; i++) {
+ asciidata[i] = unpack(argv[1], i*6, 6);
+ if(asciidata[i] > 39) asciidata[i] += 8;
+ asciidata[i] += 48;
+ }
+
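+	// Single-fragment AIVDM sentence header: fragment count, fragment number,
+	// empty sequential message ID, radio channel.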
+ if (enable_nmea)
+ d_payload << "!AIVDM,1,1,," << channel << ",";
+
+ for(int i = 0; i < len/6; i++)
+ d_payload << asciidata[i];
+
+ if (enable_nmea) {
+		d_payload << ",0"; // fill-bits field: padding bits added to reach a 6-bit boundary (always 0 here)
+ char checksum = nmea_checksum(std::string(d_payload.str()));
+ d_payload << "*" << std::setw(2) << std::setfill('0') << std::hex << std::uppercase << int(checksum);
+ }
+
+ std::cout << std::string(d_payload.str()) << std::endl;
+	return 0; // successful conversion
+}