author    Micah Anderson <micah@riseup.net>    2014-08-11 16:33:29 -0400
committer Micah Anderson <micah@riseup.net>    2014-08-11 16:33:29 -0400
commit    cce638a8adf4e045ca5505afea4bda57753c31dd (patch)
tree      b5e139d3359ac5b8c7b1afa8acbb1b5b6051c626 /docs
initial import of debian package
Diffstat (limited to 'docs')
-rw-r--r--  docs/Makefile                                     117
-rwxr-xr-x  docs/autogen_api.py                                48
-rw-r--r--  docs/source/_static/default.css                   553
-rw-r--r--  docs/source/_static/logo.png                      bin 0 -> 8686 bytes
-rw-r--r--  docs/source/_static/zeromq.ico                    bin 0 -> 1150 bytes
-rw-r--r--  docs/source/_templates/layout.html                 23
-rw-r--r--  docs/source/api/index.rst                          25
-rw-r--r--  docs/source/api/zmq.auth.ioloop.rst                20
-rw-r--r--  docs/source/api/zmq.auth.rst                       30
-rw-r--r--  docs/source/api/zmq.auth.thread.rst                24
-rw-r--r--  docs/source/api/zmq.devices.rst                    86
-rw-r--r--  docs/source/api/zmq.green.rst                       7
-rw-r--r--  docs/source/api/zmq.rst                           140
-rw-r--r--  docs/source/changelog.rst                         446
-rw-r--r--  docs/source/conf.py                               209
-rw-r--r--  docs/source/devices.rst                            82
-rw-r--r--  docs/source/eventloop.rst                         197
-rw-r--r--  docs/source/index.rst                              95
-rw-r--r--  docs/source/logging.rst                            84
-rw-r--r--  docs/source/morethanbindings.rst                  166
-rw-r--r--  docs/source/pyversions.rst                        195
-rw-r--r--  docs/source/serialization.rst                      90
-rw-r--r--  docs/source/ssh.rst                                80
-rw-r--r--  docs/source/unicode.rst                           188
-rw-r--r--  docs/sphinxext/apigen.py                          441
-rw-r--r--  docs/sphinxext/docscrape.py                       497
-rw-r--r--  docs/sphinxext/docscrape_sphinx.py                136
-rw-r--r--  docs/sphinxext/inheritance_diagram.py             407
-rw-r--r--  docs/sphinxext/ipython_console_highlighting.py    114
-rw-r--r--  docs/sphinxext/numpydoc.py                        116
-rw-r--r--  docs/sphinxext/sphinx_cython.py                   115
-rwxr-xr-x  docs/update_ghpages.sh                             33
32 files changed, 4764 insertions, 0 deletions
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..87988f8
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,117 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = build
+SRCDIR = source
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SRCDIR)
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+default: html
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+ -rm -rf $(SRCDIR)/api/generated/*
+ -rm -rf dist
+ -rm -rf gh-pages
+
+api: $(SRCDIR)/api/generated/gen.rst
+
+$(SRCDIR)/api/generated/gen.rst:
+ python autogen_api.py
+ @echo "Build API docs finished."
+
+dist: all
+ mkdir -p dist
+ rm -rf dist/*
+ ln $(BUILDDIR)/latex/PyZMQ.pdf dist/
+ cp -a $(BUILDDIR)/html dist/
+ @echo "Build finished. Final docs are in dist/"
+
+pdf: latex
+ cd $(BUILDDIR)/latex && make all-pdf
+
+all: html pdf
+
+html: api
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyZMQ.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyZMQ.qhc"
+
+latex: api
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+gh-pages: html
+ sh update_ghpages.sh
+
diff --git a/docs/autogen_api.py b/docs/autogen_api.py
new file mode 100755
index 0000000..004c461
--- /dev/null
+++ b/docs/autogen_api.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+"""Script to auto-generate our API docs.
+"""
+# stdlib imports
+import os
+import sys
+
+# local imports
+sys.path.append(os.path.abspath('sphinxext'))
+# import sphinx_cython
+from apigen import ApiDocWriter
+
+#*****************************************************************************
+if __name__ == '__main__':
+ pjoin = os.path.join
+ package = 'zmq'
+ outdir = pjoin('source','api','generated')
+ docwriter = ApiDocWriter(package,rst_extension='.rst')
+ # You have to escape the . here because . is a special char for regexps.
+ # You must do make clean if you change this!
+ docwriter.package_skip_patterns += [
+ r'\.tests$',
+ r'\.backend$',
+ r'\.auth$',
+ r'\.eventloop\.minitornado$',
+ r'\.green\.eventloop$',
+ r'\.sugar$',
+ r'\.devices$',
+ ]
+
+ docwriter.module_skip_patterns += [
+ r'\.eventloop\.stack_context$',
+ r'\.error$',
+ r'\.green\..+$',
+ r'\.utils\.initthreads$',
+ r'\.utils\.constant_names$',
+ r'\.utils\.garbage$',
+ r'\.utils\.rebuffer$',
+ r'\.utils\.strtypes$',
+ ]
+
+ # Now, generate the outputs
+ docwriter.write_api_docs(outdir)
+ docwriter.write_index(outdir, 'gen',
+ relative_to = pjoin('source','api')
+ )
+
+ print('%d files written' % len(docwriter.written_modules))
diff --git a/docs/source/_static/default.css b/docs/source/_static/default.css
new file mode 100644
index 0000000..71c9d5e
--- /dev/null
+++ b/docs/source/_static/default.css
@@ -0,0 +1,553 @@
+/**
+ * Alternate Sphinx design
+ * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
+ */
+
+body {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+ font-size: 14px;
+ letter-spacing: -0.01em;
+ line-height: 150%;
+ text-align: center;
+ /*background-color: #AFC1C4; */
+ background-color: #BFD1D4;
+ color: black;
+ padding: 0;
+ border: 1px solid #aaa;
+
+ margin: 0px 80px 0px 80px;
+ min-width: 740px;
+}
+
+a {
+ color: #CA7900;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #2491CF;
+}
+
+pre {
+ font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.015em;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ background-color: #f8f8f8;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+ border: 0;
+ background-color: transparent;
+ color: #aaa;
+}
+
+table.highlighttable {
+ margin-left: 0.5em;
+}
+
+table.highlighttable td {
+ padding: 0 0.5em 0 0.5em;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.01em;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border-bottom: 1px solid #ddd;
+ color: #333;
+}
+
+tt.descname {
+ background-color: transparent;
+ font-weight: bold;
+ font-size: 1.2em;
+ border: 0;
+}
+
+tt.descclassname {
+ background-color: transparent;
+ border: 0;
+}
+
+tt.xref {
+ background-color: transparent;
+ font-weight: bold;
+ border: 0;
+}
+
+a tt {
+ background-color: transparent;
+ font-weight: bold;
+ border: 0;
+ color: #CA7900;
+}
+
+a tt:hover {
+ color: #2491CF;
+}
+
+dl {
+ margin-bottom: 15px;
+}
+
+dd p {
+ margin-top: 0px;
+}
+
+dd ul, dd table {
+ margin-bottom: 10px;
+}
+
+dd {
+ margin-top: 3px;
+ margin-bottom: 10px;
+ margin-left: 30px;
+}
+
+.refcount {
+ color: #060;
+}
+
+dt:target,
+.highlight {
+ background-color: #fbe54e;
+}
+
+dl.class, dl.function {
+ border-top: 2px solid #888;
+}
+
+dl.method, dl.attribute {
+ border-top: 1px solid #aaa;
+}
+
+dl.glossary dt {
+ font-weight: bold;
+ font-size: 1.1em;
+}
+
+pre {
+ line-height: 120%;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+.first {
+ margin-top: 0 !important;
+}
+
+div.document {
+ background-color: white;
+ text-align: left;
+ background-image: url(contents.png);
+ background-repeat: repeat-x;
+}
+
+/*
+div.documentwrapper {
+ width: 100%;
+}
+*/
+
+div.clearer {
+ clear: both;
+}
+
+div.related h3 {
+ display: none;
+}
+
+div.related ul {
+ background-image: url(navigation.png);
+ height: 2em;
+ list-style: none;
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+ margin: 0;
+ padding-left: 10px;
+}
+
+div.related ul li {
+ margin: 0;
+ padding: 0;
+ height: 2em;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: #EE9816;
+}
+
+div.related ul li a:hover {
+ color: #3CA8E7;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.bodywrapper {
+ margin: 0 240px 0 0;
+ border-right: 1px solid #ccc;
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0.5em 15px 15px 0;
+ width: 210px;
+ float: right;
+ text-align: left;
+/* margin-left: -100%; */
+}
+
+div.sphinxsidebar h4, div.sphinxsidebar h3 {
+ margin: 1em 0 0.5em 0;
+ font-size: 0.9em;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border: 1px solid #86989B;
+ background-color: #AFC1C4;
+}
+
+div.sphinxsidebar ul {
+ padding-left: 1.5em;
+ margin-top: 7px;
+ list-style: none;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ list-style: square;
+ margin-left: 20px;
+}
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+p.rubric {
+ font-weight: bold;
+}
+
+h1 {
+ margin: 0;
+ padding: 0.7em 0 0.3em 0;
+ font-size: 1.5em;
+ color: #11557C;
+}
+
+h2 {
+ margin: 1.3em 0 0.2em 0;
+ font-size: 1.35em;
+ padding: 0;
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.2em;
+}
+
+h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
+ color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+ display: none;
+ margin: 0 0 0 0.3em;
+ padding: 0 0.2em 0 0.2em;
+ color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+ display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+ color: #777;
+ background-color: #eee;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.footer {
+ background-color: #E3EFF1;
+ color: #86989B;
+ padding: 3px 8px 3px 0;
+ clear: both;
+ font-size: 0.8em;
+ text-align: right;
+}
+
+div.footer a {
+ color: #86989B;
+ text-decoration: underline;
+}
+
+div.pagination {
+ margin-top: 2em;
+ padding-top: 0.5em;
+ border-top: 1px solid black;
+ text-align: center;
+}
+
+div.sphinxsidebar ul.toc {
+ margin: 1em 0 1em 0;
+ padding: 0 0 0 0.5em;
+ list-style: none;
+}
+
+div.sphinxsidebar ul.toc li {
+ margin: 0.5em 0 0.5em 0;
+ font-size: 0.9em;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul.toc li p {
+ margin: 0;
+ padding: 0;
+}
+
+div.sphinxsidebar ul.toc ul {
+ margin: 0.2em 0 0.2em 0;
+ padding: 0 0 0 1.8em;
+}
+
+div.sphinxsidebar ul.toc ul li {
+ padding: 0;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 0 0;
+ border: 1px solid #86989B;
+ background-color: #f7f7f7;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin: 0;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border-bottom: 1px solid #86989B;
+ font-weight: bold;
+ background-color: #AFC1C4;
+}
+
+div.warning {
+ border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+ background-color: #CF0000;
+ border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+div.versioninfo {
+ margin: 1em 0 0 0;
+ border: 1px solid #ccc;
+ background-color: #DDEAF0;
+ padding: 8px;
+ line-height: 1.3em;
+ font-size: 0.9em;
+}
+
+
+a.headerlink {
+ color: #c60f0f!important;
+ font-size: 1em;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none!important;
+ visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+ visibility: visible;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+table.indextable td {
+ text-align: left;
+ vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+ height: 10px;
+}
+
+table.indextable tr.cap {
+ margin-top: 10px;
+ background-color: #f2f2f2;
+}
+
+img.toggler {
+ margin-right: 3px;
+ margin-top: 3px;
+ cursor: pointer;
+}
+
+img.inheritance {
+ border: 0px
+}
+
+form.pfform {
+ margin: 10px 0 20px 0;
+}
+
+table.contentstable {
+ width: 90%;
+}
+
+table.contentstable p.biglink {
+ line-height: 150%;
+}
+
+a.biglink {
+ font-size: 1.3em;
+}
+
+span.linkdescr {
+ font-style: italic;
+ padding-top: 5px;
+ font-size: 90%;
+}
+
+ul.search {
+ margin: 10px 0 0 20px;
+ padding: 0;
+}
+
+ul.search li {
+ padding: 5px 0 5px 20px;
+ background-image: url(file.png);
+ background-repeat: no-repeat;
+ background-position: 0 7px;
+}
+
+ul.search li a {
+ font-weight: bold;
+}
+
+ul.search li div.context {
+ color: #888;
+ margin: 2px 0 0 30px;
+ text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+ font-weight: bold;
+}
+/* from numpy:*/
+td.field-body > blockquote {
+ margin-top: 0.1em;
+ margin-bottom: 0.5em;
+}
+/* spacing in see also definition lists */
+dl.last > dd {
+ margin-top: 1px;
+ margin-bottom: 5px;
+ margin-left: 30px;
+}
+
+.field-list th {
+ color: rgb(0,102,204);
+ background: #eee;
+}
+
+table.field-list th {
+/* border-left: 1px solid #aaa !important;*/
+ padding-left: 5px;
+ white-space: nowrap;
+}
+
+table.field-list {
+ border-collapse: separate;
+ border-spacing: 10px;
+}
+
+th.field-name {
+/* border-left: 1px solid #aaa !important;*/
+padding-left: .5em;
+padding-right: .5em;
+text-align: right;
+
+}
+
+.rubric {
+ color: rgb(0,50,150);
+ font-style: italic;
+}
+
+.versionadded {
+ font-style: italic;
+ background: #ffe;
+}
\ No newline at end of file
diff --git a/docs/source/_static/logo.png b/docs/source/_static/logo.png
new file mode 100644
index 0000000..c3e8dab
--- /dev/null
+++ b/docs/source/_static/logo.png
Binary files differ
diff --git a/docs/source/_static/zeromq.ico b/docs/source/_static/zeromq.ico
new file mode 100644
index 0000000..3f810a1
--- /dev/null
+++ b/docs/source/_static/zeromq.ico
Binary files differ
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html
new file mode 100644
index 0000000..a3eebdb
--- /dev/null
+++ b/docs/source/_templates/layout.html
@@ -0,0 +1,23 @@
+{% extends "!layout.html" %}
+
+
+{% block rootrellink %}
+ <li><a href="{{ pathto('index') }}">home</a>|&nbsp;</li>
+ <li><a href="{{ pathto('search') }}">search</a>|&nbsp;</li>
+ <li><a href="{{ pathto('api/index') }}">API</a> &raquo;</li>
+{% endblock %}
+
+
+{% block relbar1 %}
+
+<div style="background-color: white; text-align: left; padding: 10px 10px 15px 15px">
+<a href="{{ pathto('index') }}"><img src="{{
+pathto("_static/logo.png", 1) }}" border="0" alt="PyZMQ Documentation"/></a>
+</div>
+{{ super() }}
+{% endblock %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst
new file mode 100644
index 0000000..6b51f0c
--- /dev/null
+++ b/docs/source/api/index.rst
@@ -0,0 +1,25 @@
+.. _api-index:
+
+###################
+ The PyZMQ API
+###################
+
+:Release: |release|
+:Date: |today|
+
+.. toctree::
+
+ zmq
+ zmq.devices
+ zmq.green
+ generated/zmq.eventloop.ioloop
+ generated/zmq.eventloop.zmqstream
+ zmq.auth
+ zmq.auth.thread
+ zmq.auth.ioloop
+ generated/zmq.log.handlers
+ generated/zmq.ssh.tunnel
+ generated/zmq.utils.jsonapi
+ generated/zmq.utils.monitor
+ generated/zmq.utils.z85
+
diff --git a/docs/source/api/zmq.auth.ioloop.rst b/docs/source/api/zmq.auth.ioloop.rst
new file mode 100644
index 0000000..efcb026
--- /dev/null
+++ b/docs/source/api/zmq.auth.ioloop.rst
@@ -0,0 +1,20 @@
+.. AUTO-GENERATED FILE -- DO NOT EDIT!
+
+auth.ioloop
+===========
+
+Module: :mod:`auth.ioloop`
+--------------------------
+.. automodule:: zmq.auth.ioloop
+
+.. currentmodule:: zmq.auth.ioloop
+
+:class:`IOLoopAuthenticator`
+----------------------------
+
+
+.. autoclass:: IOLoopAuthenticator
+ :members:
+ :undoc-members:
+ :inherited-members:
+
diff --git a/docs/source/api/zmq.auth.rst b/docs/source/api/zmq.auth.rst
new file mode 100644
index 0000000..6ca29d7
--- /dev/null
+++ b/docs/source/api/zmq.auth.rst
@@ -0,0 +1,30 @@
+auth
+====
+
+Module: :mod:`auth`
+-------------------
+.. automodule:: zmq.auth
+
+.. currentmodule:: zmq.auth
+
+
+:class:`Authenticator`
+----------------------
+
+
+.. autoclass:: Authenticator
+ :members:
+ :undoc-members:
+ :inherited-members:
+
+
+Functions
+---------
+
+
+.. autofunction:: create_certificates
+
+.. autofunction:: load_certificate
+
+.. autofunction:: load_certificates
+
diff --git a/docs/source/api/zmq.auth.thread.rst b/docs/source/api/zmq.auth.thread.rst
new file mode 100644
index 0000000..e2c0b1b
--- /dev/null
+++ b/docs/source/api/zmq.auth.thread.rst
@@ -0,0 +1,24 @@
+.. AUTO-GENERATED FILE -- DO NOT EDIT!
+
+auth.thread
+===========
+
+Module: :mod:`auth.thread`
+--------------------------
+.. automodule:: zmq.auth.thread
+
+.. currentmodule:: zmq.auth.thread
+
+Classes
+-------
+
+
+:class:`ThreadAuthenticator`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+.. autoclass:: ThreadAuthenticator
+ :members:
+ :undoc-members:
+ :inherited-members:
+
diff --git a/docs/source/api/zmq.devices.rst b/docs/source/api/zmq.devices.rst
new file mode 100644
index 0000000..690b0b5
--- /dev/null
+++ b/docs/source/api/zmq.devices.rst
@@ -0,0 +1,86 @@
+devices
+=======
+
+Functions
+---------
+
+.. autofunction:: zmq.device
+
+.. autofunction:: zmq.proxy
+
+Module: :mod:`zmq.devices`
+--------------------------
+.. automodule:: zmq.devices
+
+.. currentmodule:: zmq.devices
+
+
+Base Devices
+------------
+
+:class:`Device`
+***************
+
+.. autoclass:: Device
+ :members:
+ :exclude-members: context_factory, run, run_device
+
+:class:`ThreadDevice`
+*********************
+
+.. autoclass:: ThreadDevice
+ :members:
+
+:class:`ProcessDevice`
+**********************
+
+.. autoclass:: ProcessDevice
+ :members:
+
+
+Proxy Devices
+-------------
+
+:class:`Proxy`
+********************
+
+.. autoclass:: Proxy
+ :members: bind_mon, connect_mon, setsockopt_mon
+
+:class:`ThreadProxy`
+********************
+
+.. autoclass:: ThreadProxy
+ :members:
+
+:class:`ProcessProxy`
+*********************
+
+.. autoclass:: ProcessProxy
+ :members:
+
+
+MonitoredQueue Devices
+----------------------
+
+.. autofunction:: zmq.devices.monitored_queue
+
+:class:`MonitoredQueue`
+*****************************
+
+.. autoclass:: MonitoredQueue
+ :members:
+
+:class:`ThreadMonitoredQueue`
+*****************************
+
+.. autoclass:: ThreadMonitoredQueue
+ :members:
+
+:class:`ProcessMonitoredQueue`
+******************************
+
+.. autoclass:: ProcessMonitoredQueue
+ :members:
+
+
diff --git a/docs/source/api/zmq.green.rst b/docs/source/api/zmq.green.rst
new file mode 100644
index 0000000..6d0667a
--- /dev/null
+++ b/docs/source/api/zmq.green.rst
@@ -0,0 +1,7 @@
+green
+=====
+
+Module: :mod:`green`
+--------------------
+
+.. automodule:: zmq.green
diff --git a/docs/source/api/zmq.rst b/docs/source/api/zmq.rst
new file mode 100644
index 0000000..0a0f7fe
--- /dev/null
+++ b/docs/source/api/zmq.rst
@@ -0,0 +1,140 @@
+zmq
+===
+
+.. automodule:: zmq
+
+.. currentmodule:: zmq
+
+Basic Classes
+-------------
+
+:class:`Context`
+****************
+
+
+.. autoclass:: Context
+ :members:
+ :undoc-members:
+ :inherited-members:
+ :exclude-members: sockopts, closed
+
+ .. attribute:: closed
+
+ boolean - whether the context has been terminated.
+ If True, you can no longer use this Context.
+
+
+:class:`Socket`
+***************
+
+
+.. autoclass:: Socket
+ :members:
+ :undoc-members:
+ :inherited-members:
+ :exclude-members: closed, context, getsockopt_unicode, recv_unicode, setsockopt_unicode, send_unicode
+
+ .. attribute:: closed
+
+ boolean - whether the socket has been closed.
+ If True, you can no longer use this Socket.
+
+
+
+:class:`Frame`
+**************
+
+
+.. autoclass:: Frame
+ :members:
+ :inherited-members:
+
+
+:class:`MessageTracker`
+***********************
+
+
+.. autoclass:: MessageTracker
+ :members:
+ :inherited-members:
+
+
+Polling
+-------
+
+:class:`Poller`
+***************
+
+.. autoclass:: Poller
+ :members:
+ :inherited-members:
+
+
+.. autofunction:: zmq.select
+
+
+Exceptions
+----------
+
+:class:`ZMQError`
+*****************
+
+.. autoclass:: ZMQError
+ :members:
+ :inherited-members:
+
+
+:class:`ZMQVersionError`
+************************
+
+.. autoclass:: ZMQVersionError
+ :members:
+ :inherited-members:
+
+:class:`Again`
+**************
+
+
+.. autoclass:: Again
+
+
+:class:`ContextTerminated`
+**************************
+
+
+.. autoclass:: ContextTerminated
+
+
+:class:`NotDone`
+****************
+
+
+.. autoclass:: NotDone
+
+
+:class:`ZMQBindError`
+*********************
+
+
+.. autoclass:: ZMQBindError
+
+
+
+Functions
+---------
+
+.. autofunction:: zmq.zmq_version
+
+.. autofunction:: zmq.pyzmq_version
+
+.. autofunction:: zmq.zmq_version_info
+
+.. autofunction:: zmq.pyzmq_version_info
+
+.. autofunction:: zmq.device
+
+.. autofunction:: zmq.proxy
+
+.. autofunction:: zmq.curve_keypair
+
+.. autofunction:: zmq.get_includes
\ No newline at end of file
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
new file mode 100644
index 0000000..8af0652
--- /dev/null
+++ b/docs/source/changelog.rst
@@ -0,0 +1,446 @@
+.. PyZMQ changelog summary, started by Min Ragan-Kelley, 2011
+
+.. _changelog:
+
+================
+Changes in PyZMQ
+================
+
+This is a coarse summary of changes in pyzmq versions. For a real changelog, consult the
+`git log <https://github.com/zeromq/pyzmq/commits>`_.
+
+14.3.1
+======
+
+Minor bugfixes to pyzmq 14.3:
+
+- Fixes to building bundled libzmq on OS X < 10.9
+- Fixes to import-failure warnings on Python 3.4
+- Fixes to tests
+- Pull upstream fixes to zmq.ssh for ssh multiplexing
+
+14.3.0
+======
+
+- PyZMQ no longer calls :meth:`.Socket.close` or :meth:`.Context.term` during process cleanup.
+ Changes to garbage collection in Python 3.4 make this impossible to do sensibly.
+- :meth:`ZMQStream.close` closes its socket immediately, rather than scheduling a timeout.
+- Raise the original ImportError when importing zmq fails.
+ Should be more informative than `no module cffi...`.
+
+.. warning::
+
+ Users of Python 3.4 should not use pyzmq < 14.3, due to changes in garbage collection.
+
+
+14.2.0
+======
+
+New Stuff
+---------
+
+- Raise new ZMQVersionError when a requested method is not supported by the linked libzmq.
+ For backward compatibility, this subclasses NotImplementedError.
+
+
+Bugs Fixed
+----------
+
+- Memory leak introduced in pyzmq-14.0 in zero copy.
+- OverflowError on 32 bit systems in zero copy.
+
+
+14.1.0
+======
+
+Security
+--------
+
+The headline features for 14.1 are adding better support for libzmq's
+security features.
+
+- When libzmq is bundled as a Python extension (e.g. wheels, eggs),
+ libsodium is also bundled (excluding Windows),
+ ensuring that libzmq security is available to users who install from wheels
+- New :mod:`zmq.auth`, implementing zeromq's ZAP authentication,
+ modeled on czmq zauth.
+ For more information, see the `examples <https://github.com/zeromq/pyzmq/tree/master/examples/>`_.
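+
+A minimal sketch of starting a ZAP authenticator thread with the new module
+(``ctx`` and ``public_keys_dir`` are hypothetical names, not defined in this changelog):
+
+.. sourcecode:: python
+
+ from zmq.auth.thread import ThreadAuthenticator
+
+ auth = ThreadAuthenticator(ctx) # ctx: an existing zmq.Context
+ auth.start()
+ auth.allow('127.0.0.1') # whitelist localhost
+ auth.configure_curve(domain='*', location=public_keys_dir) # dir of client public keys
+ # ... create and use CURVE-enabled sockets ...
+ auth.stop()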
+
+
+Other New Stuff
+---------------
+
+- Add PYZMQ_BACKEND for enabling use of backends outside the pyzmq codebase.
+- Add :attr:`~.Context.underlying` property and :meth:`~.Context.shadow`
+ method to Context and Socket, for handing off sockets and contexts
+ between pyzmq and other bindings (mainly pyczmq_).
+- Add TOS, ROUTER_HANDOVER, and IPC_FILTER constants from libzmq-4.1-dev.
+- Add Context option support in the CFFI backend.
+- Various small unicode and build fixes, as always.
+- :meth:`~.Socket.send_json` and :meth:`~.Socket.recv_json` pass any extra kwargs to ``json.dumps/loads``.
+
+
+.. _pyczmq: https://github.com/zeromq/pyczmq
+
+
+Deprecations
+------------
+
+- ``Socket.socket_type`` is deprecated, in favor of ``Socket.type``,
+ which has been available since 2.1.
+
+
+14.0.1
+======
+
+Bugfix release
+
+- Update bundled libzmq to current (4.0.3).
+- Fix bug in :meth:`.Context.destroy` with no open sockets.
+- Threadsafety fixes in the garbage collector.
+- Python 3 fixes in :mod:`zmq.ssh`.
+
+
+14.0.0
+======
+
+* Update bundled libzmq to current (4.0.1).
+* Backends are now implemented in ``zmq.backend`` instead of ``zmq.core``.
+ This has no effect on public APIs.
+* Various build improvements for Cython and CFFI backends (PyPy compiles at build time).
+* Various GIL-related performance improvements - the GIL is no longer touched from a zmq IO thread.
+* Adding a constant should now be a bit easier - only zmq/sugar/constant_names should need updating,
+ all other constant-related files should be automatically updated by ``setup.py constants``.
+* add support for latest libzmq-4.0.1
+ (includes ZMQ_CURVE security and socket event monitoring).
+
+New stuff
+---------
+
+- :meth:`.Socket.monitor`
+- :meth:`.Socket.get_monitor_socket`
+- :func:`zmq.curve_keypair`
+- :mod:`zmq.utils.monitor`
+- :mod:`zmq.utils.z85`
+
+
+13.1.0
+======
+
+The main new feature is improved tornado 3 compatibility.
+PyZMQ ships a 'minitornado' submodule, which contains a small subset of tornado 3.0.1,
+in order to get the IOLoop base class. zmq.eventloop.ioloop.IOLoop is now a simple subclass,
+and if the system tornado is ≥ 3.0, then the zmq IOLoop is a proper registered subclass
+of the tornado one itself, and minitornado is entirely unused.
+
+13.0.2
+======
+
+Bugfix release!
+
+A few things were broken in 13.0.0, so this is a quick bugfix release.
+
+* **FIXED** EAGAIN was unconditionally turned into KeyboardInterrupt
+* **FIXED** we used totally deprecated ctypes_configure to generate constants in CFFI backend
+* **FIXED** memory leak in CFFI backend for PyPy
+* **FIXED** typo prevented IPC_PATH_MAX_LEN from ever being defined
+* **FIXED** various build fixes - linking with librt, Cython compatibility, etc.
+
+13.0.1
+======
+
+defunct bugfix. We do not speak of this...
+
+13.0.0
+======
+
+PyZMQ now officially targets libzmq-3 (3.2.2),
+0MQ ≥ 2.1.4 is still supported for the indefinite future, but 3.x is recommended.
+PyZMQ has detached from libzmq versioning,
+and will just follow its own regular versioning scheme from now on.
+PyZMQ bdists will include whatever is the latest stable libzmq release (3.2.2 for pyzmq-13.0).
+
+.. note::
+
+ set/get methods are exposed via get/setattr on all Context, Socket, and Frame classes.
+ This means that subclasses of these classes that require extra attributes
+ **must declare these attributes at the class level**.
+
+Experiments Removed
+-------------------
+
+* The Threadsafe ZMQStream experiment in 2.2.0.1 was deemed inappropriate and not useful,
+ and has been removed.
+* The :mod:`zmq.web` experiment has been removed,
+ to be developed as a `standalone project <https://github.com/ellisonbg/zmqweb>`_.
+
+New Stuff
+---------
+
+* Support for PyPy via CFFI backend (requires py, ctypes-configure, and cffi).
+* Add support for new APIs in libzmq-3
+
+ - :meth:`.Socket.disconnect`
+ - :meth:`.Socket.unbind`
+ - :meth:`.Context.set`
+ - :meth:`.Context.get`
+ - :meth:`.Frame.set`
+ - :meth:`.Frame.get`
+ - :func:`zmq.proxy`
+ - :class:`zmq.devices.Proxy`
+ - Exceptions for common zmq errnos: :class:`zmq.Again`, :class:`zmq.ContextTerminated`
+ (subclass :class:`ZMQError`, so fully backward-compatible).
+
+
+* Setting and getting :attr:`.Socket.hwm` sets or gets *both* SNDHWM/RCVHWM for libzmq-3.
+* Implementation splits core Cython bindings from pure-Python subclasses
+ with sugar methods (send/recv_multipart). This should facilitate
+ non-Cython backends and PyPy support [spoiler: it did!].
+
+
+Bugs Fixed
+----------
+
+* Unicode fixes in log and monitored queue
+* MinGW, ppc, cross-compilation, and HP-UX build fixes
+* :mod:`zmq.green` should be complete - devices and tornado eventloop both work
+ in gevent contexts.
+
+
+2.2.0.1
+=======
+
+This is a tech-preview release, to try out some new features.
+It is expected to be short-lived, as there are likely to be issues to iron out,
+particularly with the new pip-install support.
+
+Experimental New Stuff
+----------------------
+
+These features are marked 'experimental', which means that their APIs are not set in stone,
+and may be removed or changed in incompatible ways in later releases.
+
+
+Threadsafe ZMQStream
+********************
+
+With the IOLoop inherited from tornado, there is exactly one method that is threadsafe:
+:meth:`.IOLoop.add_callback`. With this release, we are trying an experimental option
+to pass all IOLoop calls via this method, so that ZMQStreams can be used from one thread
+while the IOLoop runs in another. To try out a threadsafe stream:
+
+.. sourcecode:: python
+
+ stream = ZMQStream(socket, threadsafe=True)
+
+
+pip install pyzmq
+*****************
+
+PyZMQ should now be pip installable, even on systems without libzmq.
+In these cases, when pyzmq fails to find an appropriate libzmq to link against,
+it will try to build libzmq as a Python extension.
+This work is derived from `pyzmq_static <https://github.com/brandon-rhodes/pyzmq-static>`_.
+
+To this end, PyZMQ source distributions include the sources for libzmq (2.2.0) and libuuid (2.21),
+both used under the LGPL.
+
+
+zmq.green
+*********
+
+The excellent `gevent_zeromq <https://github.com/traviscline/gevent_zeromq>`_ socket
+subclass which provides `gevent <http://www.gevent.org/>`_ compatibility has been merged as
+:mod:`zmq.green`.
+
+.. seealso::
+
+ :ref:`zmq_green`
+
+
+Bugs Fixed
+----------
+
+* TIMEO sockopts are properly included for libzmq-2.2.0
+* avoid garbage collection of sockets after fork (would cause ``assert (mailbox.cpp:79)``).
+
+
+2.2.0
+=====
+
+Some effort has gone into refining the pyzmq API in this release to make it a model for
+other language bindings. This is principally made in a few renames of objects and methods,
+all of which leave the old name for backwards compatibility.
+
+.. note::
+
+ As of this release, all code outside ``zmq.core`` is BSD licensed (where
+ possible), to allow more permissive use of less-critical code and utilities.
+
+Name Changes
+------------
+
+* The :class:`~.Message` class has been renamed to :class:`~.Frame`, to better match other
+ zmq bindings. The old Message name remains for backwards-compatibility. Wherever pyzmq
+ docs say "Message", they should refer to a complete zmq atom of communication (one or
+ more Frames, connected by ZMQ_SNDMORE). Please report any remaining instances of
+ Message==MessagePart with an Issue (or better yet a Pull Request).
+
+* All ``foo_unicode`` methods are now called ``foo_string`` (``_unicode`` remains for
+ backwards compatibility). This is not only for cross-language consistency, but it makes
+ more sense in Python 3, where native strings are unicode, and the ``_unicode`` suffix
+ was wedded too much to Python 2.
+
+Other Changes and Removals
+--------------------------
+
+* ``prefix`` removed as an unused keyword argument from :meth:`~.Socket.send_multipart`.
+
+* ZMQStream :meth:`~.ZMQStream.send` default has been changed to `copy=True`, so it matches
+ Socket :meth:`~.Socket.send`.
+
+* ZMQStream :meth:`~.ZMQStream.on_err` is deprecated, because it never did anything.
+
+* Python 2.5 compatibility has been dropped, and some code has been cleaned up to reflect
+ no-longer-needed hacks.
+
+* Some Cython files in :mod:`zmq.core` have been split, to reduce the amount of
+ Cython-compiled code. Much of the body of these files were pure Python, and thus did
+ not benefit from the increased compile time. This change also aims to ease maintaining
+ feature parity in other projects, such as
+ `pyzmq-ctypes <https://github.com/svpcom/pyzmq-ctypes>`_.
+
+
+New Stuff
+---------
+
+* :class:`~.Context` objects can now set default options when they create a socket. These
+ are set and accessed as attributes to the context. Socket options that do not apply to a
+ socket (e.g. SUBSCRIBE on non-SUB sockets) will simply be ignored.
+
+* :meth:`~.ZMQStream.on_recv_stream` has been added, which adds the stream itself as a
+ second argument to the callback, making it easier to use a single callback on multiple
+ streams.
+
+* A :attr:`~Frame.more` boolean attribute has been added to the :class:`~.Frame` (née
+ Message) class, so that frames can be identified as terminal without extra queries of
+ :attr:`~.Socket.rcvmore`.
+
+
+Experimental New Stuff
+----------------------
+
+These features are marked 'experimental', which means that their APIs are not
+set in stone, and may be removed or changed in incompatible ways in later releases.
+
+* :mod:`zmq.web` added for load-balancing requests in a tornado webapp with zeromq.
+
+
+2.1.11
+======
+
+* remove support for LABEL prefixes. A major feature of libzmq-3.0, the LABEL
+ prefix, has been removed from libzmq, prior to the first stable libzmq 3.x release.
+
+ * The prefix argument to :meth:`~.Socket.send_multipart` remains, but it continues to behave in
+ exactly the same way as it always has on 2.1.x, simply prepending message parts.
+
+ * :meth:`~.Socket.recv_multipart` will always return a list, because prefixes are once
+ again indistinguishable from regular message parts.
+
+* add :meth:`.Socket.poll` method, for simple polling of events on a single socket.
+
+* no longer require monkeypatching tornado IOLoop. The :class:`.ioloop.ZMQPoller` class
+ is a poller implementation that matches tornado's expectations, and pyzmq sockets can
+ be used with any tornado application just by specifying the use of this poller. The
+ pyzmq IOLoop implementation now only trivially differs from tornado's.
+
+ It is still recommended to use :func:`.ioloop.install`, which sets *both* the zmq and
+ tornado global IOLoop instances to the same object, but it is no longer necessary.
+
+ .. warning::
+
+ The most important part of this change is that the ``IOLoop.READ/WRITE/ERROR``
+ constants now match tornado's, rather than being mapped directly to the zmq
+ ``POLLIN/OUT/ERR``. So applications that passed the zmq ``POLLIN/OUT/ERR`` constants
+ directly to the low-level :meth:`IOLoop.add_handler` (which used to work, but was
+ incorrect), rather than using the IOLoop class constants, will no longer work. Fixing
+ such code to use the IOLoop constants makes it insensitive to the actual values of the constants.
+
+2.1.10
+======
+
+* Add support for libzmq-3.0 LABEL prefixes:
+
+ .. warning::
+
+ This feature has been removed from libzmq, and thus removed from future pyzmq
+ as well.
+
+ * send a message with label-prefix with:
+
+ .. sourcecode:: python
+
+ send_multipart([b'msg', b'parts'], prefix=[b'label', b'prefix'])
+
+ * :meth:`recv_multipart` returns a tuple of ``(prefix,msg)`` if a label prefix is detected
+ * ZMQStreams and devices also respect the LABEL prefix
+
+* add czmq-style close&term as :meth:`ctx.destroy`, so that :meth:`ctx.term`
+ remains threadsafe and 1:1 with libzmq.
+* :meth:`Socket.close` takes optional linger option, for setting linger prior
+ to closing.
+* add :func:`~zmq.core.version.zmq_version_info` and
+ :func:`~zmq.core.version.pyzmq_version_info` for getting libzmq and pyzmq versions as
+ tuples of numbers. This helps with the fact that version string comparison breaks down
+ once versions get into double-digits.
+* ioloop changes merged from upstream `Tornado <http://www.tornadoweb.org>`_ 2.1
+
+2.1.9
+=====
+
+* added zmq.ssh tools for tunneling socket connections, copied from IPython
+* Expanded sockopt support to cover changes in libzmq-4.0 dev.
+* Fixed an issue that prevented :exc:`KeyboardInterrupts` from being catchable.
+* Added attribute-access for set/getsockopt. Setting/Getting attributes of :class:`Sockets`
+ with the names of socket options is mapped to calls of set/getsockopt.
+
+.. sourcecode:: python
+
+ s.hwm = 10
+ s.identity = b'whoda'
+ s.linger
+ # -1
+
+* Terminating a :class:`~Context` closes the sockets it created, matching the behavior in
+ `czmq <http://czmq.zeromq.org/>`_.
+* :class:`ThreadDevices` use :meth:`Context.instance` to create sockets, so they can use
+ inproc connections to sockets in other threads.
+* fixed units error on :func:`zmq.select`, where the poll timeout was 1000 times longer
+ than expected.
+* Add missing ``DEALER/ROUTER`` socket type names (currently aliases, to be replacements for ``XREP/XREQ``).
+* base libzmq dependency raised to 2.1.4 (first stable release) from 2.1.0.
+
+
+2.1.7.1
+=======
+
+* bdist for 64b Windows only. This fixed a type mismatch on the ``ZMQ_FD`` sockopt
+ that only affected that platform.
+
+
+2.1.7
+=====
+
+* Added experimental support for libzmq-3.0 API
+* Add :func:`zmq.eventloop.ioloop.install` for using pyzmq's IOLoop in a tornado
+ application.
+
+
+2.1.4
+=====
+
+* First version with binary distribution support
+* Added :meth:`~Context.instance()` method for using a single Context throughout an application
+ without passing references around.
+
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..0761991
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+#
+# PyZMQ documentation build configuration file, created by
+# sphinx-quickstart on Sat Feb 20 23:31:19 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+import string
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0,os.path.abspath('../sphinxext'))
+
+# patch autodoc to work with Cython Sources
+import sphinx_cython
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+ 'ipython_console_highlighting',
+ 'numpydoc',
+ ]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'PyZMQ'
+copyright = u"""2013, Brian E. Granger & Min Ragan-Kelley.
+ØMQ logo © iMatix Corporation, used under the Creative Commons Attribution-Share Alike 3.0 License.
+Python logo ™ of the Python Software Foundation, used by Min RK with permission from the Foundation"""
+
+intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+
+import zmq
+
+# The short X.Y version.
+version = zmq.__version__.split('-')[0]
+# The full version, including alpha/beta/rc tags.
+release = zmq.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = 'zeromq.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'PyZMQdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'PyZMQ.tex', u'PyZMQ Documentation',
+ u'Brian E. Granger \\and Min Ragan-Kelley', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/docs/source/devices.rst b/docs/source/devices.rst
new file mode 100644
index 0000000..4ef70ce
--- /dev/null
+++ b/docs/source/devices.rst
@@ -0,0 +1,82 @@
+.. PyZMQ devices doc, by Min Ragan-Kelley, 2011
+
+.. _devices:
+
+Devices in PyZMQ
+================
+
+.. seealso::
+
+ ØMQ Guide `Device coverage <http://zguide.zeromq.org/chapter:all#toc32>`_.
+
+ØMQ has a notion of Devices - simple programs that manage a send-recv pattern for
+connecting two or more sockets. Being full programs, devices include a ``while(True)``
+loop and thus block execution permanently once invoked. We have provided in the
+:mod:`devices` subpackage some facilities for running these devices in the background, as
+well as a custom three-socket MonitoredQueue_ device.
+
+
+BackgroundDevices
+-----------------
+
+It seems fairly rare that in a Python program one would actually want to create a zmq
+device via :func:`.device` in the main thread, since such a call would block execution
+forever. The most likely model for launching devices is in background threads or
+processes. We have provided classes for launching devices in a background thread with
+:class:`.ThreadDevice` and via multiprocessing with :class:`.ProcessDevice`. For
+threadsafety and running across processes, these methods do not take Socket objects as
+arguments, but rather socket types, and then the socket creation and configuration happens
+via the BackgroundDevice's :meth:`foo_in` proxy methods. For each configuration method
+(bind/connect/setsockopt), there are proxy methods for calling those methods on the Socket
+objects created in the background thread or process, prefixed with 'in\_' or 'out\_',
+corresponding to the `in_socket` and `out_socket`::
+
+ from zmq.devices import ProcessDevice
+
+ pd = ProcessDevice(zmq.QUEUE, zmq.ROUTER, zmq.DEALER)
+ pd.bind_in('tcp://*:12345')
+ pd.connect_out('tcp://127.0.0.1:12543')
+ pd.setsockopt_in(zmq.IDENTITY, 'ROUTER')
+ pd.setsockopt_out(zmq.IDENTITY, 'DEALER')
+ pd.start()
+ # it will now be running in a background process
+
+MonitoredQueue
+--------------
+
+One of ØMQ's builtin devices is the ``QUEUE``. This is a symmetric two-socket device that
+fully supports passing messages in either direction via any pattern. We saw a logical
+extension of the ``QUEUE`` as one that behaves in the same way with respect to the in/out
+sockets, but also sends every message in either direction *also* on a third `monitor`
+socket. For performance reasons, this :func:`.monitored_queue` function is written in
+Cython, so the loop does not involve Python, and should have the same performance as the
+basic ``QUEUE`` device.
+
+One shortcoming of the ``QUEUE`` device is that it does not support having ``ROUTER``
+sockets as both input and output. This is because ``ROUTER`` sockets, when they receive a
+message, prepend the ``IDENTITY`` of the socket that sent the message (for use in routing
+the reply). The result is that the output socket will always try to route the incoming
+message back to the original sender, which is presumably not the intended pattern. In
+order for the queue to support a ROUTER-ROUTER connection, it must swap the first two parts
+of the message so that the right message goes out the other side.
+
+Invoking a monitored queue is similar to invoking a regular ØMQ device::
+
+ from zmq.devices import monitored_queue
+ ins = ctx.socket(zmq.ROUTER)
+ outs = ctx.socket(zmq.DEALER)
+ mons = ctx.socket(zmq.PUB)
+ configure_sockets(ins,outs,mons)
+ monitored_queue(ins, outs, mons, in_prefix='in', out_prefix='out')
+
+The `in_prefix` and `out_prefix` default to 'in' and 'out' respectively, and a PUB socket
+is most logical for the monitor socket, since it will never receive messages, and the
+in/out prefix is well suited to the PUB/SUB topic subscription model. All messages sent on
+`mons` will be multipart, the first part being the prefix corresponding to the socket that
+received the message.
+
+Or for launching an MQ in the background, there are :class:`.ThreadMonitoredQueue` and
+:class:`.ProcessMonitoredQueue`, which function just like the base
+BackgroundDevice objects, but add :meth:`foo_mon` methods for configuring the monitor socket.
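+
+For example, a minimal sketch of a monitored queue in a background thread
+(the socket types and endpoints here are illustrative, not taken from this document):
+
+.. sourcecode:: python
+
+ import zmq
+ from zmq.devices import ThreadMonitoredQueue
+
+ # in, out, and monitor socket types, plus the prefixes described above
+ tmq = ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'in', b'out')
+ tmq.bind_in('tcp://127.0.0.1:12345')
+ tmq.bind_out('tcp://127.0.0.1:12543')
+ tmq.bind_mon('tcp://127.0.0.1:12678')
+ tmq.start()
+ # the monitored queue now runs in a background thread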
+
+
diff --git a/docs/source/eventloop.rst b/docs/source/eventloop.rst
new file mode 100644
index 0000000..3126b8b
--- /dev/null
+++ b/docs/source/eventloop.rst
@@ -0,0 +1,197 @@
+.. PyZMQ eventloop doc, by Min Ragan-Kelley, 2011
+
+.. _eventloop:
+
+====================
+Eventloops and PyZMQ
+====================
+
+Tornado IOLoop
+==============
+
+Facebook's `Tornado`_ includes an eventloop for handling poll events on file descriptors and
+native sockets. We have included a small part of Tornado (specifically its
+:mod:`.ioloop`), and adapted its :class:`IOStream` class into :class:`.ZMQStream` for
+handling poll events on ØMQ sockets. A ZMQStream object works much like a Socket object,
+but instead of calling :meth:`~.Socket.recv` directly, you register a callback with
+:meth:`~.ZMQStream.on_recv`. Callbacks can also be registered for send events
+with :meth:`~.ZMQStream.on_send`.
+
+
+:func:`install()`
+-----------------
+
+With PyZMQ's ioloop, you can use zmq sockets in any tornado application. You must first
+install PyZMQ's :class:`.IOLoop`, with the :func:`.ioloop.install` function:
+
+.. sourcecode:: python
+
+ from zmq.eventloop import ioloop
+ ioloop.install()
+
+This sets the global instance of :class:`tornado.ioloop.IOLoop` with the global instance of
+our IOLoop class. The reason this must happen is that tornado objects avoid having to pass
+the active IOLoop instance around by having a staticmethod :meth:`.IOLoop.instance`, which
+always returns the active instance. If PyZMQ's IOLoop is installed after the first call to
+:meth:`.IOLoop.instance()` (called in almost every tornado object constructor), then it will
+raise an :exc:`AssertionError`, because the global IOLoop instance has already been
+created, and proceeding would result in not all objects being associated with the right
+IOLoop.
+
+It is possible to use PyZMQ sockets with tornado *without* calling :func:`.ioloop.install`,
+but it is less convenient. First, you must instruct the tornado IOLoop to use the zmq poller:
+
+.. sourcecode:: python
+
+ from zmq.eventloop.ioloop import ZMQIOLoop
+
+ loop = ZMQIOLoop()
+
+Then, when you instantiate tornado and ZMQStream objects, you must pass the `io_loop`
+argument to ensure that they use this loop, instead of the global instance.
+
+This is especially useful for writing tests, such as this:
+
+.. sourcecode:: python
+
+ from tornado.testing import AsyncTestCase
+ from zmq.eventloop.ioloop import ZMQIOLoop
+ from zmq.eventloop.zmqstream import ZMQStream
+
+ class TestZMQBridge(AsyncTestCase):
+
+ # Use a ZMQ-compatible I/O loop so that we can use `ZMQStream`.
+ def get_new_ioloop(self):
+ return ZMQIOLoop()
+
+You can also manually install this IOLoop as the global tornado instance, with:
+
+.. sourcecode:: python
+
+ from zmq.eventloop.ioloop import ZMQIOLoop
+ loop = ZMQIOLoop()
+ loop.install()
+
+but it will **NOT** be the global *pyzmq* IOLoop instance, so it must still be
+passed to your ZMQStream constructors.
+
+
+:meth:`send`
+------------
+
+ZMQStream objects do have :meth:`~.ZMQStream.send` and :meth:`~.ZMQStream.send_multipart`
+methods, which behave the same way as :meth:`.Socket.send`, but instead of sending right
+away, the :class:`.IOLoop` will wait until the socket is able to send (for instance if ``HWM``
+is met, or a ``REQ/REP`` pattern prohibits sending at a certain point). Messages sent via
+send will also be passed to the callback registered with :meth:`~.ZMQStream.on_send` after
+sending.
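+
+For example, a minimal sketch (the context ``ctx`` and the endpoint are hypothetical):
+
+.. sourcecode:: python
+
+ stream = ZMQStream(ctx.socket(zmq.PUSH))
+ stream.socket.connect('tcp://127.0.0.1:12345')
+ # queued now, actually sent once the IOLoop sees the socket is writable
+ stream.send_multipart([b'part1', b'part2'])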
+
+:meth:`on_recv`
+---------------
+
+:meth:`.ZMQStream.on_recv` is the primary method for using a ZMQStream. It registers a
+callback to fire with messages as they are received, which will *always* be multipart,
+even if its length is 1. You can easily use this to build things like an echo socket:
+
+.. sourcecode:: python
+
+ s = ctx.socket(zmq.REP)
+ s.bind('tcp://localhost:12345')
+ stream = ZMQStream(s)
+ def echo(msg):
+ stream.send_multipart(msg)
+ stream.on_recv(echo)
+ ioloop.IOLoop.instance().start()
+
+on_recv can also take a `copy` flag, just like :meth:`.Socket.recv`. If `copy=False`, then
+callbacks registered with on_recv will receive tracked :class:`.Frame` objects instead of
+bytes.
+
+.. note::
+
+ A callback must be registered using either :meth:`.ZMQStream.on_recv` or
+ :meth:`.ZMQStream.on_recv_stream` before any data will be received on the
+ underlying socket. This allows you to temporarily pause processing on a
+ socket by setting both callbacks to None. Processing can later be resumed
+ by restoring either callback.
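+
+ For example (a sketch, reusing ``stream`` and ``echo`` from the snippet above):
+
+ .. sourcecode:: python
+
+ stream.on_recv(None) # pause: no callback, so recv events are not processed
+ stream.on_recv(echo) # resume with the previous callback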
+
+
+:meth:`on_recv_stream`
+----------------------
+
+:meth:`.ZMQStream.on_recv_stream` is just like on_recv above, but the callback will be
+passed both the message and the stream, rather than just the message. This is meant to make
+it easier to use a single callback with multiple streams.
+
+.. sourcecode:: python
+
+ s1 = ctx.socket(zmq.REP)
+ s1.bind('tcp://localhost:12345')
+ stream1 = ZMQStream(s1)
+
+ s2 = ctx.socket(zmq.REP)
+ s2.bind('tcp://localhost:54321')
+ stream2 = ZMQStream(s2)
+
+ def echo(stream, msg):
+ stream.send_multipart(msg)
+
+ stream1.on_recv_stream(echo)
+ stream2.on_recv_stream(echo)
+
+ ioloop.IOLoop.instance().start()
+
+
+:meth:`flush`
+-------------
+
+Sometimes with an eventloop, there can be multiple events ready on a single iteration of
+the loop. The :meth:`~.ZMQStream.flush` method allows developers to pull messages off of
+the queue to enforce some priority over the event loop ordering. flush pulls any pending
+events off of the queue. You can specify to flush only recv events, only send events, or
+any events, and you can specify a limit for how many events to flush in order to prevent
+starvation.
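+
+A minimal sketch (assuming ``stream`` is an existing :class:`.ZMQStream`):
+
+.. sourcecode:: python
+
+ # handle up to 10 pending incoming messages right now
+ stream.flush(zmq.POLLIN, limit=10)
+ # or flush whatever send and recv events are currently pending
+ stream.flush()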
+
+.. _Tornado: https://github.com/facebook/tornado
+
+.. _zmq_green:
+
+PyZMQ and gevent
+================
+
+PyZMQ ≥ 2.2.0.1 ships with a `gevent <http://www.gevent.org/>`_ compatible API as :mod:`zmq.green`.
+To use it, simply:
+
+.. sourcecode:: python
+
+ import zmq.green as zmq
+
+Then write your code as normal.
+
+Socket.send/recv and zmq.Poller are gevent-aware.
+
+In PyZMQ ≥ 2.2.0.2, green.device and green.eventloop should be gevent-friendly as well.
+
+.. note::
+
+ The green device does *not* release the GIL, unlike the true device in zmq.core.
+
+zmq.green.eventloop includes a minimally patched IOLoop/ZMQStream in order to use the gevent-enabled Poller,
+so you should be able to use the ZMQStream interface in gevent apps as well,
+though using two eventloops simultaneously (tornado + gevent) is not recommended.
+
+.. warning::
+
+    There is a `known issue <https://github.com/zeromq/pyzmq/issues/229>`_ in gevent ≤ 1.0 (or libevent),
+ which can cause zeromq socket events to be missed.
+ PyZMQ works around this by adding a timeout so it will not wait forever for gevent to notice events.
+ The only known solution for this is to use gevent ≥ 1.0, which is currently at 1.0b3,
+ and does not exhibit this behavior.
+
+.. seealso::
+
+ zmq.green examples `on GitHub <https://github.com/zeromq/pyzmq/tree/master/examples/gevent>`_.
+
+:mod:`zmq.green` is simply `gevent_zeromq <https://github.com/traviscline/gevent_zeromq>`_,
+merged into the pyzmq project.
+
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..f32c90d
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,95 @@
+.. PyZMQ documentation master file, created by
+ sphinx-quickstart on Sat Feb 20 23:31:19 2010.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+PyZMQ Documentation
+===================
+
+:Release: |release|
+:Date: |today|
+
+
+PyZMQ is the Python bindings for ØMQ_. This
+documentation currently contains notes on some important aspects of developing PyZMQ and
+an overview of what the ØMQ API looks like in Python. For information on how to use
+ØMQ in general, see the many examples in the excellent `ØMQ Guide`_, all of which
+have a version in Python.
+
+PyZMQ works with Python 3 (≥ 3.2), and Python 2 (≥ 2.6), with no transformations or 2to3,
+as well as PyPy (at least 2.0 beta), thanks to a new CFFI backend.
+
+Please don't hesitate to report pyzmq-specific issues to our tracker_ on GitHub.
+General questions about ØMQ are better sent to the ØMQ `mailing list`_ or `IRC Channel`_.
+
+:ref:`Summary of Changes in PyZMQ <changelog>`
+
+
+Supported LibZMQ
+================
+
+PyZMQ aims to support all stable (≥ 2.1.4, ≥ 3.2.2, ≥ 4.0.1) and active development (≥ 4.1.0)
+versions of libzmq. Building the same pyzmq against various versions of libzmq is supported,
+but only the functionality of the linked libzmq will be available.
+
+.. note::
+
+ libzmq 3.0-3.1 are not, and will never be supported.
+ There never was a stable release of either.
+
+
+Binary distributions (eggs, MSIs, and wheels on `PyPI <http://pypi.python.org/pypi/pyzmq>`__
+or `GitHub <https://www.github.com/zeromq/pyzmq/downloads>`__) of PyZMQ ship with
+the stable version of libzmq at the time of release, built with default configuration.
+For pyzmq-|release|, this is 4.0.3.
+
+Using PyZMQ
+===========
+
+.. toctree::
+ :maxdepth: 2
+
+ api/index.rst
+ morethanbindings.rst
+ serialization.rst
+ devices.rst
+ eventloop.rst
+ logging.rst
+ ssh.rst
+
+
+Notes from developing PyZMQ
+===========================
+
+.. toctree::
+ :maxdepth: 2
+
+ pyversions.rst
+ unicode.rst
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+Links
+=====
+
+* ØMQ_ Home
+* The `ØMQ Guide`_
+* `PyZMQ Installation`_ notes on the ZeroMQ website
+* PyZMQ on GitHub_
+* Issue Tracker_
+
+.. _ØMQ: http://www.zeromq.org
+.. _ØMQ Guide: http://zguide.zeromq.org
+.. _mailing list: http://www.zeromq.org/docs:mailing-lists
+.. _IRC Channel: http://www.zeromq.org/chatroom
+.. _Cython: http://cython.org/
+.. _GitHub: https://www.github.com/zeromq/pyzmq
+.. _ØMQ Manual: http://www.zeromq.org/intro:read-the-manual
+.. _PyZMQ Installation: http://www.zeromq.org/bindings:python
+.. _tracker: https://www.github.com/zeromq/pyzmq/issues
+
diff --git a/docs/source/logging.rst b/docs/source/logging.rst
new file mode 100644
index 0000000..ea16da3
--- /dev/null
+++ b/docs/source/logging.rst
@@ -0,0 +1,84 @@
+.. PyZMQ logging doc, by Min Ragan-Kelley, 2011
+
+.. _logging:
+
+Asynchronous Logging via PyZMQ
+==============================
+
+.. seealso::
+
+ * The ØMQ guide `coverage <http://zguide.zeromq.org/chapter:all#toc7>`_ of PUB/SUB
+ messaging
+ * Python logging module `documentation <http://docs.python.org/library/logging.html>`_
+
+Python provides extensible logging facilities through its :py:mod:`logging` module. This
+module makes it easy to extend logging behavior through the use of
+:py:class:`~logging.Handler` objects. The most obvious case for hooking up pyzmq to
+logging would be to broadcast log messages over a PUB socket, so we provide a
+:class:`.PUBHandler` class for doing just that.
+
+PUB/SUB and Topics
+------------------
+
+The ØMQ PUB/SUB pattern consists of a PUB socket broadcasting messages, and a collection
+of SUB sockets that receive those messages. Each PUB message is a multipart-message, where
+the first part is interpreted as a topic. SUB sockets can subscribe to topics by setting
+their ``SUBSCRIBE`` sockopt, e.g.::
+
+ sub = ctx.socket(zmq.SUB)
+ sub.setsockopt(zmq.SUBSCRIBE, 'topic1')
+ sub.setsockopt(zmq.SUBSCRIBE, 'topic2')
+
+When subscribed, the SUB socket will only receive messages where the first part *starts
+with* one of the topics set via ``SUBSCRIBE``. The default behavior is to exclude all
+messages, and subscribing to the empty string '' will receive all messages.
+
+PUBHandler
+----------
+
+The :class:`.PUBHandler` object allows Python log records to be emitted
+on a PUB socket. The main difference between a PUBHandler and a regular logging Handler is
+the inclusion of topics. For the most basic logging, you can simply create a PUBHandler
+with an interface or a configured PUB socket, and just let it go::
+
+ pub = context.socket(zmq.PUB)
+ pub.bind('tcp://*:12345')
+ handler = PUBHandler(pub)
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+
+At this point, all messages logged with the default logger will be broadcast on the pub
+socket.
+
+The PUBHandler does work with topics, and the handler has a ``root_topic`` attribute::
+
+ handler.root_topic = 'myprogram'
+
+Python loggers also have loglevels. The base topic of messages emitted by the PUBHandler
+will be of the form: ``<handler.root_topic>.<loglevel>``, e.g. 'myprogram.INFO' or
+'whatever.ERROR'. This way, subscribers can easily subscribe to subsets of the logging
+messages. Log messages are always two-part, where the first part is the topic tree, and
+the second part is the actual log message.
+
+ >>> logger.info('hello there')
+ >>> print sub.recv_multipart()
+ ['myprogram.INFO', 'hello there']
+
+Subtopics
+*********
+
+You can also add to the topic tree below the loglevel on an individual message basis.
+Assuming your logger is connected to a PUBHandler, you can prepend as many additional
+subtopics to the message as you like; they will always be inserted after the loglevel. A special
+delimiter defined at ``zmq.log.handlers.TOPIC_DELIM`` is scanned by the PUBHandler, so if
+you pass your own subtopics prior to that symbol, they will be stripped from the message
+and added to the topic tree::
+
+ >>> log_msg = "hello there"
+ >>> subtopic = "sub.topic"
+ >>> msg = zmq.log.handlers.TOPIC_DELIM.join([subtopic, log_msg])
+ >>> logger.warn(msg)
+ >>> print sub.recv_multipart()
+ ['myprogram.WARN.sub.topic', 'hello there']
+
+
diff --git a/docs/source/morethanbindings.rst b/docs/source/morethanbindings.rst
new file mode 100644
index 0000000..1bff8f0
--- /dev/null
+++ b/docs/source/morethanbindings.rst
@@ -0,0 +1,166 @@
+.. PyZMQ Bindings doc, by Min Ragan-Kelley, 2011
+
+.. _bindings:
+
+More Than Just Bindings
+=======================
+
+PyZMQ is ostensibly the Python bindings for `ØMQ`_, but the project, following
+Python's 'batteries included' philosophy, provides more than just Python methods and
+objects for calling into the ØMQ C++ library.
+
+
+
+The Core as Bindings
+--------------------
+
+PyZMQ is currently broken up into four subpackages. First is the Core. :mod:`zmq.core`
+contains the actual bindings for ZeroMQ, and no extended functionality beyond the very
+basic. The core modules are split, such that each basic ZeroMQ object (or function, if no
+object is associated) is a separate module, e.g. :mod:`zmq.core.context` contains the
+:class:`.Context` object, :mod:`zmq.core.poll` contains a :class:`.Poller` object, as well
+as the :func:`.select` function, etc. ZMQ constants are, for convenience, all kept
+together in :mod:`zmq.core.constants`.
+
+There are two reasons for breaking the core into submodules: *recompilation* and
+*derivative projects*. The monolithic PyZMQ became quite tedious to have to recompile
+everything for a small change to a single object. With separate files, that's no longer
+necessary. The second reason has to do with Cython. PyZMQ is written in Cython, a tool for
+efficiently writing C-extensions for Python. By separating out our objects into individual
+`pyx` files, each with their declarations in a `pxd` header, other projects can write
+extensions in Cython and call directly to ZeroMQ at the C-level without the penalty of
+going through our Python objects.
+
+Thread Safety
+-------------
+
+In ØMQ, Contexts are threadsafe objects, but Sockets are **not**. It is safe to use a
+single Context (e.g. via :meth:`zmq.Context.instance`) in your entire multithreaded
+application, but you should create sockets on a per-thread basis. If you share sockets
+across threads, you are likely to encounter uncatchable C-level crashes of your
+application, unless you guard every access with a :py:class:`threading.Lock`; even then, this
+approach is not recommended.
+
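+A minimal sketch of the recommended pattern: one shared Context, one Socket per thread
+(the url here is arbitrary):
+
+.. sourcecode:: python
+
+    import threading
+    import zmq
+
+    ctx = zmq.Context.instance()   # the Context is safe to share
+
+    def worker():
+        # each thread creates, uses, and closes its *own* socket
+        s = ctx.socket(zmq.PUSH)
+        s.connect('tcp://127.0.0.1:5555')
+        s.send_string('hello from %s' % threading.current_thread().name)
+        s.close()
+
+    for i in range(4):
+        threading.Thread(target=worker).start()
+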
+.. seealso::
+
+ ZeroMQ API note on threadsafety on `2.2 <http://api.zeromq.org/2-2:zmq>`_
+ or `3.2 <http://api.zeromq.org/3-2:zmq>`_
+
+
+Socket Options as Attributes
+----------------------------
+
+.. versionadded:: 2.1.9
+
+In 0MQ, socket options are set/retrieved with the :meth:`set/getsockopt` methods. With the
+class-based approach in pyzmq, it would be logical to perform these operations with
+simple attribute access, and this has been added in pyzmq 2.1.9. Simply assign to or
+request a Socket attribute with the (case-insensitive) name of a sockopt, and it should
+behave just as you would expect:
+
+.. sourcecode:: python
+
+ s = ctx.socket(zmq.DEALER)
+ s.identity = b'dealer'
+ s.hwm = 10
+ s.events
+ # 0
+ s.fd
+ # 16
+
+
+Default Options on the Context
+******************************
+
+.. versionadded:: 2.1.11
+
+Just like setting socket options as attributes on Sockets, you can do the same on Contexts.
+This affects the default options of any *new* sockets created after the assignment.
+
+.. sourcecode:: python
+
+ ctx = zmq.Context()
+ ctx.linger = 0
+ rep = ctx.socket(zmq.REP)
+ req = ctx.socket(zmq.REQ)
+
+Socket options that do not apply to a socket (e.g. SUBSCRIBE on non-SUB sockets) will
+simply be ignored.
+
+
+Core Extensions
+---------------
+
+We have extended the core functionality in two ways that appear inside the :mod:`core`
+bindings, and are not general ØMQ features.
+
+Builtin Serialization
+*********************
+
+First, we added common serialization with the builtin :py:mod:`json` and :py:mod:`pickle`
+as first-class methods to the :class:`Socket` class. A socket has the methods
+:meth:`~.Socket.send_json` and :meth:`~.Socket.send_pyobj`, which correspond to sending an
+object over the wire after serializing with :mod:`json` and :mod:`pickle` respectively,
+and any object sent via those methods can be reconstructed with the
+:meth:`~.Socket.recv_json` and :meth:`~.Socket.recv_pyobj` methods. Unicode strings are
+another kind of object that cannot be sent over the wire unambiguously, so we include
+:meth:`~.Socket.send_string` and :meth:`~.Socket.recv_string`, which simply send and receive
+bytes after encoding or decoding the message ('utf-8' is the default).
+
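+For example (a sketch, with ``req`` and ``rep`` standing in for an already-connected
+REQ/REP pair):
+
+.. sourcecode:: python
+
+    req.send_json({'a': [1, 2, 3]})    # serialized with json
+    print rep.recv_json()              # {u'a': [1, 2, 3]}
+
+    rep.send_pyobj({'b': (4, 5)})      # serialized with pickle
+    print req.recv_pyobj()             # {'b': (4, 5)}
+
+    req.send_string(u'hello')          # encoded to utf-8 bytes
+    print rep.recv_string()            # decoded back to a unicode string
+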
+.. seealso::
+
+ * :ref:`Further information <serialization>` on serialization in pyzmq.
+
+ * :ref:`Our Unicode discussion <unicode>` for more information on the trials and
+ tribulations of working with Unicode in a C extension while supporting Python 2 and 3.
+
+
+MessageTracker
+**************
+
+The second extension of basic ØMQ functionality is the :class:`MessageTracker`. The
+MessageTracker is an object used to track when the underlying ZeroMQ is done with a
+message buffer. One of the main use cases for ØMQ in Python is the ability to perform
+non-copying sends. Thanks to Python's buffer interface, many objects (including NumPy
+arrays) provide the buffer interface, and are thus directly sendable. However, as with any
+asynchronous non-copying messaging system like ØMQ or MPI, it can be important to know
+when the message has actually been sent, so it is safe again to edit the buffer without
+worry of corrupting the message. This is what the MessageTracker is for.
+
+The MessageTracker is a simple object, but there is a penalty to its use. Since by its
+very nature, the MessageTracker must involve threadsafe communication (specifically a
+builtin :py:class:`~Queue.Queue` object), instantiating a MessageTracker takes a modest
+amount of time (tens of µs), so when many small messages are being sent, this overhead can
+actually dominate performance. As a result, tracking is optional, controlled by the ``track``
+flag, which always defaults to ``False`` and can be passed in each of the three places
+where a Frame object (the pyzmq object for wrapping a segment of a message) is
+instantiated: the :class:`.Frame` constructor, and non-copying sends and receives.
+
+A MessageTracker is very simple, and has just one method and one attribute. The property
+:attr:`MessageTracker.done` will be ``True`` when the Frame(s) being tracked are no
+longer in use by ØMQ, and :meth:`.MessageTracker.wait` will block, waiting for the
+Frame(s) to be released.
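+
+A sketch of how tracking is used with a non-copying send (``socket`` is assumed to be an
+existing, connected Socket; the numpy array just stands in for any large buffer):
+
+.. sourcecode:: python
+
+    import numpy
+    A = numpy.random.random((1024, 1024))
+
+    # a non-copying send with tracking enabled returns a MessageTracker
+    tracker = socket.send(A, copy=False, track=True)
+
+    # ... do other work ...
+
+    if not tracker.done:
+        tracker.wait()    # block until ØMQ no longer needs the buffer
+    A[0, 0] = 0           # now it is safe to modify A again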
+
+.. Note::
+
+ A Frame cannot be tracked after it has been instantiated without tracking. If a
+ Frame is to even have the *option* of tracking, it must be constructed with
+ ``track=True``.
+
+
+Extensions
+----------
+
+So far, PyZMQ includes four extensions to core ØMQ that we found basic enough to be
+included in PyZMQ itself:
+
+* :ref:`zmq.log <logging>` : Logging handlers for hooking Python logging up to the
+ network
+* :ref:`zmq.devices <devices>` : Custom devices and objects for running devices in the
+ background
+* :ref:`zmq.eventloop <eventloop>` : The `Tornado`_ event loop, adapted for use
+ with ØMQ sockets.
+* :ref:`zmq.ssh <ssh>` : Simple tools for tunneling zeromq connections via ssh.
+
+.. _ØMQ: http://www.zeromq.org
+.. _Tornado: https://github.com/facebook/tornado
diff --git a/docs/source/pyversions.rst b/docs/source/pyversions.rst
new file mode 100644
index 0000000..8a509dc
--- /dev/null
+++ b/docs/source/pyversions.rst
@@ -0,0 +1,195 @@
+.. PyZMQ Version compatibility doc, by Min Ragan-Kelley, 2010
+
+.. _pyversions:
+
+PyZMQ, Python2.5, and Python3
+=============================
+
+PyZMQ is a fairly light, low-level library, so supporting as many versions
+as is reasonable is our goal. Currently, we support at least Python 2.5-3.1.
+Making the changes to the codebase required a few tricks, which are documented here
+for future reference, either by us or by other developers looking to support several
+versions of Python.
+
+.. Note::
+
+ It is far simpler to support 2.6-3.x than to include 2.5. Many of the significant
+ syntax changes have been backported to 2.6, so just writing new-style code would work
+ in many cases. I will try to note these points as they come up.
+
+
+pyversion_compat.h
+------------------
+
+Many functions we use, primarily involved in converting between C-buffers and Python
+objects, are not available on all supported versions of Python. In order to resolve
+missing symbols, we added a header :file:`utils/pyversion_compat.h` that defines missing
+symbols with macros. Some of these macros alias new names to old functions (e.g.
+``PyBytes_AsString``), so that we can call new-style functions on older versions, and some
+simply define the function as an empty exception raiser. The important thing is that the
+symbols are defined to prevent compiler warnings and linking errors. Everywhere we use
+C-API functions that may not be available in a supported version, at the top of the file
+is the code:
+
+.. sourcecode:: guess
+
+ cdef extern from "pyversion_compat.h":
+ pass
+
+This ensures that the symbols are defined in the Cython generated C-code. Higher level
+switching logic exists in the code itself, to prevent actually calling unavailable
+functions, but the symbols must still be defined.
+
+Bytes and Strings
+-----------------
+
+.. Note::
+
+ If you are using Python >= 2.6, to prepare your PyZMQ code for Python3 you should use
+ the ``b'message'`` syntax to ensure all your string literal messages will still be
+ :class:`bytes` after you make the upgrade.
+
+The most cumbersome part of PyZMQ compatibility from a user's perspective is the fact
+that, since ØMQ uses C-strings, and would like to do so without copying, we must use the
+Py3k :class:`bytes` object, which is backported to 2.6. In order to do this in a
+Python-version independent way, we added a small utility that unambiguously defines the
+string types: :class:`bytes`, :class:`unicode`, :obj:`basestring`. This is important,
+because :class:`str` means different things on 2.x and 3.x, and :class:`bytes` is
+undefined on 2.5, and both :class:`unicode` and :obj:`basestring` are undefined on 3.x.
+All typechecking in PyZMQ is done against these types:
+
+================= ================= ====================
+Explicit Type 2.x 3.x
+================= ================= ====================
+:obj:`bytes` :obj:`str` :obj:`bytes`
+:obj:`unicode` :obj:`unicode` :obj:`str`
+:obj:`basestring` :obj:`basestring` :obj:`(str, bytes)`
+================= ================= ====================
+
+.. Note::
+
+ 2.5 specific
+
+ Where we really noticed the issue of :class:`bytes` vs :obj:`strings` coming up for
+ users was in updating the tests to run on every version. Since the ``b'bytes
+ literal'`` syntax was not backported to 2.5, we must call ``"message".encode()`` for
+ *every* string in the test suite.
+
+.. seealso:: :ref:`Unicode discussion <unicode>` for more information on strings/bytes.
+
+``PyBytes_*``
+*************
+
+The standard C-API function for turning a C-string into a Python string was a set of
+functions with the prefix ``PyString_*``. However, with the Unicode changes made in
+Python3, this was broken into ``PyBytes_*`` for bytes objects and ``PyUnicode_*`` for
+unicode objects. We changed all our ``PyString_*`` code to ``PyBytes_*``, which was
+backported to 2.6.
+
+
+.. Note::
+
+ 2.5 Specific:
+
+ Since Python 2.5 doesn't support the ``PyBytes_*`` functions, we had to alias them to
+ the ``PyString_*`` methods in utils/pyversion_compat.h.
+
+ .. sourcecode:: c++
+
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
+ #define PyBytes_FromString PyString_FromString
+ #define PyBytes_AsString PyString_AsString
+ #define PyBytes_Size PyString_Size
+
+Buffers
+-------
+
+The layer that is most complicated for developers, but shouldn't trouble users, is the
+Python C-Buffer APIs. These are the methods for converting between Python objects and C
+buffers. The reason it is complicated is that it keeps changing.
+
+There are two buffer interfaces for converting an object to a C-buffer, known as new-style
+and old-style. Old-style buffers were introduced long ago, but the new-style is only
+backported to 2.6. The old-style buffer interface is not available in 3.x. There is also
+an old- and new-style interface for creating Python objects that view C-memory. The
+old-style object is called a :class:`buffer`, and the new-style object is
+:class:`memoryview`. Unlike the new-style buffer interface for objects,
+:class:`memoryview` has only been backported to *2.7*. This means that the available
+buffer-related functions are not the same in any two versions of Python 2.5, 2.6, 2.7, or
+3.1.
+
+We have a :file:`utils/buffers.pxd` file that defines our :func:`asbuffer` and
+:func:`frombuffer` functions. :file:`utils/buffers.pxd` was adapted from mpi4py_'s
+:file:`asbuffer.pxi`. The :func:`frombuffer` functionality was added. These functions
+internally switch based on Python version to call the appropriate C-API functions.
+
+.. seealso:: `Python Buffer API <bufferapi>`_
+
+.. _bufferapi: http://docs.python.org/c-api/buffer.html
+
+
+``__str__``
+-----------
+
+As discussed, :class:`str` is not a platform independent type. The two places where we are
+required to return native str objects are :func:`error.strerror`, and
+:func:`Message.__str__`. In both of these cases, the natural return is actually a
+:class:`bytes` object. In the methods, the native :class:`str` type is checked, and if the
+native str is actually unicode, then we decode the bytes into unicode:
+
+.. sourcecode:: py
+
+ # ...
+ b = natural_result()
+ if str is unicode:
+ return b.decode()
+ else:
+ return b
+
+Exceptions
+----------
+
+.. Note::
+
+ This section is only relevant for supporting Python 2.5 and 3.x, not for 2.6-3.x.
+
+The syntax for handling exceptions has `changed <PEP-3110>`_ in Python 3. The old syntax:
+
+.. sourcecode:: py
+
+ try:
+ s.send(msg)
+ except zmq.ZMQError, e:
+ handle(e)
+
+is no longer valid in Python 3. Instead, the new syntax for this is:
+
+.. sourcecode:: py
+
+ try:
+ s.send(msg)
+ except zmq.ZMQError as e:
+ handle(e)
+
+This new syntax is backported to Python 2.6, but is invalid on 2.5. For 2.6-3.x compatible
+code, we could just use the new syntax. However, the only method we found to catch an
+exception for handling on both 2.5 and 3.1 is to get the exception object inside the
+exception block:
+
+.. sourcecode:: py
+
+ try:
+ s.send(msg)
+ except zmq.ZMQError:
+ e = sys.exc_info()[1]
+ handle(e)
+
+This is certainly not as elegant as either the old or new syntax, but it's the only way we
+have found to work everywhere.
+
+.. seealso:: PEP-3110_
+
+.. _PEP-3110: http://www.python.org/dev/peps/pep-3110/
+
+
+.. _mpi4py: http://mpi4py.googlecode.com \ No newline at end of file
diff --git a/docs/source/serialization.rst b/docs/source/serialization.rst
new file mode 100644
index 0000000..8f9095a
--- /dev/null
+++ b/docs/source/serialization.rst
@@ -0,0 +1,90 @@
+.. PyZMQ serialization doc, by Min Ragan-Kelley, 2011
+
+.. _serialization:
+
+Serializing messages with PyZMQ
+===============================
+
+When sending messages over a network, you often need to marshal your data into bytes.
+
+
+Builtin serialization
+---------------------
+
+PyZMQ is primarily bindings for libzmq, but we do provide three builtin serialization
+methods for convenience, to help Python developers learn libzmq. Python has two primary
+packages for serializing objects: :py:mod:`json` and :py:mod:`pickle`, so we provide
+simple convenience methods for sending and receiving objects serialized with these
+modules. A socket has the methods :meth:`~.Socket.send_json` and
+:meth:`~.Socket.send_pyobj`, which correspond to sending an object over the wire after
+serializing with json and pickle respectively, and any object sent via those
+methods can be reconstructed with the :meth:`~.Socket.recv_json` and
+:meth:`~.Socket.recv_pyobj` methods.
+
+
+These methods are designed for convenience, not for performance, so developers who do want
+to emphasize performance should use their own serialized send/recv methods.
+
+Using your own serialization
+----------------------------
+
+In general, you will want to provide your own serialization that is optimized for your
+application or library availability. This may include using your own preferred
+serialization ([msgpack]_, [protobuf]_), or adding compression via [zlib]_ in the standard
+library, or the super fast [blosc]_ library.
+
+There are two simple models for implementing your own serialization: write a function
+that takes the socket as an argument, or subclass Socket for use in your own apps.
+
+For instance, pickles can often be reduced substantially in size by compressing the data.
+The following will send *compressed* pickles over the wire:
+
+.. sourcecode:: python
+
+ import zlib, cPickle as pickle
+
+ def send_zipped_pickle(socket, obj, flags=0, protocol=-1):
+ """pickle an object, and zip the pickle before sending it"""
+ p = pickle.dumps(obj, protocol)
+ z = zlib.compress(p)
+ return socket.send(z, flags=flags)
+
+    def recv_zipped_pickle(socket, flags=0):
+        """inverse of send_zipped_pickle"""
+        z = socket.recv(flags)
+        p = zlib.decompress(z)
+        return pickle.loads(p)
+
+A common data structure in Python is the numpy array. PyZMQ supports sending
+numpy arrays without copying any data, since they provide the Python buffer interface.
+However just the buffer is not enough information to reconstruct the array on the
+receiving side. Here is an example of a send/recv that allow non-copying
+sends/recvs of numpy arrays including the dtype/shape data necessary for reconstructing
+the array.
+
+.. sourcecode:: python
+
+    import numpy
+    import zmq
+
+ def send_array(socket, A, flags=0, copy=True, track=False):
+ """send a numpy array with metadata"""
+ md = dict(
+ dtype = str(A.dtype),
+ shape = A.shape,
+ )
+ socket.send_json(md, flags|zmq.SNDMORE)
+ return socket.send(A, flags, copy=copy, track=track)
+
+ def recv_array(socket, flags=0, copy=True, track=False):
+ """recv a numpy array"""
+ md = socket.recv_json(flags=flags)
+ msg = socket.recv(flags=flags, copy=copy, track=track)
+ buf = buffer(msg)
+ A = numpy.frombuffer(buf, dtype=md['dtype'])
+ return A.reshape(md['shape'])
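+
+A quick usage sketch of the two helpers above, over a PAIR of sockets on an arbitrary
+local port:
+
+.. sourcecode:: python
+
+    ctx = zmq.Context()
+    a = ctx.socket(zmq.PAIR)
+    a.bind('tcp://127.0.0.1:5556')
+    b = ctx.socket(zmq.PAIR)
+    b.connect('tcp://127.0.0.1:5556')
+
+    A = numpy.arange(10, dtype=numpy.float64)
+    send_array(a, A, copy=False)
+    B = recv_array(b, copy=False)
+    assert (B == A).all()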
+
+
+.. [msgpack] Message Pack serialization library http://msgpack.org
+.. [protobuf] Google Protocol Buffers http://code.google.com/p/protobuf
+.. [zlib] Python stdlib module for zip compression: :py:mod:`zlib`
+.. [blosc] Blosc: A blocking, shuffling and loss-less (and crazy-fast) compression library http://blosc.pytables.org/trac
diff --git a/docs/source/ssh.rst b/docs/source/ssh.rst
new file mode 100644
index 0000000..ca2a607
--- /dev/null
+++ b/docs/source/ssh.rst
@@ -0,0 +1,80 @@
+.. PyZMQ ssh doc, by Min Ragan-Kelley, 2011
+
+.. _ssh:
+
+Tunneling PyZMQ Connections with SSH
+====================================
+
+.. versionadded:: 2.1.9
+
+You may want to connect ØMQ sockets across machines, or untrusted networks. One common way
+to do this is to tunnel the connection via SSH. IPython_ introduced some tools for
+tunneling ØMQ connections over ssh in simple cases. These functions have been brought into
+pyzmq as :mod:`zmq.ssh` under IPython's BSD license.
+
+PyZMQ will use the shell ssh command via pexpect_ by default, but it also supports
+using paramiko_ for tunnels, so it should work on Windows.
+
+.. note::
+
+ pexpect has no Python3 support at this time, so Python 3 users should get Thomas
+ Kluyver's `pexpect-u`_ fork.
+
+An SSH tunnel has five basic components:
+
+* server : the SSH server through which the tunnel will be created
+* remote ip : the IP of the remote machine *as seen from the server*
+  (remote ip may be, but is not generally, the same machine as the server).
+* remote port : the port on the remote machine that you want to connect to.
+* local ip : the interface on your local machine you want to use (default: 127.0.0.1)
+* local port : the local port you want to forward to the remote port (default: high random)
+
+So once you have established the tunnel, connections to ``localip:localport`` will actually
+be connections to ``remoteip:remoteport``.
+
+In most cases, you have a zeromq url for a remote machine, but you need to tunnel the
+connection through an ssh server.
+
+So if you would use this command from the same LAN as the remote machine:
+
+.. sourcecode:: python
+
+ sock.connect("tcp://10.0.1.2:5555")
+
+then to make the same connection from another machine outside that network, where you
+have ssh access to a machine ``server`` on the same LAN, you would simply do:
+
+.. sourcecode:: python
+
+ from zmq import ssh
+ ssh.tunnel_connection(sock, "tcp://10.0.1.2:5555", "server")
+
+Note that ``"server"`` can actually be a fully specified ``"user@server:port"`` ssh url.
+Since this really just launches a shell command, all your ssh configuration of usernames,
+aliases, keys, etc. will be respected. If necessary, :func:`tunnel_connection` does take
+arguments for specific passwords, private keys (the ssh ``-i`` option), and non-default
+choice of whether to use paramiko.
+
+If you are on the same network as the machine, but it is only listening on localhost, you
+can still connect by making the machine itself the server, and using loopback as the
+remote ip:
+
+.. sourcecode:: python
+
+ from zmq import ssh
+ ssh.tunnel_connection(sock, "tcp://127.0.0.1:5555", "10.0.1.2")
+
+The :func:`tunnel_connection` function is a simple utility that forwards a random
+localhost port to the real destination, and connects a socket to the new local url,
+rather than the remote one that wouldn't actually work.
+
+.. seealso::
+
+ A short discussion of ssh tunnels: http://www.revsys.com/writings/quicktips/ssh-tunnel.html
+
+
+.. _IPython: http://ipython.org
+.. _pexpect: http://www.noah.org/wiki/pexpect
+.. _pexpect-u: http://pypi.python.org/pypi/pexpect-u
+.. _paramiko: http://www.lag.net/paramiko/
+
diff --git a/docs/source/unicode.rst b/docs/source/unicode.rst
new file mode 100644
index 0000000..a0c7878
--- /dev/null
+++ b/docs/source/unicode.rst
@@ -0,0 +1,188 @@
+.. PyZMQ Unicode doc, by Min Ragan-Kelley, 2010
+
+.. _unicode:
+
+PyZMQ and Unicode
+=================
+
+PyZMQ is built with an eye towards an easy transition to Python 3, and part of
+that is dealing with unicode strings. This is an overview of some of what we
+found, and what it means for PyZMQ.
+
+First, Unicode in Python 2 and 3
+********************************
+
+In Python < 3, a ``str`` object is really a C string with some sugar - a
+specific series of bytes with some fun methods like ``endswith()`` and
+``split()``. In 2.0, the ``unicode`` object was added, which handles different
+methods of encoding. In Python 3, however, the meaning of ``str`` changes. A
+``str`` in Python 3 is a full unicode object, with encoding and everything. If
+you want a C string with some sugar, there is a new object called ``bytes``,
+that behaves much like the 2.x ``str``. The idea is that for a user, a string is
+a series of *characters*, not a series of bytes. For simple ascii, the two are
+interchangeable, but if you consider accents and non-Latin characters, then the
+character meaning of byte sequences can be ambiguous, since it depends on the
+encoding scheme. They decided to avoid the ambiguity by forcing users who want
+the actual bytes to specify the encoding every time they want to convert a
+string to bytes. That way, users are aware of the difference between a series of
+bytes and a collection of characters, and don't confuse the two, as happens in
+Python 2.x.
+
+The problems (on both sides) come from the fact that regardless of the language
+design, users are mostly going to use ``str`` objects to represent collections
+of characters, and the behavior of that object is dramatically different in
+certain aspects between the 2.x ``bytes`` approach and the 3.x ``unicode``
+approach. The ``unicode`` approach has the advantage of removing byte ambiguity
+- it's a list of characters, not bytes. However, if you really do want the
+bytes, it's very inefficient to get them. The ``bytes`` approach has the
+advantage of efficiency. A ``bytes`` object really is just a char* pointer with
+some methods to be used on it, so interacting with C
+code, etc. is highly efficient and straightforward. However, understanding a
+bytes object as a string with extended characters introduces ambiguity and
+possibly confusion.
+
+To avoid ambiguity, hereafter we will refer to encoded C arrays as 'bytes' and
+abstract unicode objects as 'strings'.
+
+Unicode Buffers
+---------------
+
+Since unicode objects have a wide range of representations, they are not stored
+as the bytes according to their encoding, but rather in a format called UCS (an
+older fixed-width Unicode format). On some platforms (OS X, Windows), the storage
+is UCS-2, which is 2 bytes per character. On most \*ix systems, it is UCS-4, or
+4 bytes per character. The contents of the *buffer* of a ``unicode`` object are
+not encoding dependent (always UCS-2 or UCS-4), but they are *platform*
+dependent. As a result of this, and the further insistence on not interpreting
+``unicode`` objects as bytes without specifying encoding, ``str`` objects in
+Python 3 don't even provide the buffer interface. You simply cannot get the raw
+bytes of a ``unicode`` object without specifying the encoding for the bytes. In
+Python 2.x, you can get to the raw buffer, but the platform dependence and the
+fact that the encoding of the buffer is not the encoding of the object makes it
+very confusing, so this is probably a good move.
+
+The efficiency problem here comes from the fact that simple ascii strings are 4x
+as big in memory as they need to be (on most Linux, 2x on other platforms).
+Also, to translate to/from C code that works with char*, you always have to copy
+data and encode/decode the bytes. This really is horribly inefficient from a
+memory standpoint. Essentially, where memory efficiency matters to you, you
+should never ever use strings; use bytes. The problem is that users will almost
+always use ``str``, and in 2.x they are efficient, but in 3.x they are not. We
+want to make sure that we don't help the user make this mistake, so we ensure
+that zmq methods don't try to hide what strings really are.
+
+What This Means for PyZMQ
+*************************
+
+PyZMQ is a wrapper for a C library, so it really should use bytes, since a
+string is not a simple wrapper for ``char *`` like it used to be, but an
+abstract sequence of characters. The representations of bytes in Python are
+either the ``bytes`` object itself, or any object that provides the buffer
+interface (aka memoryview). In Python 2.x, unicode objects do provide the buffer
+interface, but as they do not in Python 3, where pyzmq requires bytes, we
+specifically reject unicode objects.
+
+The relevant methods here are ``socket.send/recv``, ``socket.get/setsockopt``,
+``socket.bind/connect``. The important consideration for send/recv and
+set/getsockopt is that when you put in something, you really should get the same
+object back with its partner method. We can easily coerce unicode objects to
+bytes with send/setsockopt, but the problem is that the pair method of
+recv/getsockopt will always be bytes, and there should be symmetry. We certainly
+shouldn't try to always decode on the retrieval side, because if users just want
+bytes, then we are potentially using up enormous amounts of excess memory
+unnecessarily, due to copying and larger memory footprint of unicode strings.
+
+Still, we recognize the fact that users will quite frequently have unicode
+strings that they want to send, so we have added ``socket.<method>_string()``
+wrappers. These methods simply wrap their bytes counterpart by encoding
+to/decoding from bytes around them, and they all take an `encoding` keyword
+argument that defaults to utf-8. Since encoding and decoding are necessary to
+translate between unicode and bytes, it is impossible to perform non-copying
+actions with these wrappers.
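+
+For example (a sketch; ``s`` and ``r`` stand in for an already-connected pair of sockets):
+
+.. sourcecode:: python
+
+    s.send_string(u'hello', encoding='utf-8')   # encoded to bytes, then sent
+    msg = r.recv_string(encoding='utf-8')       # received as bytes, decoded to unicode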
+
+``socket.bind/connect`` methods are different from these, in that they are
+strictly setters and there is no corresponding getter method. As a result, we
+feel that we can safely coerce unicode objects to bytes (always to utf-8) in
+these methods.
+
+.. note::
+
+ For cross-language symmetry (including Python 3), the ``_unicode`` methods
+ are now ``_string``. Many languages have a notion of native strings, and
+ the use of ``_unicode`` was wedded too closely to the name of such objects
+ in Python 2. For the time being, anywhere you see ``_string``, ``_unicode``
+ also works, and is the only option in pyzmq ≤ 2.1.11.
+
+
+The Methods
+-----------
+
+Overview of the relevant methods:
+
+.. py:function:: socket.bind(self, addr)
+
+ `addr` is ``bytes`` or ``unicode``. If ``unicode``,
+ encoded to utf-8 ``bytes``
+
+.. py:function:: socket.connect(self, addr)
+
+ `addr` is ``bytes`` or ``unicode``. If ``unicode``,
+ encoded to utf-8 ``bytes``
+
+.. py:function:: socket.send(self, object obj, flags=0, copy=True)
+
+ `obj` is ``bytes`` or provides buffer interface.
+
+ if `obj` is ``unicode``, raise ``TypeError``
+
+.. py:function:: socket.recv(self, flags=0, copy=True)
+
+ returns ``bytes`` if `copy=True`
+
+ returns ``zmq.Message`` if `copy=False`:
+
+ `message.buffer` is a buffer view of the ``bytes``
+
+ `str(message)` provides the ``bytes``
+
+ `unicode(message)` decodes `message.buffer` with utf-8
+
+.. py:function:: socket.send_string(self, unicode s, flags=0, encoding='utf-8')
+
+ takes a ``unicode`` string `s`, and sends the ``bytes``
+ after encoding without an extra copy, via:
+
+ `socket.send(s.encode(encoding), flags, copy=False)`
+
+.. py:function:: socket.recv_string(self, flags=0, encoding='utf-8')
+
+ always returns ``unicode`` string
+
+ there will be a ``UnicodeError`` if it cannot decode the buffer
+
+ performs non-copying `recv`, and decodes the buffer with `encoding`
+
+.. py:function:: socket.setsockopt(self, opt, optval)
+
+ only accepts ``bytes`` for `optval` (or ``int``, depending on `opt`)
+
+ ``TypeError`` if ``unicode`` or anything else
+
+.. py:function:: socket.getsockopt(self, opt)
+
+ returns ``bytes`` (or ``int``), never ``unicode``
+
+.. py:function:: socket.setsockopt_string(self, opt, unicode optval, encoding='utf-8')
+
+ accepts ``unicode`` string for `optval`
+
+ encodes `optval` with `encoding` before passing the ``bytes`` to
+ `setsockopt`
+
+.. py:function:: socket.getsockopt_string(self, opt, encoding='utf-8')
+
+ always returns ``unicode`` string, after decoding with `encoding`
+
+ note that `zmq.IDENTITY` is the only `sockopt` with a string value
+ that can be queried with `getsockopt`
+
diff --git a/docs/sphinxext/apigen.py b/docs/sphinxext/apigen.py
new file mode 100644
index 0000000..b06e3d4
--- /dev/null
+++ b/docs/sphinxext/apigen.py
@@ -0,0 +1,441 @@
+"""Attempt to generate templates for module reference with Sphinx
+
+XXX - we exclude extension modules
+
+To include extension modules, first identify them as valid in the
+``_uri2path`` method, then handle them in the ``_parse_module`` script.
+
+We get functions and classes by parsing the text of .py files.
+Alternatively we could import the modules for discovery, and we'd have
+to do that for extension modules. This would involve changing the
+``_parse_module`` method to work via import and introspection, and
+might involve changing ``discover_modules`` (which determines which
+files are modules, and therefore which module URIs will be passed to
+``_parse_module``).
+
+NOTE: this is a modified version of a script originally shipped with the
+PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
+project."""
+
+# Stdlib imports
+import os
+import re
+
+# Functions and classes
+class ApiDocWriter(object):
+ ''' Class for automatic detection and parsing of API docs
+ to Sphinx-parsable reST format'''
+
+ # only separating first two levels
+ rst_section_levels = ['*', '=', '-', '~', '^']
+
+ def __init__(self,
+ package_name,
+ rst_extension='.rst',
+ package_skip_patterns=None,
+ module_skip_patterns=None,
+ ):
+ ''' Initialize package for parsing
+
+ Parameters
+ ----------
+ package_name : string
+ Name of the top-level package. *package_name* must be the
+ name of an importable package
+ rst_extension : string, optional
+ Extension for reST files, default '.rst'
+ package_skip_patterns : None or sequence of {strings, regexps}
+ Sequence of strings giving URIs of packages to be excluded
+ Operates on the package path, starting at (including) the
+ first dot in the package path, after *package_name* - so,
+ if *package_name* is ``sphinx``, then ``sphinx.util`` will
+            result in ``.util`` being passed for searching by these
+ regexps. If is None, gives default. Default is:
+ ['\.tests$']
+ module_skip_patterns : None or sequence
+ Sequence of strings giving URIs of modules to be excluded
+ Operates on the module name including preceding URI path,
+ back to the first dot after *package_name*. For example
+ ``sphinx.util.console`` results in the string to search of
+ ``.util.console``
+ If is None, gives default. Default is:
+ ['\.setup$', '\._']
+ '''
+ if package_skip_patterns is None:
+ package_skip_patterns = ['\\.tests$']
+ if module_skip_patterns is None:
+ module_skip_patterns = ['\\.setup$', '\\._']
+ self.package_name = package_name
+ self.rst_extension = rst_extension
+ self.package_skip_patterns = package_skip_patterns
+ self.module_skip_patterns = module_skip_patterns
+
+ def get_package_name(self):
+ return self._package_name
+
+ def set_package_name(self, package_name):
+ ''' Set package_name
+
+ >>> docwriter = ApiDocWriter('sphinx')
+ >>> import sphinx
+ >>> docwriter.root_path == sphinx.__path__[0]
+ True
+ >>> docwriter.package_name = 'docutils'
+ >>> import docutils
+ >>> docwriter.root_path == docutils.__path__[0]
+ True
+ '''
+ # It's also possible to imagine caching the module parsing here
+ self._package_name = package_name
+ self.root_module = __import__(package_name)
+ self.root_path = self.root_module.__path__[0]
+ self.written_modules = None
+
+ package_name = property(get_package_name, set_package_name, None,
+ 'get/set package_name')
+
+ def _get_object_name(self, line):
+ ''' Get second token in line
+ >>> docwriter = ApiDocWriter('sphinx')
+ >>> docwriter._get_object_name(" def func(): ")
+ 'func'
+ >>> docwriter._get_object_name(" class Klass(object): ")
+ 'Klass'
+ >>> docwriter._get_object_name(" class Klass: ")
+ 'Klass'
+ '''
+ if line.startswith('cdef'):
+ line = line.split(None,1)[1]
+ name = line.split()[1].split('(')[0].strip()
+ # in case we have classes which are not derived from object
+ # ie. old style classes
+ return name.rstrip(':')
+
+ def _uri2path(self, uri):
+ ''' Convert uri to absolute filepath
+
+ Parameters
+ ----------
+ uri : string
+ URI of python module to return path for
+
+ Returns
+ -------
+ path : None or string
+ Returns None if there is no valid path for this URI
+ Otherwise returns absolute file system path for URI
+
+ Examples
+ --------
+ >>> docwriter = ApiDocWriter('sphinx')
+ >>> import sphinx
+ >>> modpath = sphinx.__path__[0]
+ >>> res = docwriter._uri2path('sphinx.builder')
+ >>> res == os.path.join(modpath, 'builder.py')
+ True
+ >>> res = docwriter._uri2path('sphinx')
+ >>> res == os.path.join(modpath, '__init__.py')
+ True
+ >>> docwriter._uri2path('sphinx.does_not_exist')
+
+ '''
+ if uri == self.package_name:
+ return os.path.join(self.root_path, '__init__.py')
+ path = uri.replace('.', os.path.sep)
+ path = path.replace(self.package_name + os.path.sep, '')
+ path = os.path.join(self.root_path, path)
+ # XXX maybe check for extensions as well?
+ if os.path.exists(path + '.py'): # file
+ path += '.py'
+ elif os.path.exists(path + '.pyx'): # file
+ path += '.pyx'
+ elif os.path.exists(os.path.join(path, '__init__.py')):
+ path = os.path.join(path, '__init__.py')
+ else:
+ return None
+ return path
+
+ def _path2uri(self, dirpath):
+ ''' Convert directory path to uri '''
+ relpath = dirpath.replace(self.root_path, self.package_name)
+ if relpath.startswith(os.path.sep):
+ relpath = relpath[1:]
+ return relpath.replace(os.path.sep, '.')
+
+ def _parse_module(self, uri):
+ ''' Parse module defined in *uri* '''
+ filename = self._uri2path(uri)
+ if filename is None:
+ # nothing that we could handle here.
+ return ([],[])
+ f = open(filename, 'rt')
+ functions, classes = self._parse_lines(f)
+ f.close()
+ return functions, classes
+
+ def _parse_lines(self, linesource):
+ ''' Parse lines of text for functions and classes '''
+ functions = []
+ classes = []
+ for line in linesource:
+ if line.startswith('def ') and line.count('('):
+ # exclude private stuff
+ name = self._get_object_name(line)
+ if not name.startswith('_'):
+ functions.append(name)
+ elif line.startswith('class '):
+ # exclude private stuff
+ name = self._get_object_name(line)
+ if not name.startswith('_'):
+ classes.append(name)
+ elif line.startswith('cpdef ') and line.count('('):
+ # exclude private stuff
+ name = self._get_object_name(line)
+ if not name.startswith('_'):
+ functions.append(name)
+ elif line.startswith('cdef class '):
+ # exclude private stuff
+ name = self._get_object_name(line)
+ if not name.startswith('_'):
+ classes.append(name)
+ else:
+ pass
+ functions.sort()
+ classes.sort()
+ return functions, classes
+
+ def generate_api_doc(self, uri):
+ '''Make autodoc documentation template string for a module
+
+ Parameters
+ ----------
+ uri : string
+ python location of module - e.g 'sphinx.builder'
+
+ Returns
+ -------
+ S : string
+ Contents of API doc
+ '''
+ # get the names of all classes and functions
+ functions, classes = self._parse_module(uri)
+ if not len(functions) and not len(classes):
+ print 'WARNING: Empty -',uri # dbg
+ return ''
+
+ # Make a shorter version of the uri that omits the package name for
+ # titles
+ uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
+
+ ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
+
+ chap_title = uri_short
+ ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
+ + '\n\n')
+
+ # Set the chapter title to read 'module' for all modules except for the
+ # main packages
+ if '.' in uri:
+ title = 'Module: :mod:`' + uri_short + '`'
+ else:
+ title = ':mod:`' + uri_short + '`'
+ ad += title + '\n' + self.rst_section_levels[2] * len(title)
+
+ # if len(classes):
+ # ad += '\nInheritance diagram for ``%s``:\n\n' % uri
+ # ad += '.. inheritance-diagram:: %s \n' % uri
+ # ad += ' :parts: 3\n'
+
+ ad += '\n.. automodule:: ' + uri + '\n'
+ ad += '\n.. currentmodule:: ' + uri + '\n'
+ multi_class = len(classes) > 1
+ multi_fx = len(functions) > 1
+ if multi_class:
+ ad += '\n' + 'Classes' + '\n' + \
+ self.rst_section_levels[2] * 7 + '\n'
+ elif len(classes) and multi_fx:
+ ad += '\n' + 'Class' + '\n' + \
+ self.rst_section_levels[2] * 5 + '\n'
+ for c in classes:
+ ad += '\n:class:`' + c + '`\n' \
+ + self.rst_section_levels[multi_class + 2 ] * \
+ (len(c)+9) + '\n\n'
+ ad += '\n.. autoclass:: ' + c + '\n'
+ # must NOT exclude from index to keep cross-refs working
+ ad += ' :members:\n' \
+ ' :undoc-members:\n' \
+ ' :inherited-members:\n' \
+ '\n'
+ # skip class.__init__()
+ # ' .. automethod:: __init__\n'
+ if multi_fx:
+ ad += '\n' + 'Functions' + '\n' + \
+ self.rst_section_levels[2] * 9 + '\n\n'
+ elif len(functions) and multi_class:
+ ad += '\n' + 'Function' + '\n' + \
+ self.rst_section_levels[2] * 8 + '\n\n'
+ for f in functions:
+ # must NOT exclude from index to keep cross-refs working
+ ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
+ return ad
+
+ def _survives_exclude(self, matchstr, match_type):
+ ''' Returns True if *matchstr* does not match patterns
+
+ ``self.package_name`` removed from front of string if present
+
+ Examples
+ --------
+ >>> dw = ApiDocWriter('sphinx')
+ >>> dw._survives_exclude('sphinx.okpkg', 'package')
+ True
+ >>> dw.package_skip_patterns.append('^\\.badpkg$')
+ >>> dw._survives_exclude('sphinx.badpkg', 'package')
+ False
+ >>> dw._survives_exclude('sphinx.badpkg', 'module')
+ True
+ >>> dw._survives_exclude('sphinx.badmod', 'module')
+ True
+ >>> dw.module_skip_patterns.append('^\\.badmod$')
+ >>> dw._survives_exclude('sphinx.badmod', 'module')
+ False
+ '''
+ if match_type == 'module':
+ patterns = self.module_skip_patterns
+ elif match_type == 'package':
+ patterns = self.package_skip_patterns
+ else:
+ raise ValueError('Cannot interpret match type "%s"'
+ % match_type)
+ # Match to URI without package name
+ L = len(self.package_name)
+ if matchstr[:L] == self.package_name:
+ matchstr = matchstr[L:]
+ for pat in patterns:
+ try:
+ pat.search
+ except AttributeError:
+ pat = re.compile(pat)
+ if pat.search(matchstr):
+ return False
+ return True
+
+ def discover_modules(self):
+ ''' Return module sequence discovered from ``self.package_name``
+
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ mods : sequence
+ Sequence of module names within ``self.package_name``
+
+ Examples
+ --------
+ >>> dw = ApiDocWriter('sphinx')
+ >>> mods = dw.discover_modules()
+ >>> 'sphinx.util' in mods
+ True
+ >>> dw.package_skip_patterns.append('\.util$')
+ >>> 'sphinx.util' in dw.discover_modules()
+ False
+ >>>
+ '''
+ modules = [self.package_name]
+ # raw directory parsing
+ for dirpath, dirnames, filenames in os.walk(self.root_path):
+ # Check directory names for packages
+ root_uri = self._path2uri(os.path.join(self.root_path,
+ dirpath))
+ for dirname in dirnames[:]: # copy list - we modify inplace
+ package_uri = '.'.join((root_uri, dirname))
+ if (self._uri2path(package_uri) and
+ self._survives_exclude(package_uri, 'package')):
+ modules.append(package_uri)
+ else:
+ dirnames.remove(dirname)
+ # Check filenames for modules
+ for filename in filenames:
+ module_name = filename[:-3]
+ module_uri = '.'.join((root_uri, module_name))
+ if (self._uri2path(module_uri) and
+ self._survives_exclude(module_uri, 'module')):
+ modules.append(module_uri)
+ return sorted(modules)
+
+ def write_modules_api(self, modules,outdir):
+ # write the list
+ written_modules = []
+ for m in modules:
+ api_str = self.generate_api_doc(m)
+ if not api_str:
+ continue
+ # write out to file
+ outfile = os.path.join(outdir,
+ m + self.rst_extension)
+ fileobj = open(outfile, 'wt')
+ fileobj.write(api_str)
+ fileobj.close()
+ written_modules.append(m)
+ self.written_modules = written_modules
+
+ def write_api_docs(self, outdir):
+ """Generate API reST files.
+
+ Parameters
+ ----------
+ outdir : string
+ Directory name in which to store files
+ We create automatic filenames for each module
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ Sets self.written_modules to list of written modules
+ """
+ if not os.path.exists(outdir):
+ os.mkdir(outdir)
+ # compose list of modules
+ modules = self.discover_modules()
+ self.write_modules_api(modules,outdir)
+
+ def write_index(self, outdir, froot='gen', relative_to=None):
+ """Make a reST API index file from written files
+
+ Parameters
+ ----------
+ path : string
+ Filename to write index to
+ outdir : string
+ Directory to which to write generated index file
+ froot : string, optional
+ root (filename without extension) of filename to write to
+ Defaults to 'gen'. We add ``self.rst_extension``.
+ relative_to : string
+ path to which written filenames are relative. This
+ component of the written file path will be removed from
+ outdir, in the generated index. Default is None, meaning,
+ leave path as it is.
+ """
+ if self.written_modules is None:
+ raise ValueError('No modules written')
+ # Get full filename path
+ path = os.path.join(outdir, froot+self.rst_extension)
+ # Path written into index is relative to rootpath
+ if relative_to is not None:
+ relpath = outdir.replace(relative_to + os.path.sep, '')
+ else:
+ relpath = outdir
+ idx = open(path,'wt')
+ w = idx.write
+ w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
+ w('.. toctree::\n\n')
+ for f in self.written_modules:
+ w(' %s\n' % os.path.join(relpath,f))
+ idx.close()
diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py
new file mode 100644
index 0000000..f374b3d
--- /dev/null
+++ b/docs/sphinxext/docscrape.py
@@ -0,0 +1,497 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+ """A line-based string reader.
+
+ """
+ def __init__(self, data):
+ """
+ Parameters
+ ----------
+ data : str
+ String with lines separated by '\n'.
+
+ """
+ if isinstance(data,list):
+ self._str = data
+ else:
+ self._str = data.split('\n') # store string as list of lines
+
+ self.reset()
+
+ def __getitem__(self, n):
+ return self._str[n]
+
+ def reset(self):
+ self._l = 0 # current line nr
+
+ def read(self):
+ if not self.eof():
+ out = self[self._l]
+ self._l += 1
+ return out
+ else:
+ return ''
+
+ def seek_next_non_empty_line(self):
+ for l in self[self._l:]:
+ if l.strip():
+ break
+ else:
+ self._l += 1
+
+ def eof(self):
+ return self._l >= len(self._str)
+
+ def read_to_condition(self, condition_func):
+ start = self._l
+ for line in self[start:]:
+ if condition_func(line):
+ return self[start:self._l]
+ self._l += 1
+ if self.eof():
+ return self[start:self._l+1]
+ return []
+
+ def read_to_next_empty_line(self):
+ self.seek_next_non_empty_line()
+ def is_empty(line):
+ return not line.strip()
+ return self.read_to_condition(is_empty)
+
+ def read_to_next_unindented_line(self):
+ def is_unindented(line):
+ return (line.strip() and (len(line.lstrip()) == len(line)))
+ return self.read_to_condition(is_unindented)
+
+ def peek(self,n=0):
+ if self._l + n < len(self._str):
+ return self[self._l + n]
+ else:
+ return ''
+
+ def is_empty(self):
+ return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+ def __init__(self,docstring):
+ docstring = textwrap.dedent(docstring).split('\n')
+
+ self._doc = Reader(docstring)
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': [''],
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'Other Parameters': [],
+ 'Attributes': [],
+ 'Methods': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'Warnings': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {}
+ }
+
+ self._parse()
+
+ def __getitem__(self,key):
+ return self._parsed_data[key]
+
+ def __setitem__(self,key,val):
+ if not self._parsed_data.has_key(key):
+ warn("Unknown section %s" % key)
+ else:
+ self._parsed_data[key] = val
+
+ def _is_at_section(self):
+ self._doc.seek_next_non_empty_line()
+
+ if self._doc.eof():
+ return False
+
+ l1 = self._doc.peek().strip() # e.g. Parameters
+
+ if l1.startswith('.. index::'):
+ return True
+
+ l2 = self._doc.peek(1).strip() # ---------- or ==========
+ return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+ def _strip(self,doc):
+ i = 0
+ j = 0
+ for i,line in enumerate(doc):
+ if line.strip(): break
+
+ for j,line in enumerate(doc[::-1]):
+ if line.strip(): break
+
+ return doc[i:len(doc)-j]
+
+ def _read_to_next_section(self):
+ section = self._doc.read_to_next_empty_line()
+
+ while not self._is_at_section() and not self._doc.eof():
+ if not self._doc.peek(-1).strip(): # previous line was empty
+ section += ['']
+
+ section += self._doc.read_to_next_empty_line()
+
+ return section
+
+ def _read_sections(self):
+ while not self._doc.eof():
+ data = self._read_to_next_section()
+ name = data[0].strip()
+
+ if name.startswith('..'): # index section
+ yield name, data[1:]
+ elif len(data) < 2:
+ yield StopIteration
+ else:
+ yield name, self._strip(data[2:])
+
+ def _parse_param_list(self,content):
+ r = Reader(content)
+ params = []
+ while not r.eof():
+ header = r.read().strip()
+ if ' : ' in header:
+ arg_name, arg_type = header.split(' : ')[:2]
+ else:
+ arg_name, arg_type = header, ''
+
+ desc = r.read_to_next_unindented_line()
+ desc = dedent_lines(desc)
+
+ params.append((arg_name,arg_type,desc))
+
+ return params
+
+
+ _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+ r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+ def _parse_see_also(self, content):
+ """
+ func_name : Descriptive text
+ continued text
+ another_func_name : Descriptive text
+ func_name1, func_name2, :meth:`func_name`, func_name3
+
+ """
+ items = []
+
+ def parse_item_name(text):
+ """Match ':role:`name`' or 'name'"""
+ m = self._name_rgx.match(text)
+ if m:
+ g = m.groups()
+ if g[1] is None:
+ return g[3], None
+ else:
+ return g[2], g[1]
+            raise ValueError("%s is not an item name" % text)
+
+ def push_item(name, rest):
+ if not name:
+ return
+ name, role = parse_item_name(name)
+ items.append((name, list(rest), role))
+ del rest[:]
+
+ current_func = None
+ rest = []
+
+ for line in content:
+ if not line.strip(): continue
+
+ m = self._name_rgx.match(line)
+ if m and line[m.end():].strip().startswith(':'):
+ push_item(current_func, rest)
+ current_func, line = line[:m.end()], line[m.end():]
+ rest = [line.split(':', 1)[1].strip()]
+ if not rest[0]:
+ rest = []
+ elif not line.startswith(' '):
+ push_item(current_func, rest)
+ current_func = None
+ if ',' in line:
+ for func in line.split(','):
+ push_item(func, [])
+ elif line.strip():
+ current_func = line
+ elif current_func is not None:
+ rest.append(line.strip())
+ push_item(current_func, rest)
+ return items
+
+ def _parse_index(self, section, content):
+ """
+ .. index: default
+ :refguide: something, else, and more
+
+ """
+ def strip_each_in(lst):
+ return [s.strip() for s in lst]
+
+ out = {}
+ section = section.split('::')
+ if len(section) > 1:
+ out['default'] = strip_each_in(section[1].split(','))[0]
+ for line in content:
+ line = line.split(':')
+ if len(line) > 2:
+ out[line[1]] = strip_each_in(line[2].split(','))
+ return out
+
+ def _parse_summary(self):
+ """Grab signature (if given) and summary"""
+ if self._is_at_section():
+ return
+
+ summary = self._doc.read_to_next_empty_line()
+ summary_str = " ".join([s.strip() for s in summary]).strip()
+ if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+ self['Signature'] = summary_str
+ if not self._is_at_section():
+ self['Summary'] = self._doc.read_to_next_empty_line()
+ else:
+ self['Summary'] = summary
+
+ if not self._is_at_section():
+ self['Extended Summary'] = self._read_to_next_section()
+
+ def _parse(self):
+ self._doc.reset()
+ self._parse_summary()
+
+ for (section,content) in self._read_sections():
+ if not section.startswith('..'):
+ section = ' '.join([s.capitalize() for s in section.split(' ')])
+ if section in ('Parameters', 'Attributes', 'Methods',
+ 'Returns', 'Raises', 'Warns'):
+ self[section] = self._parse_param_list(content)
+ elif section.startswith('.. index::'):
+ self['index'] = self._parse_index(section, content)
+ elif section == 'See Also':
+ self['See Also'] = self._parse_see_also(content)
+ else:
+ self[section] = content
+
+ # string conversion routines
+
+ def _str_header(self, name, symbol='-'):
+ return [name, len(name)*symbol]
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ if self['Signature']:
+ return [self['Signature'].replace('*','\*')] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ if self['Summary']:
+ return self['Summary'] + ['']
+ else:
+ return []
+
+ def _str_extended_summary(self):
+ if self['Extended Summary']:
+ return self['Extended Summary'] + ['']
+ else:
+ return []
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ for param,param_type,desc in self[name]:
+ out += ['%s : %s' % (param, param_type)]
+ out += self._str_indent(desc)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += self[name]
+ out += ['']
+ return out
+
+ def _str_see_also(self, func_role):
+ if not self['See Also']: return []
+ out = []
+ out += self._str_header("See Also")
+ last_had_desc = True
+ for func, desc, role in self['See Also']:
+ if role:
+ link = ':%s:`%s`' % (role, func)
+ elif func_role:
+ link = ':%s:`%s`' % (func_role, func)
+ else:
+ link = "`%s`_" % func
+ if desc or last_had_desc:
+ out += ['']
+ out += [link]
+ else:
+ out[-1] += ", %s" % link
+ if desc:
+ out += self._str_indent([' '.join(desc)])
+ last_had_desc = True
+ else:
+ last_had_desc = False
+ out += ['']
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.iteritems():
+ if section == 'default':
+ continue
+ out += [' :%s: %s' % (section, ', '.join(references))]
+ return out
+
+ def __str__(self, func_role=''):
+ out = []
+ out += self._str_signature()
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters','Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_section('Warnings')
+ out += self._str_see_also(func_role)
+ for s in ('Notes','References','Examples'):
+ out += self._str_section(s)
+ out += self._str_index()
+ return '\n'.join(out)
+
+
+def indent(str,indent=4):
+ indent_str = ' '*indent
+ if str is None:
+ return indent_str
+ lines = str.split('\n')
+ return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+ """Dedent a list of lines maximally."""
+ return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+ return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+ def __init__(self, func, role='func', doc=None):
+ self._f = func
+ self._role = role # e.g. "func" or "meth"
+ if doc is None:
+ doc = inspect.getdoc(func) or ''
+ try:
+ NumpyDocString.__init__(self, doc)
+ except ValueError, e:
+ print '*'*78
+ print "ERROR: '%s' while parsing `%s`" % (e, self._f)
+ print '*'*78
+ #print "Docstring follows:"
+ #print doclines
+ #print '='*78
+
+ if not self['Signature']:
+ func, func_name = self.get_func()
+ try:
+ # try to read signature
+ argspec = inspect.getargspec(func)
+ argspec = inspect.formatargspec(*argspec)
+ argspec = argspec.replace('*','\*')
+ signature = '%s%s' % (func_name, argspec)
+ except TypeError, e:
+ signature = '%s()' % func_name
+ self['Signature'] = signature
+
+ def get_func(self):
+ func_name = getattr(self._f, '__name__', self.__class__.__name__)
+ if inspect.isclass(self._f):
+ func = getattr(self._f, '__call__', self._f.__init__)
+ else:
+ func = self._f
+ return func, func_name
+
+ def __str__(self):
+ out = ''
+
+ func, func_name = self.get_func()
+ signature = self['Signature'].replace('*', '\*')
+
+ roles = {'func': 'function',
+ 'meth': 'method'}
+
+ if self._role:
+ if not roles.has_key(self._role):
+ print "Warning: invalid role %s" % self._role
+ out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
+ func_name)
+
+ out += super(FunctionDoc, self).__str__(func_role=self._role)
+ return out
+
+
+class ClassDoc(NumpyDocString):
+ def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None):
+ if not inspect.isclass(cls):
+ raise ValueError("Initialise using a class. Got %r" % cls)
+ self._cls = cls
+
+ if modulename and not modulename.endswith('.'):
+ modulename += '.'
+ self._mod = modulename
+ self._name = cls.__name__
+ self._func_doc = func_doc
+
+ if doc is None:
+ doc = pydoc.getdoc(cls)
+
+ NumpyDocString.__init__(self, doc)
+
+ @property
+ def methods(self):
+ return [name for name,func in inspect.getmembers(self._cls)
+ if not name.startswith('_') and callable(func)]
+
+ def __str__(self):
+ out = ''
+ out += super(ClassDoc, self).__str__()
+ out += "\n\n"
+
+ #for m in self.methods:
+ # print "Parsing `%s`" % m
+ # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
+ # out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
+
+ return out
+
+
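Editor's note: the NumpyDocString / FunctionDoc / ClassDoc classes in the file above form a small standalone parser for numpy-format docstrings. A minimal usage sketch, assuming only that the file above is importable as `docscrape`; the example docstring itself is invented:

    # Parse a numpy-format docstring and read back its sections.
    from docscrape import NumpyDocString

    doc = NumpyDocString("""
    add(a, b)

    Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.

    Returns
    -------
    int
        The sum of a and b.
    """)

    # Sections are exposed through __getitem__; 'Parameters' holds
    # (name, type, description-lines) tuples built by _parse_param_list().
    print(doc['Signature'])         # add(a, b)
    print(doc['Parameters'][0][0])  # a
    print(str(doc))                 # plain reST rendering via __str__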
diff --git a/docs/sphinxext/docscrape_sphinx.py b/docs/sphinxext/docscrape_sphinx.py
new file mode 100644
index 0000000..77ed271
--- /dev/null
+++ b/docs/sphinxext/docscrape_sphinx.py
@@ -0,0 +1,136 @@
+import re, inspect, textwrap, pydoc
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+ # string conversion routines
+ def _str_header(self, name, symbol='`'):
+ return ['.. rubric:: ' + name, '']
+
+ def _str_field_list(self, name):
+ return [':' + name + ':']
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ return ['']
+ if self['Signature']:
+ return ['``%s``' % self['Signature']] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ return self['Summary'] + ['']
+
+ def _str_extended_summary(self):
+ return self['Extended Summary'] + ['']
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_field_list(name)
+ out += ['']
+ for param,param_type,desc in self[name]:
+ out += self._str_indent(['**%s** : %s' % (param.strip(),
+ param_type)])
+ out += ['']
+ out += self._str_indent(desc,8)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += ['']
+ content = textwrap.dedent("\n".join(self[name])).split("\n")
+ out += content
+ out += ['']
+ return out
+
+ def _str_see_also(self, func_role):
+ out = []
+ if self['See Also']:
+ see_also = super(SphinxDocString, self)._str_see_also(func_role)
+ out = ['.. seealso::', '']
+ out += self._str_indent(see_also[2:])
+ return out
+
+ def _str_warnings(self):
+ out = []
+ if self['Warnings']:
+ out = ['.. warning::', '']
+ out += self._str_indent(self['Warnings'])
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ if len(idx) == 0:
+ return out
+
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.iteritems():
+ if section == 'default':
+ continue
+ elif section == 'refguide':
+ out += [' single: %s' % (', '.join(references))]
+ else:
+ out += [' %s: %s' % (section, ','.join(references))]
+ return out
+
+ def _str_references(self):
+ out = []
+ if self['References']:
+ out += self._str_header('References')
+ if isinstance(self['References'], str):
+ self['References'] = [self['References']]
+ out.extend(self['References'])
+ out += ['']
+ return out
+
+ def __str__(self, indent=0, func_role="obj"):
+ out = []
+ out += self._str_signature()
+ out += self._str_index() + ['']
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters', 'Attributes', 'Methods',
+ 'Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_warnings()
+ out += self._str_see_also(func_role)
+ out += self._str_section('Notes')
+ out += self._str_references()
+ out += self._str_section('Examples')
+ out = self._str_indent(out,indent)
+ return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+ pass
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+ pass
+
+def get_doc_object(obj, what=None, doc=None):
+ if what is None:
+ if inspect.isclass(obj):
+ what = 'class'
+ elif inspect.ismodule(obj):
+ what = 'module'
+ elif callable(obj):
+ what = 'function'
+ else:
+ what = 'object'
+ if what == 'class':
+ return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
+ elif what in ('function', 'method'):
+ return SphinxFunctionDoc(obj, '', doc=doc)
+ else:
+ if doc is None:
+ doc = pydoc.getdoc(obj)
+ return SphinxDocString(doc)
+
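Editor's note: a rough sketch of how the get_doc_object() dispatcher at the bottom of this file could be exercised on its own. The function `scale` is invented for illustration; only the file above (and its docscrape dependency) is assumed importable:

    from docscrape_sphinx import get_doc_object

    def scale(x, factor=2):
        """Scale a value.

        Parameters
        ----------
        x : float
            Value to scale.
        factor : float, optional
            Multiplier applied to x.

        Returns
        -------
        float
            The scaled value.
        """
        return x * factor

    # get_doc_object() picks SphinxFunctionDoc for plain callables and
    # renders the Parameters/Returns sections as Sphinx field lists.
    print(str(get_doc_object(scale)))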
diff --git a/docs/sphinxext/inheritance_diagram.py b/docs/sphinxext/inheritance_diagram.py
new file mode 100644
index 0000000..407fc13
--- /dev/null
+++ b/docs/sphinxext/inheritance_diagram.py
@@ -0,0 +1,407 @@
+"""
+Defines a docutils directive for inserting inheritance diagrams.
+
+Provide the directive with one or more classes or modules (separated
+by whitespace). For modules, all of the classes in that module will
+be used.
+
+Example::
+
+ Given the following classes:
+
+ class A: pass
+ class B(A): pass
+ class C(A): pass
+ class D(B, C): pass
+ class E(B): pass
+
+ .. inheritance-diagram:: D E
+
+ Produces a graph like the following:
+
+ A
+ / \
+ B C
+ / \ /
+ E D
+
+The graph is inserted into HTML output as a PNG with a clickable image
+map, and into LaTeX output as a PDF.
+"""
+
+import inspect
+import os
+import re
+import subprocess
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+from docutils.nodes import Body, Element
+from docutils.parsers.rst import directives
+from sphinx.roles import xfileref_role
+
+def my_import(name):
+ """Module importer - taken from the python documentation.
+
+ This function allows importing names with dots in them."""
+
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+class DotException(Exception):
+ pass
+
+class InheritanceGraph(object):
+ """
+ Given a list of classes, determine the set of classes they inherit
+ from, all the way up to the root "object", and generate a graphviz
+ dot graph from that set.
+ """
+ def __init__(self, class_names, show_builtins=False):
+ """
+ *class_names* is a list of child classes to show bases from.
+
+ If *show_builtins* is True, then Python builtins will be shown
+ in the graph.
+ """
+ self.class_names = class_names
+ self.classes = self._import_classes(class_names)
+ self.all_classes = self._all_classes(self.classes)
+ if len(self.all_classes) == 0:
+ raise ValueError("No classes found for inheritance diagram")
+ self.show_builtins = show_builtins
+
+ py_sig_re = re.compile(r'''^([\w.]*\.)? # optional dotted module path
+ (\w+) \s* $ # class or module name
+ ''', re.VERBOSE)
+
+ def _import_class_or_module(self, name):
+ """
+ Import a class using its fully-qualified *name*.
+ """
+ try:
+ path, base = self.py_sig_re.match(name).groups()
+ except:
+ raise ValueError(
+ "Invalid class or module '%s' specified for inheritance diagram" % name)
+ fullname = (path or '') + base
+ path = (path and path.rstrip('.'))
+ if not path:
+ path = base
+ try:
+ module = __import__(path, None, None, [])
+ # We must do an import of the fully qualified name. Otherwise if a
+ # subpackage 'a.b' is requested where 'import a' does NOT provide
+ # 'a.b' automatically, then 'a.b' will not be found below. This
+ # second call will force the equivalent of 'import a.b' to happen
+ # after the top-level import above.
+ my_import(fullname)
+
+ except ImportError:
+ raise ValueError(
+ "Could not import class or module '%s' specified for inheritance diagram" % name)
+
+ try:
+ todoc = module
+ for comp in fullname.split('.')[1:]:
+ todoc = getattr(todoc, comp)
+ except AttributeError:
+ raise ValueError(
+ "Could not find class or module '%s' specified for inheritance diagram" % name)
+
+ # If a class, just return it
+ if inspect.isclass(todoc):
+ return [todoc]
+ elif inspect.ismodule(todoc):
+ classes = []
+ for cls in todoc.__dict__.values():
+ if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
+ classes.append(cls)
+ return classes
+ raise ValueError(
+ "'%s' does not resolve to a class or module" % name)
+
+ def _import_classes(self, class_names):
+ """
+ Import a list of classes.
+ """
+ classes = []
+ for name in class_names:
+ classes.extend(self._import_class_or_module(name))
+ return classes
+
+ def _all_classes(self, classes):
+ """
+ Return a list of all classes that are ancestors of *classes*.
+ """
+ all_classes = {}
+
+ def recurse(cls):
+ all_classes[cls] = None
+ for c in cls.__bases__:
+ if c not in all_classes:
+ recurse(c)
+
+ for cls in classes:
+ recurse(cls)
+
+ return all_classes.keys()
+
+ def class_name(self, cls, parts=0):
+ """
+ Given a class object, return a fully-qualified name. This
+ works for things I've tested in matplotlib so far, but may not
+ be completely general.
+ """
+ module = cls.__module__
+ if module == '__builtin__':
+ fullname = cls.__name__
+ else:
+ fullname = "%s.%s" % (module, cls.__name__)
+ if parts == 0:
+ return fullname
+ name_parts = fullname.split('.')
+ return '.'.join(name_parts[-parts:])
+
+ def get_all_class_names(self):
+ """
+ Get all of the class names involved in the graph.
+ """
+ return [self.class_name(x) for x in self.all_classes]
+
+ # These are the default options for graphviz
+ default_graph_options = {
+ "rankdir": "LR",
+ "size": '"8.0, 12.0"'
+ }
+ default_node_options = {
+ "shape": "box",
+ "fontsize": 10,
+ "height": 0.25,
+ "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
+ "style": '"setlinewidth(0.5)"'
+ }
+ default_edge_options = {
+ "arrowsize": 0.5,
+ "style": '"setlinewidth(0.5)"'
+ }
+
+ def _format_node_options(self, options):
+ return ','.join(["%s=%s" % x for x in options.items()])
+ def _format_graph_options(self, options):
+ return ''.join(["%s=%s;\n" % x for x in options.items()])
+
+ def generate_dot(self, fd, name, parts=0, urls={},
+ graph_options={}, node_options={},
+ edge_options={}):
+ """
+ Generate a graphviz dot graph from the classes that
+ were passed in to __init__.
+
+ *fd* is a Python file-like object to write to.
+
+ *name* is the name of the graph
+
+ *urls* is a dictionary mapping class names to http urls
+
+ *graph_options*, *node_options*, *edge_options* are
+ dictionaries containing key/value pairs to pass on as graphviz
+ properties.
+ """
+ g_options = self.default_graph_options.copy()
+ g_options.update(graph_options)
+ n_options = self.default_node_options.copy()
+ n_options.update(node_options)
+ e_options = self.default_edge_options.copy()
+ e_options.update(edge_options)
+
+ fd.write('digraph %s {\n' % name)
+ fd.write(self._format_graph_options(g_options))
+
+ for cls in self.all_classes:
+ if not self.show_builtins and cls in __builtins__.values():
+ continue
+
+ name = self.class_name(cls, parts)
+
+ # Write the node
+ this_node_options = n_options.copy()
+ url = urls.get(self.class_name(cls))
+ if url is not None:
+ this_node_options['URL'] = '"%s"' % url
+ fd.write(' "%s" [%s];\n' %
+ (name, self._format_node_options(this_node_options)))
+
+ # Write the edges
+ for base in cls.__bases__:
+ if not self.show_builtins and base in __builtins__.values():
+ continue
+
+ base_name = self.class_name(base, parts)
+ fd.write(' "%s" -> "%s" [%s];\n' %
+ (base_name, name,
+ self._format_node_options(e_options)))
+ fd.write('}\n')
+
+ def run_dot(self, args, name, parts=0, urls={},
+ graph_options={}, node_options={}, edge_options={}):
+ """
+ Run graphviz 'dot' over this graph, returning whatever 'dot'
+ writes to stdout.
+
+ *args* will be passed along as commandline arguments.
+
+ *name* is the name of the graph
+
+ *urls* is a dictionary mapping class names to http urls
+
+ Raises DotException for any of the many OS- and
+ installation-related errors that may occur.
+ """
+ try:
+ dot = subprocess.Popen(['dot'] + list(args),
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ close_fds=True)
+ except OSError:
+ raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
+ except ValueError:
+ raise DotException("'dot' called with invalid arguments")
+ except:
+ raise DotException("Unexpected error calling 'dot'")
+
+ self.generate_dot(dot.stdin, name, parts, urls, graph_options,
+ node_options, edge_options)
+ dot.stdin.close()
+ result = dot.stdout.read()
+ returncode = dot.wait()
+ if returncode != 0:
+ raise DotException("'dot' returned the errorcode %d" % returncode)
+ return result
+
+class inheritance_diagram(Body, Element):
+ """
+ A docutils node to use as a placeholder for the inheritance
+ diagram.
+ """
+ pass
+
+def inheritance_diagram_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state,
+ state_machine):
+ """
+ Run when the inheritance_diagram directive is first encountered.
+ """
+ node = inheritance_diagram()
+
+ class_names = arguments
+
+ # Create a graph starting with the list of classes
+ graph = InheritanceGraph(class_names)
+
+ # Create xref nodes for each target of the graph's image map and
+ # add them to the doc tree so that Sphinx can resolve the
+ # references to real URLs later. These nodes will eventually be
+ # removed from the doctree after we're done with them.
+ for name in graph.get_all_class_names():
+ refnodes, x = xfileref_role(
+ 'class', ':class:`%s`' % name, name, 0, state)
+ node.extend(refnodes)
+ # Store the graph object so we can use it to generate the
+ # dot file later
+ node['graph'] = graph
+ # Store the original content for use as a hash
+ node['parts'] = options.get('parts', 0)
+ node['content'] = " ".join(class_names)
+ return [node]
+
+def get_graph_hash(node):
+ return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
+
+def html_output_graph(self, node):
+ """
+ Output the graph for HTML. This will insert a PNG with clickable
+ image map.
+ """
+ graph = node['graph']
+ parts = node['parts']
+
+ graph_hash = get_graph_hash(node)
+ name = "inheritance%s" % graph_hash
+ path = '_images'
+ dest_path = os.path.join(setup.app.builder.outdir, path)
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ png_path = os.path.join(dest_path, name + ".png")
+ path = setup.app.builder.imgpath
+
+ # Create a mapping from fully-qualified class names to URLs.
+ urls = {}
+ for child in node:
+ if child.get('refuri') is not None:
+ urls[child['reftitle']] = child.get('refuri')
+ elif child.get('refid') is not None:
+ urls[child['reftitle']] = '#' + child.get('refid')
+
+ # These arguments to dot will save a PNG file to disk and write
+ # an HTML image map to stdout.
+ image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
+ name, parts, urls)
+ return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
+ (path, name, name, image_map))
+
+def latex_output_graph(self, node):
+ """
+ Output the graph for LaTeX. This will insert a PDF.
+ """
+ graph = node['graph']
+ parts = node['parts']
+
+ graph_hash = get_graph_hash(node)
+ name = "inheritance%s" % graph_hash
+ dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
+
+ graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
+ name, parts, graph_options={'size': '"6.0,6.0"'})
+ return '\n\\includegraphics{%s}\n\n' % pdf_path
+
+def visit_inheritance_diagram(inner_func):
+ """
+ This is just a wrapper around html/latex_output_graph to make it
+ easier to handle errors and insert warnings.
+ """
+ def visitor(self, node):
+ try:
+ content = inner_func(self, node)
+ except DotException, e:
+ # Insert the exception as a warning in the document
+ warning = self.document.reporter.warning(str(e), line=node.line)
+ warning.parent = node
+ node.children = [warning]
+ else:
+ source = self.document.attributes['source']
+ self.body.append(content)
+ node.children = []
+ return visitor
+
+def do_nothing(self, node):
+ pass
+
+def setup(app):
+ setup.app = app
+ setup.confdir = app.confdir
+
+ app.add_node(
+ inheritance_diagram,
+ latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
+ html=(visit_inheritance_diagram(html_output_graph), do_nothing))
+ app.add_directive(
+ 'inheritance-diagram', inheritance_diagram_directive,
+ False, (1, 100, 0), parts = directives.nonnegative_int)
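Editor's note: a hypothetical conf.py fragment showing how this directive could be wired into a Sphinx build. The sphinxext path and the class names in the example directive are illustrative only:

    # conf.py (sketch): make docs/sphinxext importable and enable the extension.
    import os, sys
    sys.path.insert(0, os.path.abspath('sphinxext'))

    extensions = ['inheritance_diagram']

    # In a .rst source file the directive is then used as, for example:
    #
    #   .. inheritance-diagram:: zmq.Socket zmq.Context
    #      :parts: 1
    #
    # :parts: trims the displayed names to their last N dotted components,
    # as implemented by InheritanceGraph.class_name().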
diff --git a/docs/sphinxext/ipython_console_highlighting.py b/docs/sphinxext/ipython_console_highlighting.py
new file mode 100644
index 0000000..217b779
--- /dev/null
+++ b/docs/sphinxext/ipython_console_highlighting.py
@@ -0,0 +1,114 @@
+"""reST directive for syntax-highlighting ipython interactive sessions.
+
+XXX - See what improvements can be made based on the new (as of Sept 2009)
+'pycon' lexer for the python console. At the very least it will give better
+highlighted tracebacks.
+"""
+
+#-----------------------------------------------------------------------------
+# Needed modules
+
+# Standard library
+import re
+
+# Third party
+from pygments.lexer import Lexer, do_insertions
+from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
+ PythonTracebackLexer)
+from pygments.token import Comment, Generic
+
+from sphinx import highlighting
+
+#-----------------------------------------------------------------------------
+# Global constants
+line_re = re.compile('.*?\n')
+
+#-----------------------------------------------------------------------------
+# Code begins - classes and functions
+
+class IPythonConsoleLexer(Lexer):
+ """
+ For IPython console output or doctests, such as:
+
+ .. sourcecode:: ipython
+
+ In [1]: a = 'foo'
+
+ In [2]: a
+ Out[2]: 'foo'
+
+ In [3]: print a
+ foo
+
+ In [4]: 1 / 0
+
+ Notes:
+
+ - Tracebacks are not currently supported.
+
+ - It assumes the default IPython prompts, not customized ones.
+ """
+
+ name = 'IPython console session'
+ aliases = ['ipython']
+ mimetypes = ['text/x-ipython-console']
+ input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
+ output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
+ continue_prompt = re.compile(" \.\.\.+:")
+ tb_start = re.compile("\-+")
+
+ def get_tokens_unprocessed(self, text):
+ pylexer = PythonLexer(**self.options)
+ tblexer = PythonTracebackLexer(**self.options)
+
+ curcode = ''
+ insertions = []
+ for match in line_re.finditer(text):
+ line = match.group()
+ input_prompt = self.input_prompt.match(line)
+ continue_prompt = self.continue_prompt.match(line.rstrip())
+ output_prompt = self.output_prompt.match(line)
+ if line.startswith("#"):
+ insertions.append((len(curcode),
+ [(0, Comment, line)]))
+ elif input_prompt is not None:
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, input_prompt.group())]))
+ curcode += line[input_prompt.end():]
+ elif continue_prompt is not None:
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, continue_prompt.group())]))
+ curcode += line[continue_prompt.end():]
+ elif output_prompt is not None:
+ # Use the 'error' token for output. We should probably make
+ # our own token, but error is typicaly in a bright color like
+ # our own token, but error is typically in a bright color like
+ insertions.append((len(curcode),
+ [(0, Generic.Error, output_prompt.group())]))
+ curcode += line[output_prompt.end():]
+ else:
+ if curcode:
+ for item in do_insertions(insertions,
+ pylexer.get_tokens_unprocessed(curcode)):
+ yield item
+ curcode = ''
+ insertions = []
+ yield match.start(), Generic.Output, line
+ if curcode:
+ for item in do_insertions(insertions,
+ pylexer.get_tokens_unprocessed(curcode)):
+ yield item
+
+
+def setup(app):
+ """Setup as a sphinx extension."""
+
+ # This is only a lexer, so registering it directly with Pygments below
+ # appears sufficient. But if somebody knows the right way to do this via
+ # the Sphinx API, by all means fix it here. At least having this setup()
+ # function suppresses the Sphinx warning we'd get without it.
+ pass
+
+#-----------------------------------------------------------------------------
+# Register the extension as a valid pygments lexer
+highlighting.lexers['ipython'] = IPythonConsoleLexer()
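Editor's note: a small sketch driving the lexer above directly through the public Pygments API, outside of Sphinx. It assumes Pygments and Sphinx are installed (importing the module registers the lexer with Sphinx's highlighting table); the session text is invented:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from ipython_console_highlighting import IPythonConsoleLexer

    session = ("In [1]: a = 'foo'\n"
               "\n"
               "In [2]: a\n"
               "Out[2]: 'foo'\n")

    # Input/output prompts come out as Generic.Prompt / Generic.Error tokens;
    # the code between them is re-lexed by PythonLexer via do_insertions().
    print(highlight(session, IPythonConsoleLexer(), TerminalFormatter()))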
diff --git a/docs/sphinxext/numpydoc.py b/docs/sphinxext/numpydoc.py
new file mode 100644
index 0000000..7b4821b
--- /dev/null
+++ b/docs/sphinxext/numpydoc.py
@@ -0,0 +1,116 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]_
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+ reference_offset=[0]):
+ if what == 'module':
+ # Strip top title
+ title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+ re.I|re.S)
+ lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
+ else:
+ doc = get_doc_object(obj, what, "\n".join(lines))
+ lines[:] = unicode(doc).split("\n")
+
+ if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+ obj.__name__:
+ if hasattr(obj, '__module__'):
+ v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__))
+ else:
+ v = dict(full_name=obj.__name__)
+ lines += ['', '.. htmlonly::', '']
+ lines += [' %s' % x for x in
+ (app.config.numpydoc_edit_link % v).split("\n")]
+
+ # replace reference numbers so that there are no duplicates
+ references = []
+ for l in lines:
+ l = l.strip()
+ if l.startswith('.. ['):
+ try:
+ references.append(int(l[len('.. ['):l.index(']')]))
+ except ValueError:
+ print "WARNING: invalid reference in %s docstring" % name
+
+ # Start renaming from the biggest number, otherwise we may
+ # overwrite references.
+ references.sort()
+ if references:
+ for i, line in enumerate(lines):
+ for r in references:
+ new_r = reference_offset[0] + r
+ lines[i] = lines[i].replace('[%d]_' % r,
+ '[%d]_' % new_r)
+ lines[i] = lines[i].replace('.. [%d]' % r,
+ '.. [%d]' % new_r)
+
+ reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+ # Do not try to inspect classes that don't define `__init__`
+ if (inspect.isclass(obj) and
+ 'initializes x; see ' in pydoc.getdoc(obj.__init__)):
+ return '', ''
+
+ if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+ if not hasattr(obj, '__doc__'): return
+
+ doc = SphinxDocString(pydoc.getdoc(obj))
+ if doc['Signature']:
+ sig = re.sub("^[^(]*", "", doc['Signature'])
+ return sig, ''
+
+def initialize(app):
+ try:
+ app.connect('autodoc-process-signature', mangle_signature)
+ except:
+ monkeypatch_sphinx_ext_autodoc()
+
+def setup(app, get_doc_object_=get_doc_object):
+ global get_doc_object
+ get_doc_object = get_doc_object_
+
+ app.connect('autodoc-process-docstring', mangle_docstrings)
+ app.connect('builder-inited', initialize)
+ app.add_config_value('numpydoc_edit_link', None, True)
+
+#------------------------------------------------------------------------------
+# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
+#------------------------------------------------------------------------------
+
+def monkeypatch_sphinx_ext_autodoc():
+ global _original_format_signature
+ import sphinx.ext.autodoc
+
+ if sphinx.ext.autodoc.format_signature is our_format_signature:
+ return
+
+ print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
+ _original_format_signature = sphinx.ext.autodoc.format_signature
+ sphinx.ext.autodoc.format_signature = our_format_signature
+
+def our_format_signature(what, obj):
+ r = mangle_signature(None, what, None, obj, None, None, None)
+ if r is not None:
+ return r[0]
+ else:
+ return _original_format_signature(what, obj)
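Editor's note: a rough sketch of what the 'autodoc-process-docstring' hook above does to a docstring. FakeApp and FakeConfig are stand-ins for the real Sphinx application object, and the example function is invented; only the files above are assumed importable:

    import inspect
    from numpydoc import mangle_docstrings

    class FakeConfig(object):
        numpydoc_edit_link = None   # disable the optional edit link
    class FakeApp(object):
        config = FakeConfig()

    def area(r):
        """Compute a circle's area.

        Parameters
        ----------
        r : float
            Radius.
        """
        return 3.14159 * r * r

    # The hook rewrites the lines list in place into Sphinx field lists.
    lines = inspect.getdoc(area).split('\n')
    mangle_docstrings(FakeApp(), 'function', 'area', area, None, lines)
    print('\n'.join(lines))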
diff --git a/docs/sphinxext/sphinx_cython.py b/docs/sphinxext/sphinx_cython.py
new file mode 100644
index 0000000..38c6322
--- /dev/null
+++ b/docs/sphinxext/sphinx_cython.py
@@ -0,0 +1,115 @@
+'''
+
+sphinx_cython.py
+
+This module monkeypatches sphinx autodoc to support Cython generated
+function signatures in the first line of the docstring of functions
+implemented as C extensions.
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This file is part of LLFUSE (http://python-llfuse.googlecode.com).
+LLFUSE can be distributed under the terms of the GNU LGPL.
+
+It has been slightly modified by MinRK.
+'''
+
+import sphinx.ext.autodoc as SphinxAutodoc
+from sphinx.util.docstrings import prepare_docstring
+import inspect
+import re
+from sphinx.util import force_decode
+
+TYPE_RE = re.compile(r'(?:int|char)(?:\s+\*?\s*|\s*\*?\s+)([a-zA-Z_].*)')
+
+ClassDocumenter = SphinxAutodoc.ClassDocumenter
+MethodDocumenter = SphinxAutodoc.MethodDocumenter
+FunctionDocumenter = SphinxAutodoc.FunctionDocumenter
+
+class MyDocumenter(SphinxAutodoc.Documenter):
+ '''
+ Overrides `get_doc()` to strip the first-line signature from function
+ and method docstrings, and `format_args()` to parse that first-line
+ signature and give it precedence over the one autodoc would otherwise
+ derive.
+ '''
+
+ def get_doc(self, encoding=None):
+ docstr = self.get_attr(self.object, '__doc__', None)
+ if docstr:
+ docstr = force_decode(docstr, encoding)
+
+ myname = self.fullname[len(self.modname)+1:]
+ if myname.endswith('()'):
+ myname = myname[:-2]
+
+ if (docstr
+ and (myname + '(') in docstr
+ and '\n' in docstr
+ and docstr[docstr.index('\n')-1] == ')'):
+ docstr = docstr[docstr.index('\n')+1:]
+
+ if docstr:
+ # make sure we have Unicode docstrings, then sanitize and split
+ # into lines
+ return [prepare_docstring(force_decode(docstr, encoding))]
+ return []
+
+
+ def format_args(self):
+ myname = self.fullname[len(self.modname)+1:]
+ if myname.endswith('()'):
+ myname = myname[:-2]
+ # Try to parse docstring
+ docstr = self.get_attr(self.object, '__doc__', None)
+ if docstr:
+ docstr = force_decode(docstr, 'utf-8')
+ if (docstr
+ and (myname + '(') in docstr
+ and '\n' in docstr
+ and docstr[docstr.index('\n')-1] == ')'):
+ args = docstr[len(myname)+1:docstr.index('\n')-1]
+ # Get rid of Cython-style type declarations
+ # Get rid of Cython style types declarations
+ argl = []
+ for arg in [ x.strip() for x in args.split(',') ]:
+ if (arg in ('cls', 'self')
+ and isinstance(self, SphinxAutodoc.MethodDocumenter)):
+ continue
+ hit = TYPE_RE.match(arg)
+ if hit:
+ argl.append(hit.group(1))
+ else:
+ argl.append(arg)
+ args = '(%s)' % ', '.join(argl)
+ else:
+ # super seems to get this wrong:
+ for cls in (MethodDocumenter,
+ FunctionDocumenter,
+ ClassDocumenter):
+ if isinstance(self, cls):
+ return cls.format_args(self)
+ # return super(self.__class__, self).format_args()
+
+ # escape backslashes for reST
+ args = args.replace('\\', '\\\\')
+ return args
+
+
+class MyFunctionDocumenter(MyDocumenter, SphinxAutodoc.FunctionDocumenter):
+ pass
+
+class MyMethodDocumenter(MyDocumenter, SphinxAutodoc.MethodDocumenter):
+ pass
+
+class MyClassDocumenter(MyDocumenter, SphinxAutodoc.ClassDocumenter):
+ def format_signature(self):
+ return self.format_args() or "()"
+
+SphinxAutodoc.ClassDocumenter = MyClassDocumenter
+SphinxAutodoc.MethodDocumenter = MyMethodDocumenter
+SphinxAutodoc.FunctionDocumenter = MyFunctionDocumenter
+
+# don't use AttributeDocumenter on 'method_descriptor' members:
+AD = SphinxAutodoc.AttributeDocumenter
+AD.method_types = tuple(list(AD.method_types) + [type(str.count)])
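Editor's note: the docstring convention this module targets is a first line that repeats the Cython signature, e.g. `send(self, data, int flags=0)`. Below is a small standalone sketch of the type-stripping step used by `format_args()` above; the signature string is invented, the regex is the module's own:

    import re

    TYPE_RE = re.compile(r'(?:int|char)(?:\s+\*?\s*|\s*\*?\s+)([a-zA-Z_].*)')

    args = 'self, data, int flags=0, char *buf'
    argl = []
    for arg in [a.strip() for a in args.split(',')]:
        if arg in ('cls', 'self'):   # dropped for methods, as in MyDocumenter
            continue
        hit = TYPE_RE.match(arg)
        argl.append(hit.group(1) if hit else arg)

    print('(%s)' % ', '.join(argl))  # -> (data, flags=0, buf)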
diff --git a/docs/update_ghpages.sh b/docs/update_ghpages.sh
new file mode 100755
index 0000000..a097691
--- /dev/null
+++ b/docs/update_ghpages.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env sh
+# pick repo for gh-pages branch
+repo=origin
+
+if [ ! -d gh-pages ]; then
+ echo "setting up gh-pages subdir"
+ mkdir gh-pages || exit -1
+ cp -r ../.git gh-pages/ || exit -1
+ cd gh-pages || exit -1
+ init=0
+ git checkout $repo/gh-pages || init=1
+ if [ "$init" != "0" ]; then
+ echo "initializing gh-pages repo"
+ git symbolic-ref HEAD refs/heads/gh-pages || exit -1
+ rm .git/index || exit -1
+ git clean -fdx || exit -1
+ touch index.html
+ git add .
+ git commit -a -m 'init gh-pages' || exit -1
+ git push origin HEAD:gh-pages
+ fi
+ cd ..
+fi
+echo "updating local gh-pages with html build"
+rsync -va build/html/ gh-pages/ --delete --exclude .git --exclude .nojekyll || exit -1
+cd gh-pages
+touch .nojekyll
+git add .nojekyll
+git add .
+git commit -a || exit -1
+echo "pushing to remote gh-pages"
+# pwd
+git push $repo HEAD:gh-pages